
Implementation of the Algorithm
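The fragment below implements block matrix multiplication over MPI following Cannon's scheme: the processes form a square numOfBlocks × numOfBlocks grid, each owning one blockSize × blockSize block of A, B and C. After an initial cyclic alignment (row i of A shifted left by i positions, column j of B shifted up by j positions), every step shifts the A blocks one position left and the B blocks one position up and accumulates the product of the current pair of blocks into the local block of C.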

      
    //MAlgebra.h
    #define OUT          // annotation only: marks an output parameter
    typedef int* Matrix; // square matrix stored as a flat row-major array

    //MAlgebra.cpp
    // Naive cubic multiplication of two size x size matrices:
    // result += left * right. The caller must zero-initialize result.
    void MultMatrix(const Matrix left, const Matrix right, OUT Matrix result, int size)
    {
        for (int i = 0; i < size; ++i)
            for (int j = 0; j < size; ++j)
                for (int k = 0; k < size; ++k)
                    result[i*size + j] += left[i*size + k] * right[k*size + j];
    }

    //main.cpp (fragment; declarations of A, B, C, D, status, workCommutator,
    //matrixSize, blockSize, numOfBlocks, myRank and startwtime are elided)
    Matrix Cb = InitializeMatrix(blockSize, 0);
    Matrix Ab = InitializeMatrix(blockSize, 0);
    Matrix Bb = InitializeMatrix(blockSize, 0);

    if (myRank == 0)
    {
        D = InitializeMatrix(matrixSize, 0);
        C = InitializeMatrix(matrixSize, 0);
        A = InitializeMatrix(matrixSize, 1);
        B = InitializeMatrix(matrixSize, 2);
        startwtime = MPI_Wtime();
        // Repack puts the matrices into block-major order so that
        // MPI_Scatter hands each process one contiguous block.
        A = Repack(A, matrixSize, blockSize);
        B = Repack(B, matrixSize, blockSize);
    }
    MPI_Barrier(workCommutator);
    MPI_Scatter(A, blockSize*blockSize, MPI_INT, Ab, blockSize*blockSize, MPI_INT, 0, workCommutator);
    MPI_Scatter(B, blockSize*blockSize, MPI_INT, Bb, blockSize*blockSize, MPI_INT, 0, workCommutator);

    // Coordinates of this process in the numOfBlocks x numOfBlocks grid.
    int rowIndex    = myRank / numOfBlocks;
    int columnIndex = myRank % numOfBlocks;

    // Initial alignment: row i of A is shifted i positions to the left,
    // column j of B is shifted j positions up (both cyclically).
    // NB: MPI_Bsend requires buffer space attached beforehand with
    // MPI_Buffer_attach (not shown in this fragment).
    if (rowIndex != 0)
    {
        if (columnIndex < rowIndex)
            MPI_Bsend(Ab, blockSize*blockSize, MPI_INT, myRank + numOfBlocks - rowIndex, 0, workCommutator);
        else
            MPI_Bsend(Ab, blockSize*blockSize, MPI_INT, myRank - rowIndex, 0, workCommutator);
    }
    if (columnIndex != 0)
    {
        if (rowIndex < columnIndex)
            MPI_Bsend(Bb, blockSize*blockSize, MPI_INT, myRank + (numOfBlocks - columnIndex)*numOfBlocks, 1, workCommutator);
        else
            MPI_Bsend(Bb, blockSize*blockSize, MPI_INT, myRank - numOfBlocks*columnIndex, 1, workCommutator);
    }
    if ((rowIndex != 0) && (columnIndex != 0))
    {
        MPI_Recv(Ab, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 0, workCommutator, &status);
        MPI_Recv(Bb, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 1, workCommutator, &status);
    }
    if ((rowIndex == 0) && (columnIndex != 0))
        MPI_Recv(Bb, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 1, workCommutator, &status);
    if ((rowIndex != 0) && (columnIndex == 0))
        MPI_Recv(Ab, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 0, workCommutator, &status);
    MPI_Barrier(workCommutator);

    // First partial product on the aligned blocks.
    MultMatrix(Ab, Bb, Cb, blockSize);

    // numOfBlocks-1 shift-and-multiply steps: A moves one block left,
    // B moves one block up (cyclically); each step accumulates into Cb.
    for (int i = 0; i < numOfBlocks - 1; i++)
    {
        if (myRank == rowIndex*numOfBlocks)
            MPI_Bsend(Ab, blockSize*blockSize, MPI_INT, (rowIndex + 1)*numOfBlocks - 1, 0, workCommutator);
        else
            MPI_Bsend(Ab, blockSize*blockSize, MPI_INT, myRank - 1, 0, workCommutator);

        if (myRank < numOfBlocks)
            MPI_Bsend(Bb, blockSize*blockSize, MPI_INT, myRank + (numOfBlocks - 1)*numOfBlocks, 1, workCommutator);
        else
            MPI_Bsend(Bb, blockSize*blockSize, MPI_INT, myRank - numOfBlocks, 1, workCommutator);

        MPI_Recv(Ab, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 0, workCommutator, &status);
        MPI_Recv(Bb, blockSize*blockSize, MPI_INT, MPI_ANY_SOURCE, 1, workCommutator, &status);

        MultMatrix(Ab, Bb, Cb, blockSize);
    }

    // Collect the result blocks; C arrives on rank 0 in block-major order.
    MPI_Gather(Cb, blockSize*blockSize, MPI_INT, C, blockSize*blockSize, MPI_INT, 0, workCommutator);
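The fragment relies on two helpers whose definitions are not shown: InitializeMatrix(n, value), which judging by its uses allocates an n × n matrix filled with value, and Repack, which must rearrange a matrix into block-major order for the MPI_Scatter calls to distribute whole blocks. A minimal sketch under those assumptions (both bodies are hypothetical; only the names and call signatures come from the code above):

    // Hypothetical: allocate an n x n matrix with every element set to value.
    Matrix InitializeMatrix(int n, int value)
    {
        Matrix m = new int[n * n];
        for (int i = 0; i < n * n; ++i)
            m[i] = value;
        return m;
    }

    // Hypothetical: copy a row-major size x size matrix into block-major
    // order, so that every blockSize x blockSize block is contiguous and
    // MPI_Scatter hands exactly one block to each process.
    Matrix Repack(Matrix m, int size, int blockSize)
    {
        int blocks = size / blockSize;                // grid dimension
        Matrix packed = new int[size * size];
        int pos = 0;
        for (int bi = 0; bi < blocks; ++bi)           // block row
            for (int bj = 0; bj < blocks; ++bj)       // block column
                for (int i = 0; i < blockSize; ++i)   // row inside the block
                    for (int j = 0; j < blockSize; ++j)
                        packed[pos++] = m[(bi*blockSize + i)*size + bj*blockSize + j];
        delete[] m;                                   // caller reassigns: A = Repack(A, ...)
        return packed;
    }

With this packing, block (i, j) lands on the process with rank i*numOfBlocks + j after the scatter, which is exactly the grid indexing the shift logic above relies on. By the same token, the C gathered on rank 0 is in block-major order and would need the inverse repacking before use.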
