/************************************************************************/
/* This is just one of many possible correct solutions                  */
/************************************************************************/
#include <iostream>
#include <iomanip>
#include <mpi.h>

// Since the focus of the assignment is not on the OO approach but on MPI
// communication, some of the variables and constants are kept global.
const unsigned MAX_ROW_COUNT = 20;
const unsigned MAX_COLUMN_COUNT = 30;
const int MASTER_RANK = 0;

enum Tags { ROW_TAG = 1000, COLUMN_TAG, ROW_INDEX_TAG, COLUMN_INDEX_TAG, RESULT_TAG, END_TAG };

//MPI::Datatype rowType(MPI::DOUBLE);
//MPI::Datatype columnType(MPI::DOUBLE);
MPI::Datatype rowType;
MPI::Datatype columnType;

double matrixA[MAX_ROW_COUNT][MAX_COLUMN_COUNT],
       matrixB[MAX_ROW_COUNT][MAX_COLUMN_COUNT],
       matrixC[MAX_ROW_COUNT][MAX_COLUMN_COUNT];
unsigned matrixARowCount, matrixAColumnCount,
         matrixBRowCount, matrixBColumnCount,
         matrixCRowCount, matrixCColumnCount;

// Reads matrix dimensions and elements from standard input.
void loadMatrix(double pp_matrix[][MAX_COLUMN_COUNT], unsigned& pr_rowCount, unsigned& pr_columnCount)
{
    std::cout << "Enter the matrix dimensions: ";
    std::cin >> pr_rowCount >> pr_columnCount;
    std::cout << "Enter the matrix elements:\n";
    for (unsigned i = 0; i < pr_rowCount; i++)
    {
        for (unsigned j = 0; j < pr_columnCount; j++)
        {
            std::cin >> pp_matrix[i][j];
        }
    }
}

// Prints a matrix together with the rank of the process that owns it.
void writeMatrix(int rank, double pp_matrix[][MAX_COLUMN_COUNT], unsigned& pr_rowCount, unsigned& pr_columnCount)
{
    std::cout << "Matrix elements in process " << rank << std::endl;
    for (unsigned i = 0; i < pr_rowCount; i++)
    {
        for (unsigned j = 0; j < pr_columnCount; j++)
        {
            std::cout << std::setw(5) << pp_matrix[i][j];
        }
        std::cout << std::endl;
    }
}

// Prints an array together with the rank of the process that owns it.
void writeArray(int rank, double *pp_array, unsigned pv_elementCount)
{
    std::cout << "Array elements in process " << rank << std::endl;
    for (unsigned i = 0; i < pv_elementCount; i++)
    {
        std::cout << std::setw(5) << pp_array[i];
    }
    std::cout << std::endl;
}

// Dot product of one row of A and one column of B.
double multiplyRowAndColumn(double* pp_row, double* pp_column, unsigned pv_length)
{
    double result = 0;
    for (unsigned i = 0; i < pv_length; i++)
    {
        result += pp_row[i] * pp_column[i];
    }
    return result;
}

// Sends the next work item (row/column indices, the row of A and the column of B)
// to the given worker and advances the counter of dispatched elements.
void sendArrays(unsigned& pr_sentCount, int pv_workerRank)
{
    unsigned currentRowIndex = pr_sentCount / matrixCColumnCount;
    unsigned currentColumnIndex = pr_sentCount % matrixCColumnCount;
    MPI::COMM_WORLD.Send(&currentRowIndex, 1, MPI::UNSIGNED, pv_workerRank, ROW_INDEX_TAG);
    MPI::COMM_WORLD.Send(&currentColumnIndex, 1, MPI::UNSIGNED, pv_workerRank, COLUMN_INDEX_TAG);
    MPI::COMM_WORLD.Send(matrixA[currentRowIndex], 1, rowType, pv_workerRank, ROW_TAG);
    MPI::COMM_WORLD.Send(matrixB[0] + currentColumnIndex, 1, columnType, pv_workerRank, COLUMN_TAG);
    pr_sentCount++;
}

int main(int argc, char *argv[])
{
    double matrixARow[MAX_ROW_COUNT];
    // double matrixBColumn[MAX_ROW_COUNT*MAX_COLUMN_COUNT];
    double matrixBColumn[MAX_ROW_COUNT];

    // initialize MPI
    MPI::Init(argc, argv);
    // determine this process' rank within the MPI world (the first one is 0)
    unsigned rank = MPI::COMM_WORLD.Get_rank();
    // determine the size of the MPI world
    unsigned size = MPI::COMM_WORLD.Get_size();

    // if you are the "master", load the matrices and send everyone the data they need
    if (MASTER_RANK == rank)
    {
        loadMatrix(matrixA, matrixARowCount, matrixAColumnCount);
        loadMatrix(matrixB, matrixBRowCount, matrixBColumnCount);
        // if the matrix dimensions do not match, it is the end of the world
        std::cout << "I am " << rank << " of " << size
                  << ", I see the following values: A " << matrixARowCount << "X" << matrixAColumnCount
                  << ", B" << matrixBRowCount << "X" << matrixBColumnCount << std::endl;
        if (matrixAColumnCount != matrixBRowCount ||
            matrixAColumnCount > MAX_ROW_COUNT)
        {
            std::cerr << "End of the world\n";
            MPI::COMM_WORLD.Abort(2908);
        }
        else
        {
            writeMatrix(rank, matrixC, matrixARowCount, matrixBRowCount);
        }
    }

    std::cout << "I am " << rank << " of " << size
              << ", I see the following values: A " << matrixARowCount << "X" << matrixAColumnCount
              << ", B" << matrixBRowCount << "X" << matrixBColumnCount << std::endl;

    // if execution has reached this point, every process needs to
    // 1. obtain the dimensions of matrices A and B
    MPI::COMM_WORLD.Bcast(&matrixAColumnCount, 1, MPI::UNSIGNED, MASTER_RANK);
    MPI::COMM_WORLD.Bcast(&matrixARowCount, 1, MPI::UNSIGNED, MASTER_RANK);
    MPI::COMM_WORLD.Bcast(&matrixBColumnCount, 1, MPI::UNSIGNED, MASTER_RANK);
    MPI::COMM_WORLD.Bcast(&matrixBRowCount, 1, MPI::UNSIGNED, MASTER_RANK);

    std::cout << "I am " << rank << " of " << size
              << ", I see the following values: A " << matrixARowCount << "X" << matrixAColumnCount
              << ", B" << matrixBRowCount << "X" << matrixBColumnCount << std::endl;

    // 2. set up all the required derived datatypes
    // rowType = rowType.Create_contiguous(matrixAColumnCount);
    // one row of A: matrixAColumnCount contiguous doubles
    rowType = MPI::DOUBLE.Create_contiguous(matrixAColumnCount);
    rowType.Commit();
    // columnType = columnType.Create_vector(matrixBRowCount, 1, MAX_COLUMN_COUNT);
    // one column of B: matrixBRowCount doubles, each MAX_COLUMN_COUNT apart (the allocated row width)
    columnType = MPI::DOUBLE.Create_vector(matrixBRowCount, 1, MAX_COLUMN_COUNT);
    columnType.Commit();

    if (MASTER_RANK == rank)
    {
        matrixCRowCount = matrixARowCount;
        matrixCColumnCount = matrixBColumnCount;
        const unsigned matrixCElementCount = matrixCRowCount * matrixCColumnCount;
        unsigned sentCount = 0, processedCount = 0, excessSlavesCount = 0;

        // first send one piece of work to every process
        while (sentCount + excessSlavesCount < size - 1)
        {
            // if the process is needed, send it data
            if (sentCount < matrixCElementCount)
            {
                // sentCount+1 because work goes to processes from rank 1 onwards; the master must not send to itself
                // sendArrays increments its first argument, so there is no increment in this part of the program
                sendArrays(sentCount, (int)sentCount + 1);
            }
            else // otherwise, shut it down
            {
                // send any value, since the payload is irrelevant; only the message tag matters
                excessSlavesCount++;
                MPI::COMM_WORLD.Send(&sentCount, 1, MPI::UNSIGNED, sentCount + excessSlavesCount, END_TAG);
            }
        }

        // collect the computed elements and reply with either new data to process or the end-of-work message
        while (processedCount < matrixCElementCount)
        {
            unsigned currentRowIndex;
            unsigned currentColumnIndex;
            MPI::Status status;
            double matrixCElement;

            // receive the results one at a time
            MPI::COMM_WORLD.Recv(&matrixCElement, 1, MPI::DOUBLE, MPI::ANY_SOURCE, RESULT_TAG, status);
            int workerRank = status.Get_source();
            MPI::COMM_WORLD.Recv(&currentRowIndex, 1, MPI::UNSIGNED, workerRank, ROW_INDEX_TAG, status);
            MPI::COMM_WORLD.Recv(&currentColumnIndex, 1, MPI::UNSIGNED, workerRank, COLUMN_INDEX_TAG, status);
            matrixC[currentRowIndex][currentColumnIndex] = matrixCElement;
            processedCount++;

            // if there are more work items to process, send the next one to the worker that just returned a result
            if (sentCount < matrixCElementCount)
            {
                sendArrays(sentCount, workerRank);
            }
            else
            {
                // send any value, since the payload is irrelevant; only the message tag matters
                MPI::COMM_WORLD.Send(&currentRowIndex, 1, MPI::UNSIGNED, workerRank, END_TAG);
            }
        }

        // finally, the "master" prints the resulting matrix
        writeMatrix(rank, matrixC, matrixCRowCount, matrixCColumnCount);
    }
    else
    {
        bool isSlaveNeeded = true;
        while (isSlaveNeeded)
        {
            unsigned currentRowIndex;
            unsigned currentColumnIndex = 0;
            MPI::Status status;

            MPI::COMM_WORLD.Recv(&currentRowIndex, 1, MPI::UNSIGNED, MASTER_RANK, MPI::ANY_TAG, status);
            int receivedTag = status.Get_tag();
            if (END_TAG == receivedTag)
            {
                // if the message tag signals the end of work, there is no reason to keep looping
                isSlaveNeeded = false;
                std::cout << rank << ": Goodbye, cruel world!" << std::endl;
            }
            else
            {
                // otherwise, receive the remaining data
                MPI::COMM_WORLD.Recv(&currentColumnIndex, 1, MPI::UNSIGNED, MASTER_RANK, COLUMN_INDEX_TAG);
                MPI::COMM_WORLD.Recv(matrixARow, 1, rowType, MASTER_RANK, ROW_TAG);
                // MPI::COMM_WORLD.Recv(matrixBColumn, 1, columnType, MASTER_RANK, COLUMN_TAG);
                MPI::COMM_WORLD.Recv(matrixBColumn, matrixBRowCount, MPI::DOUBLE, MASTER_RANK, COLUMN_TAG);

                // print what was received; the row of A holds matrixAColumnCount elements,
                // the column of B holds matrixBRowCount elements
                std::cout << rank << ": Computing element [" << currentRowIndex << ","
                          << currentColumnIndex << "]" << std::endl;
                writeArray(rank, matrixARow, matrixAColumnCount);
                writeArray(rank, matrixBColumn, matrixBRowCount);

                // compute the required element and send it back
                double matrixCElement = multiplyRowAndColumn(matrixARow, matrixBColumn, matrixBRowCount);
                MPI::COMM_WORLD.Send(&matrixCElement, 1, MPI::DOUBLE, MASTER_RANK, RESULT_TAG);
                MPI::COMM_WORLD.Send(&currentRowIndex, 1, MPI::UNSIGNED, MASTER_RANK, ROW_INDEX_TAG);
                MPI::COMM_WORLD.Send(&currentColumnIndex, 1, MPI::UNSIGNED, MASTER_RANK, COLUMN_INDEX_TAG);
            }
        }
    }

    rowType.Free();
    columnType.Free();
    // leave the MPI world
    MPI::Finalize();
    return 0;
}
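
// Build/run sketch: the program relies on the deprecated MPI C++ bindings
// (the MPI:: namespace), which were removed in MPI-3.0, so it assumes an MPI
// installation that still ships them (e.g. an older Open MPI built with the
// C++ bindings enabled). Assuming the source is saved as matrix_mul.cpp (an
// illustrative name, not prescribed here), it could be built and started
// roughly as follows:
//
//     mpic++ -o matrix_mul matrix_mul.cpp
//     mpirun -np 4 ./matrix_mul
//
// With 4 processes, rank 0 acts as the master that reads both matrices from
// standard input, and ranks 1, 2 and 3 compute individual elements of C.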