Simple MPI parallelization of a FOR loop
I am trying to do a simple MPI distribution of for-loop iterations to "Worker" cores. Each "Worker" should do the job (take a vector of size "nGenes", typically about 6, in double format) and send back the result (a single double). But I am having trouble even with the first step: passing messages from the "Master" core (0) to the "Worker" cores (1, 2, ..., nWorkers). The program gets through the sending part, but it gets stuck in the receiving part, at the line with MPI_Recv(...). I can't see what the problem might be. Please help.
#include <iostream>
#include <mpi.h>
#include <math.h>
#include <stdlib.h>     /* srand, rand */

double fR(double a);
void Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(int Number_of_Tasks, int NumberOfProcessors, int* START_on_core, int* END_on_core);

int main(int argc, char* argv[])
{
    int nIndividuals = 10;
    int nGenes = 6;

    double** DATA;
    DATA = new double* [nIndividuals];
    for (int ii=0; ii<nIndividuals; ii++) {
        DATA[ii] = new double [nGenes];
    }
    for (int ii=0; ii<nIndividuals; ii++) {
        for (int jj=0; jj<nGenes; jj++) {
            DATA[ii][jj] = ii+jj; // random intialization of the elements.
        }
    }

    int MyRank, NumberOfProcessors;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
    MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcessors);
    int NumberOfWorkers = NumberOfProcessors - 1;

    int* Iteration_START_on_core;
    int* Iteration_STOP_on_core;
    Iteration_START_on_core = new int [NumberOfWorkers+1];
    Iteration_STOP_on_core  = new int [NumberOfWorkers+1];

    Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(nIndividuals, NumberOfProcessors, Iteration_START_on_core, Iteration_STOP_on_core);

    if (MyRank == 0) {
        std::cout << " ======================== " << std::endl;
        std::cout << std::endl;
        std::cout << "NumberOfProcessors=" << NumberOfProcessors << std::endl;
        std::cout << "NumberOfWorkers= " << NumberOfWorkers << std::endl;
        std::cout << "NumberOfTasks= " << nIndividuals << std::endl;
        for (int ww=0; ww<=NumberOfWorkers; ww++) {
            std::cout << "(Core: " << ww << ") S:" << Iteration_START_on_core[ww] << " E:" << Iteration_STOP_on_core[ww] << " LoadOnCore: ";
            if (ww==0) {
                std::cout << 0 ;
            }
            else {
                std::cout << Iteration_STOP_on_core[ww] - Iteration_START_on_core[ww] +1;
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
        std::cout << " ======================== " << std::endl;
    }/// End_If(MyRank==0)

    if (MyRank == 0) {
        std::cout << "Start Sending...." << std::endl ;
        double* sendbuff;
        sendbuff = new double [nGenes];
        for (int cc=1; cc<=NumberOfWorkers; cc++) {
            for (int jj=Iteration_START_on_core[cc]; jj<=Iteration_STOP_on_core[cc]; jj++) {
                for (int gg=0; gg<nGenes; gg++) {
                    sendbuff[gg] = DATA[jj][gg];
                }
                std::cout << std::endl << "SEND to Core " << cc << ": Start=" << Iteration_START_on_core[cc] << ", End=" << Iteration_STOP_on_core[cc] << ". Taks#: " << jj << " -- DATA: ";
                MPI_Send(&sendbuff, nGenes, MPI_DOUBLE, cc, 0, MPI_COMM_WORLD);
                for (int mm=0; mm<nGenes; mm++) {
                    std::cout << DATA[jj][mm] << " | ";
                }
            }
        }
        std::cout << std::endl;
        delete[] sendbuff;
        std::cout << std::endl << "Finish sending." << std::endl ;
    }
    else {
        std::cout << std::endl << "...Worker Cores..." << std::endl ;
        for (int cc=1; cc<=NumberOfWorkers; cc++) {
            if (MyRank == cc) {
                MPI_Status status;
                double* receivebuff;
                receivebuff = new double [nGenes];
                //std::cout << "Start Receiving on Core " << cc << ". FROM job: " << Iteration_START_on_core[cc] << " TO job: " << Iteration_STOP_on_core[cc] << "." << std::endl ;
                for (int kk=Iteration_START_on_core[cc]; kk<=Iteration_STOP_on_core[cc]; kk++) {
                    MPI_Recv(&receivebuff, nGenes, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                    std::cout << std::endl << "RECEIVE on Core: " << cc << ". From Core " << 0 << ": Start=" << Iteration_START_on_core[cc] << ", End=" << Iteration_STOP_on_core[cc] << ". Work on task: " << kk << ".";
                    std::cout << " | ";
                    for (int aa=0; aa<nGenes; aa++) {
                        std::cout << receivebuff[aa] << " | ";
                    }
                    std::cout << std::endl;
                }
                delete [] receivebuff;
                std::cout << std::endl << "Finish receiving on core " << cc << "." << std::endl ;
            }
        }
    }

    for (int ii=1; ii<nIndividuals; ii++) {
        delete[] DATA[ii];
    }
    delete[] DATA;

    if (MyRank==0) std::cout << std::endl << "Prepare to MPI_Finalize ... " << std::endl ;
    MPI_Finalize();
    if (MyRank==0) std::cout << std::endl << "... Completed MPI_Finalize. " << std::endl ;
    ///######################################################################################################
    return 0;
} /// END MAIN PROGRAM

///===========================================================================================================================
///
/// Function: MPI Division of Work per Core.
void Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(int Number_of_Tasks, int NumberOfProcessors, int* START_on_core, int* END_on_core)
{
    int NuberOfWorkers = NumberOfProcessors-1;
    int integer_Num_Tasks_Per_Worker = floor(Number_of_Tasks/NuberOfWorkers);
    int reminder_Num_Taska_Per_Worker = Number_of_Tasks - integer_Num_Tasks_Per_Worker*NuberOfWorkers;

    START_on_core[0] = -1;
    END_on_core[0]   = -1;

    //std::cout << std::endl << "F: integer_Num_Tasks_Per_Worker = " << integer_Num_Tasks_Per_Worker << std::endl;
    //std::cout << "F: reminder_Num_Taska_Per_Worker = " << reminder_Num_Taska_Per_Worker << std::endl;

    if (reminder_Num_Taska_Per_Worker==0) {
        START_on_core[1] = 0;
        END_on_core[1]   = START_on_core[1] + integer_Num_Tasks_Per_Worker - 1;
        for (int iProcess=2; iProcess<NumberOfProcessors; iProcess++) {
            START_on_core[iProcess] = START_on_core[iProcess-1] + integer_Num_Tasks_Per_Worker;
            END_on_core[iProcess]   = END_on_core[iProcess-1]   + integer_Num_Tasks_Per_Worker;
        }
    }
    else {
        START_on_core[1] = 0;
        END_on_core[1]   = START_on_core[1] + integer_Num_Tasks_Per_Worker - 1 + 1;
        for (int iProcess=2; iProcess<reminder_Num_Taska_Per_Worker+1; iProcess++) {
            START_on_core[iProcess] = START_on_core[iProcess-1] + integer_Num_Tasks_Per_Worker+1;
            END_on_core[iProcess]   = END_on_core[iProcess-1]   + integer_Num_Tasks_Per_Worker+1;
        }
        for (int iProcess=reminder_Num_Taska_Per_Worker+1; iProcess<NumberOfProcessors; iProcess++) {
            START_on_core[iProcess] = END_on_core[iProcess-1] +1;
            END_on_core[iProcess]   = START_on_core[iProcess] +integer_Num_Tasks_Per_Worker-1;
        }
    }
    //
}
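For reference, here is a minimal, self-contained sketch (an editor's illustration, not the poster's code) of the hand-off pattern the question describes: rank 0 sends each worker one nGenes-long vector of doubles, and each worker sends back a single double. The names nGenes, row, and result are placeholders. Note that, per the standard signatures, MPI_Send and MPI_Recv take a pointer to the data itself (e.g. row.data() or &result), not the address of a pointer variable.

// mpi_row_handoff_sketch.cpp -- minimal master/worker hand-off (illustrative only).
#include <mpi.h>
#include <vector>
#include <iostream>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int nGenes = 6;   // length of one task vector (placeholder value)

    if (rank == 0) {
        // Master: send one nGenes-long vector to every worker ...
        for (int worker = 1; worker < size; ++worker) {
            std::vector<double> row(nGenes);
            for (int g = 0; g < nGenes; ++g) row[g] = worker + g;   // dummy data
            // Pass the buffer pointer (row.data()), not the address of the pointer.
            MPI_Send(row.data(), nGenes, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD);
        }
        // ... then collect one double back from each worker.
        for (int worker = 1; worker < size; ++worker) {
            double result = 0.0;
            MPI_Recv(&result, 1, MPI_DOUBLE, worker, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            std::cout << "Result from worker " << worker << ": " << result << std::endl;
        }
    } else {
        // Worker: receive the vector, compute a single double, send it back.
        std::vector<double> row(nGenes);
        MPI_Recv(row.data(), nGenes, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        double result = 0.0;
        for (int g = 0; g < nGenes; ++g) result += row[g];          // placeholder "work"
        MPI_Send(&result, 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}

Compiled with mpic++ and run with, for example, mpirun -np 4, this exchanges one message pair per worker; the question's code instead hands each worker a contiguous block of task indices (with 10 tasks and 3 workers, the partition function above assigns blocks of 4, 3 and 3 tasks).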
Source: https://stackoverflow.com/questions/46457075