
Simple MPI parallelization of FOR-LOOP

I am trying to do a simple MPI distribution of for-loop iterations to “Worker” cores. Each worker should do its share of the work (take a vector of doubles of size “nGenes”, typically around 6) and send back the result (a single double). However, I am having trouble even with the first step: passing messages from the “Master” core (0) to the “Worker” cores (1, 2, …, nWorkers). The program gets through the sending part, but it gets stuck in the receiving part, at the line with MPI_Recv(…). I can't see what the problem might be. Please help.
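
A note on the calls that are probably at fault: MPI_Send and MPI_Recv take a pointer to the data as their first argument. In the code below, sendbuff and receivebuff are already of type double*, so passing &sendbuff and &receivebuff hands MPI the address of the pointer variable itself (a double**); the send then reads, and the receive then overwrites, nGenes doubles starting at that stack location instead of the array contents, which would explain the trouble at MPI_Recv. A sketch of the corrected calls, keeping the buffers, counts, tags and communicator from the posted code:

// Master side: sendbuff is a double*, so pass it directly.
MPI_Send(sendbuff, nGenes, MPI_DOUBLE, cc, 0, MPI_COMM_WORLD);

// Worker side: likewise pass receivebuff, not &receivebuff.
MPI_Recv(receivebuff, nGenes, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);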

#include <iostream>
#include <mpi.h>
#include <math.h>
#include <stdlib.h>     /* srand, rand */

double fR(double a);
void Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(int Number_of_Tasks, int NumberOfProcessors, int* START_on_core, int* END_on_core);
int main(int argc, char* argv[])
{
int nIndividuals = 10;
int nGenes = 6;

double** DATA;
DATA = new double* [nIndividuals];
for (int ii=0; ii<nIndividuals; ii++)
{
    DATA[ii] = new double [nGenes];
}
for (int ii=0; ii<nIndividuals; ii++)
{
    for (int jj=0; jj<nGenes; jj++)
    {
        DATA[ii][jj] = ii+jj; // simple initialization of the elements.
    }
}

int MyRank, NumberOfProcessors;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcessors);
int NumberOfWorkers = NumberOfProcessors - 1;

int* Iteration_START_on_core;
int* Iteration_STOP_on_core;
Iteration_START_on_core = new int [NumberOfWorkers+1];
Iteration_STOP_on_core  = new int [NumberOfWorkers+1];

Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(nIndividuals, NumberOfProcessors, Iteration_START_on_core, Iteration_STOP_on_core);


if (MyRank == 0)
{
    std::cout << " ======================== " << std::endl;
    std::cout << std::endl;
    std::cout << "NumberOfProcessors=" << NumberOfProcessors << std::endl;
    std::cout << "NumberOfWorkers=   " << NumberOfWorkers    << std::endl;
    std::cout << "NumberOfTasks=     " << nIndividuals       << std::endl;
    for (int ww=0; ww<=NumberOfWorkers; ww++)
    {
        std::cout << "(Core: " << ww << ")  S:" << Iteration_START_on_core[ww] << "   E:" << Iteration_STOP_on_core[ww] << "   LoadOnCore: ";
        if (ww==0)
        {
            std::cout << 0 ;
        }
        else
        {
            std::cout << Iteration_STOP_on_core[ww] - Iteration_START_on_core[ww] +1;
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
    std::cout << " ======================== " << std::endl;
}/// End_If(MyRank==0)


if (MyRank == 0)
{
    std::cout << "Start Sending...." << std::endl ;

    double* sendbuff;
    sendbuff = new double [nGenes];

    for (int cc=1; cc<=NumberOfWorkers; cc++)
    {
        for (int jj=Iteration_START_on_core[cc]; jj<=Iteration_STOP_on_core[cc]; jj++)
        {


            for (int gg=0; gg<nGenes; gg++)
            {
                sendbuff[gg] = DATA[jj][gg];
            }
            std::cout << std::endl << "SEND to Core " << cc << ": Start=" << Iteration_START_on_core[cc] << ", End=" << Iteration_STOP_on_core[cc] << ". Task#: " << jj << " -- DATA: ";
            MPI_Send(&sendbuff, nGenes, MPI_DOUBLE, cc, 0, MPI_COMM_WORLD);
            for (int mm=0; mm<nGenes; mm++)
            {
                std::cout << DATA[jj][mm] << " | ";
            }
        }
    }
    std::cout << std::endl;

    delete[] sendbuff;

    std::cout << std::endl << "Finish sending." << std::endl ;
}
else
{
    std::cout << std::endl << "...Worker Cores..." << std::endl ;


    for (int cc=1; cc<=NumberOfWorkers; cc++)
    {
        if (MyRank == cc)
        {
            MPI_Status status;

            double* receivebuff;
            receivebuff = new double [nGenes];
            //std::cout << "Start Receiving on Core " << cc << ".  FROM job: " << Iteration_START_on_core[cc] << "  TO job: " << Iteration_STOP_on_core[cc] << "." << std::endl ;
            for (int kk=Iteration_START_on_core[cc]; kk<=Iteration_STOP_on_core[cc]; kk++)
            {

                MPI_Recv(&receivebuff, nGenes, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                std::cout << std::endl << "RECEIVE on Core: " << cc << ". From Core " << 0 << ": Start=" << Iteration_START_on_core[cc] << ", End=" << Iteration_STOP_on_core[cc] << ". Work on task: " << kk << ".";
                std::cout << " | ";
                for (int aa=0; aa<nGenes; aa++)
                {
                    std::cout << receivebuff[aa] << " | ";
                }
                std::cout << std::endl;
            }
            delete [] receivebuff;
            std::cout << std::endl << "Finish receiving on core " << cc << "." << std::endl ;
        }
    }
}

for (int ii=1; ii<nIndividuals; ii++)
{
    delete[] DATA[ii];
}
delete[] DATA;

if (MyRank==0) std::cout << std::endl << "Prepare to MPI_Finalize ... " << std::endl ;
   MPI_Finalize();
if (MyRank==0) std::cout << std::endl << "... Completed MPI_Finalize. " << std::endl ;
///######################################################################################################


return 0;
} /// END MAIN PROGRAM



///===========================================================================================================================
///
///  Function: MPI Division of Work per Core.

void Calculate_MPI_Division_of_Work_Per_Core_Master0AndSlaves(int Number_of_Tasks, int NumberOfProcessors, int* START_on_core, int* END_on_core)
{
    int NuberOfWorkers = NumberOfProcessors-1;
    int integer_Num_Tasks_Per_Worker = floor(Number_of_Tasks/NuberOfWorkers);
    int reminder_Num_Taska_Per_Worker = Number_of_Tasks - integer_Num_Tasks_Per_Worker*NuberOfWorkers;

    START_on_core[0] = -1;
    END_on_core[0]   = -1;
    //std::cout << std::endl << "F: integer_Num_Tasks_Per_Worker  = " << integer_Num_Tasks_Per_Worker  << std::endl;
    //std::cout              << "F: reminder_Num_Taska_Per_Worker = " << reminder_Num_Taska_Per_Worker << std::endl;

    if (reminder_Num_Taska_Per_Worker==0)
    {
        START_on_core[1] = 0;
        END_on_core[1]   = START_on_core[1] + integer_Num_Tasks_Per_Worker - 1;

        for (int iProcess=2; iProcess<NumberOfProcessors; iProcess++)
        {
            START_on_core[iProcess] =  START_on_core[iProcess-1] + integer_Num_Tasks_Per_Worker;
            END_on_core[iProcess]   =  END_on_core[iProcess-1]   + integer_Num_Tasks_Per_Worker;
        }
    }
    else
    {
        START_on_core[1] = 0;
        END_on_core[1]   = START_on_core[1] + integer_Num_Tasks_Per_Worker - 1 + 1;

        for (int iProcess=2; iProcess<reminder_Num_Taska_Per_Worker+1; iProcess++)
        {
            START_on_core[iProcess] =  START_on_core[iProcess-1] + integer_Num_Tasks_Per_Worker+1;
            END_on_core[iProcess]   =  END_on_core[iProcess-1]   + integer_Num_Tasks_Per_Worker+1;
        }
        for (int iProcess=reminder_Num_Taska_Per_Worker+1; iProcess<NumberOfProcessors; iProcess++)
        {
            START_on_core[iProcess] =  END_on_core[iProcess-1]  +1;
            END_on_core[iProcess]   =  START_on_core[iProcess]  +integer_Num_Tasks_Per_Worker-1;
        }
    }
//
}
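
For the second step described in the question (each worker computing one double per gene vector and returning it to rank 0), here is a minimal self-contained sketch of the intended round trip. The per-task computation is just a placeholder sum over the genes, standing in for whatever fR or the real fitness function would do, and the division of work is reduced to one task per worker to keep the example short:

#include <mpi.h>
#include <iostream>
#include <vector>

// Placeholder for the real per-individual computation: here, simply a sum of the genes.
static double evaluate(const std::vector<double>& genes)
{
    double s = 0.0;
    for (double g : genes) s += g;
    return s;
}

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0, nprocs = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    const int nGenes = 6;

    if (rank == 0)
    {
        // Master: send one gene vector to every worker, then collect one double back from each.
        for (int worker = 1; worker < nprocs; ++worker)
        {
            std::vector<double> genes(nGenes);
            for (int g = 0; g < nGenes; ++g) genes[g] = worker + g;  // dummy data, like DATA[ii][jj] = ii + jj
            MPI_Send(genes.data(), nGenes, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD);
        }
        for (int worker = 1; worker < nprocs; ++worker)
        {
            double result = 0.0;
            MPI_Recv(&result, 1, MPI_DOUBLE, worker, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            std::cout << "Result from worker " << worker << ": " << result << std::endl;
        }
    }
    else
    {
        // Worker: receive the gene vector, compute a single double, send it back to rank 0.
        std::vector<double> genes(nGenes);
        MPI_Recv(genes.data(), nGenes, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        double result = evaluate(genes);
        // &result is correct here: result is a plain double, so &result is the double* MPI expects.
        MPI_Send(&result, 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}

Run it with at least two processes, for example mpirun -np 3 ./a.out; rank 0 then prints one result per worker.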

Source: https://stackoverflow.com/questions/46457075
Updated: 2021-06-21 09:06

Accepted answer

As far as I can see, you're already using the options Twig helper, which is the right way to go.

options('{"sort_by":"name:asc","is_startpage":false}')

You can use the filter_by parameter directly with this syntax:

options('{"sort_by":"name:asc","filter_by[component]":"reference"}')

This syntax would also be possible:

getStories('starts_with', 1, 10, 'name:ASC', options('{"filter_by":{"component":"reference"}}'))

This will be mapped directly to the API call by our PHP Client Library. Those requests are also cached in your Silex Boilerplate without any extra effort.
