changing blocking receive to non-blocking (in boundaryProcessor) to improve the MPI run

HRN 2024-05-24 00:10:15 +03:30
parent a0c51e94e1
commit 5a25556b8a
3 changed files with 59 additions and 37 deletions
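
Context for the change: the size exchange between neighboring processor boundaries previously posted an MPI_Isend and then blocked in MPI_Recv; this commit replaces the blocking receive with MPI_Irecv, storing the requests and completing them in a later step so that communication can overlap other work. The sketch below illustrates that pattern in isolation. It is not pFlow code; the function names and parameters are assumptions made for the example.

// Illustrative sketch only (not from the pFlow sources): blocking size exchange
// versus a posted/completed non-blocking exchange.
#include <mpi.h>

// Before: MPI_Isend paired with a blocking MPI_Recv; the caller stalls here
// until the neighbor's count arrives.
void exchangeSizeBlocking(unsigned thisSize, unsigned& neighborSize,
                          int neighborRank, int sendTag, int recvTag, MPI_Comm comm)
{
    MPI_Request req = MPI_REQUEST_NULL;
    MPI_Isend(&thisSize, 1, MPI_UNSIGNED, neighborRank, sendTag, comm, &req);
    MPI_Recv(&neighborSize, 1, MPI_UNSIGNED, neighborRank, recvTag, comm, MPI_STATUS_IGNORE);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}

// After: both operations are posted non-blocking (step 1) and completed later
// (step 2), so whatever runs between the two calls overlaps the communication.
void postSizeExchange(const unsigned* thisSize, unsigned* neighborSize,
                      int neighborRank, int sendTag, int recvTag, MPI_Comm comm,
                      MPI_Request* sendReq, MPI_Request* recvReq)
{
    MPI_Isend(thisSize, 1, MPI_UNSIGNED, neighborRank, sendTag, comm, sendReq);
    MPI_Irecv(neighborSize, 1, MPI_UNSIGNED, neighborRank, recvTag, comm, recvReq);
}

void completeSizeExchange(MPI_Request* sendReq, MPI_Request* recvReq)
{
    MPI_Wait(recvReq, MPI_STATUS_IGNORE); // neighborSize is now valid
    MPI_Wait(sendReq, MPI_STATUS_IGNORE); // send buffer may be reused
}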

View File

@@ -106,7 +106,7 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
         return true;
     }
-    if(iter % 1000 == 0u)
+    if(iter % 100 == 0u)
     {
         pOutput<<"inter "<< inter_.totalTime()<<endl;
         pOutput<<"send "<< send_.totalTime()<<endl<<endl;;
@@ -144,7 +144,7 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
         return true;
     }
-    if(iter % 1000 == 0u)
+    if(iter % 100 == 0u)
     {
         pOutput<<"recive "<< recv_.totalTime()<<endl;
         pOutput<<"add "<< add_.totalTime()<<endl<<endl;

View File

@@ -65,43 +65,61 @@ pFlow::MPI::boundaryProcessor::boundaryProcessor(
 }
 bool
-pFlow::MPI::boundaryProcessor::beforeIteration(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::beforeIteration(
+    uint32 step,
+    uint32 iterNum,
+    real t,
+    real dt)
 {
-    thisNumPoints_ = size();
-
-    auto req = MPI_REQUEST_NULL;
-    MPI_Isend(
-        &thisNumPoints_,
-        1,
-        MPI_UNSIGNED,
-        neighborProcessorNo(),
-        thisBoundaryIndex(),
-        pFlowProcessors().localCommunicator(),
-        &req);
-
-    MPI_Recv(
-        &neighborProcNumPoints_,
-        1,
-        MPI_UNSIGNED,
-        neighborProcessorNo(),
-        mirrorBoundaryIndex(),
-        pFlowProcessors().localCommunicator(),
-        MPI_STATUS_IGNORE
-    );
-    MPI_Request_free(&req);
-
-    anyList varList;
-    message msg;
-
-    varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
-
-    if( !notify(iterNum, t, dt, msg, varList) )
+    if(step == 1 )
     {
-        fatalErrorInFunction;
-        return false;
+        thisNumPoints_ = size();
+
+        uint32 oldNeighborProcNumPoints = neighborProcNumPoints_;
+
+        MPI_Isend(
+            &thisNumPoints_,
+            1,
+            MPI_UNSIGNED,
+            neighborProcessorNo(),
+            thisBoundaryIndex(),
+            pFlowProcessors().localCommunicator(),
+            &numPointsRequest0_);
+
+        MPI_Irecv(
+            &neighborProcNumPoints_,
+            1,
+            MPI_UNSIGNED,
+            neighborProcessorNo(),
+            mirrorBoundaryIndex(),
+            pFlowProcessors().localCommunicator(),
+            &numPointsRequest_
+        );
     }
+    else if(step == 2 )
+    {
+        if(numPointsRequest_ != RequestNull)
+        {
+            MPI_Wait(&numPointsRequest_, MPI_STATUS_IGNORE);
+            if(numPointsRequest0_!= RequestNull)
+            {
+                MPI_Request_free(&numPointsRequest0_);
+            }
+        }
+
+        anyList varList;
+        message msg;
+
+        varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
+
+        if( !notify(iterNum, t, dt, msg, varList) )
+        {
+            fatalErrorInFunction;
+            return false;
+        }
+    }
     return true;
 }
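
Design note (inferred from the diff, not stated in the commit message): splitting beforeIteration into two steps lets the MPI_Isend/MPI_Irecv posted in step 1 progress while other work runs between the two calls, and only step 2 waits on the receive before notifying observers with BNDR_PROC_SIZE_CHANGED. The send request is released with MPI_Request_free rather than waited on, which relies on the send buffer (thisNumPoints_, a class member) remaining valid until the send completes in the background.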

View File

@@ -42,6 +42,10 @@ namespace pFlow::MPI
     uint32 thisNumPoints_ = 0;
 
+    Request numPointsRequest_ = RequestNull;
+
+    Request numPointsRequest0_ = RequestNull;
+
     realx3Vector_D neighborProcPoints_;
 
     dataSender<realx3> sender_;
@@ -87,7 +91,7 @@ namespace pFlow::MPI
         boundaryProcessor,
         dictionary);
 
-    bool beforeIteration(uint32 iterNum, real t, real dt) override;
+    bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;
 
     bool iterate(uint32 iterNum, real t, real dt) override;