MPI development zeroTime
This commit is contained in:
parent
d7e6292e41
commit
93617a6ee5
@@ -0,0 +1,71 @@
#include "processorAB2BoundaryIntegration.hpp"
#include "AdamsBashforth2.hpp"
#include "AB2Kernels.hpp"
#include "boundaryConfigs.hpp"

pFlow::processorAB2BoundaryIntegration::processorAB2BoundaryIntegration(
    const boundaryBase &boundary,
    const pointStructure &pStruct,
    const word &method,
    integration& intgrtn
)
:
    boundaryIntegration(boundary, pStruct, method, intgrtn)
{}

bool pFlow::processorAB2BoundaryIntegration::correct(
    real dt,
    const realx3PointField_D& y,
    const realx3PointField_D& dy
)
{
#ifndef BoundaryModel1
    if(this->isBoundaryMaster())
    {
        const uint32 thisIndex = thisBoundaryIndex();
        const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
        const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& dyView = dy.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& yView = y.BoundaryField(thisIndex).neighborProcField().deviceView();
        const rangeU32 aRange(0u, dy1View.size());
        return AB2Kernels::intAllActive(
            "AB2Integration::correct."+this->boundaryName(),
            dt,
            aRange,
            yView,
            dyView,
            dy1View
        );
    }
#endif //BoundaryModel1

    return true;
}

bool pFlow::processorAB2BoundaryIntegration::correctPStruct(real dt, const realx3PointField_D &vel)
{
#ifndef BoundaryModel1
    if(this->isBoundaryMaster())
    {
        const uint32 thisIndex = thisBoundaryIndex();
        const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
        const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& velView = vel.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& xposView = boundary().neighborProcPoints().deviceView();
        const rangeU32 aRange(0u, dy1View.size());
        return AB2Kernels::intAllActive(
            "AB2Integration::correctPStruct."+this->boundaryName(),
            dt,
            aRange,
            xposView,
            velView,
            dy1View
        );
    }
#endif //BoundaryModel1

    return true;
}
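For orientation, AB2Kernels::intAllActive itself is not part of this commit. The sketch below is a minimal serial rendition of the update such a kernel applies to the three boundary views passed above; the struct name vec3 and the function ab2IntegrateAll are hypothetical, and the real kernel performs the same arithmetic inside a Kokkos parallel loop over aRange.

#include <vector>
#include <cstddef>

// Hypothetical stand-in for pFlow::realx3.
struct vec3 { double x, y, z; };

// Standard second-order Adams-Bashforth step per point:
// y_new = y + dt*(3/2*dy - 1/2*dy1), then dy1 is overwritten with dy for the next step.
void ab2IntegrateAll(double dt,
                     std::vector<vec3>& y,
                     const std::vector<vec3>& dy,
                     std::vector<vec3>& dy1)
{
    for (std::size_t i = 0; i < y.size(); ++i)
    {
        y[i].x += dt * (1.5 * dy[i].x - 0.5 * dy1[i].x);
        y[i].y += dt * (1.5 * dy[i].y - 0.5 * dy1[i].y);
        y[i].z += dt * (1.5 * dy[i].z - 0.5 * dy1[i].z);
        dy1[i] = dy[i];   // current derivative becomes the "old" derivative
    }
}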
@@ -0,0 +1,51 @@
#ifndef __processorAB2BoundaryIntegration_hpp__
#define __processorAB2BoundaryIntegration_hpp__

#include "boundaryIntegration.hpp"

namespace pFlow
{

class processorAB2BoundaryIntegration
:
    public boundaryIntegration
{
public:

    TypeInfo("boundaryIntegration<processor,AdamsBashforth2>");

    processorAB2BoundaryIntegration(
        const boundaryBase& boundary,
        const pointStructure& pStruct,
        const word& method,
        integration& intgrtn
    );

    ~processorAB2BoundaryIntegration() override = default;

    bool correct(
        real dt,
        const realx3PointField_D& y,
        const realx3PointField_D& dy) override;

    bool correctPStruct(real dt, const realx3PointField_D& vel) override;

    add_vCtor(
        boundaryIntegration,
        processorAB2BoundaryIntegration,
        boundaryBase
    );

};

}

#endif
@@ -99,7 +99,7 @@ bool pFlow::processorBoundaryContactSearch::broadSearch
        thisDiams,
        neighborProcPoints,
        neighborProcDiams,
        name()
        boundaryName()
    );
    //pOutput<<"ppSize "<< ppPairs.size()<<endl;
    return true;
@@ -9,6 +9,7 @@ template<typename ContactListType, typename ContactForceModel>
inline
void sphereSphereInteraction
(
    const word& kernalName,
    real dt,
    const ContactListType& cntctList,
    const ContactForceModel& forceModel,

@@ -36,7 +37,7 @@ void sphereSphereInteraction
    uint32 lastItem = cntctList.loopCount();

    Kokkos::parallel_for(
        "pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction",
        kernalName,
        deviceRPolicyDynamic(0,lastItem),
        LAMBDA_HD(uint32 n)
        {
@@ -32,11 +32,6 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySpher
        geomMotion
    ),
    masterInteraction_(boundary.isBoundaryMaster())
    ,
    inter_("inter"),
    send_("send"),
    recv_("recv"),
    add_("add")
{
    if(masterInteraction_)
    {

@@ -46,6 +41,9 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySpher

}

#ifdef BoundaryModel1

template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(

@@ -74,8 +72,8 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
        if(step == 2 )
        {
            iter++;
            inter_.start();
            pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
                "ppBoundaryInteraction."+this->boundaryName(),
                dt,
                this->ppPairs(),
                cfModel,

@@ -94,64 +92,165 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
                cfBndry.neighborProcField().deviceViewAll(),
                ctBndry.neighborProcField().deviceViewAll()
            );
            inter_.end();

            return true;
        }
        else if(step == 3 )
        {
            send_.start();
            cfBndry.sendBackData();
            ctBndry.sendBackData();
            send_.end();

            return true;
        }

        if(iter % 100 == 0u)
        {
            pOutput<<"inter "<< inter_.totalTime()<<endl;
            pOutput<<"send "<< send_.totalTime()<<endl<<endl;;
        }

        return false;
    }
    else
    {

        const auto & sphPar = this->sphParticles();
        uint32 thisIndex = this->boundary().thisBoundaryIndex();
        const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
            sphPar.contactForce().BoundaryField(thisIndex));
        const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
            sphPar.contactTorque().BoundaryField(thisIndex));
        if(step==1)
        if(step == 1 )
        {
            recv_.start();
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.recieveBackData();
            ctBndry.recieveBackData();
            recv_.end();

            return false;
        }
        else if(step == 2)
        else if(step == 11)
        {
            iter++;
            return true;
        }
        else if(step == 3)
        {
            add_.start();
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.addBufferToInternalField();
            ctBndry.addBufferToInternalField();
            add_.end();

            return true;
        }

        if(iter % 100 == 0u)
        {
            pOutput<<"recive "<< recv_.totalTime()<<endl;
            pOutput<<"add "<< add_.totalTime()<<endl<<endl;
        }

        return false;
    }

    return false;
}
}
#else

template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
    real dt,
    const ContactForceModel &cfModel,
    uint32 step
)
{

    // master processor calculates the contact force/torque and sends data back to the
    // neighbor processor (slave processor).
    // slave processor recieves the data and adds the data to the internalField
    if(masterInteraction_)
    {
        if(step==1)return true;

        const auto & sphPar = this->sphParticles();
        uint32 thisIndex = this->boundary().thisBoundaryIndex();

        const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
            sphPar.contactForce().BoundaryField(thisIndex));

        const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
            sphPar.contactTorque().BoundaryField(thisIndex));

        if(step == 2 )
        {

            pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
                "ppBoundaryInteraction."+this->boundaryName(),
                dt,
                this->ppPairs(),
                cfModel,
                this->boundary().thisPoints(),
                sphPar.diameter().deviceViewAll(),
                sphPar.propertyId().deviceViewAll(),
                sphPar.velocity().deviceViewAll(),
                sphPar.rVelocity().deviceViewAll(),
                sphPar.contactForce().deviceViewAll(),
                sphPar.contactTorque().deviceViewAll(),
                this->boundary().neighborProcPoints().deviceViewAll(),
                sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                cfBndry.neighborProcField().deviceViewAll(),
                ctBndry.neighborProcField().deviceViewAll()
            );

            return true;
        }
        else if(step == 3 )
        {
            cfBndry.sendBackData();
            ctBndry.sendBackData();
            return true;
        }
        else if(step == 11 )
        {
            cfBndry.updateBoundaryFromSlave();
            ctBndry.updateBoundaryFromSlave();
            return true;
        }

        return false;
    }
    else
    {

        if(step == 1 )
        {
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.recieveBackData();
            ctBndry.recieveBackData();

            return false;
        }
        else if(step == 11)
        {
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.addBufferToInternalField();
            cfBndry.updateBoundaryToMaster();

            ctBndry.addBufferToInternalField();
            ctBndry.updateBoundaryToMaster();

            return true;
        }

        return false;
    }

    return false;
}

#endif
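The comment in the #else branch above describes the intent of the step numbers: the master side computes contact force/torque (step 2) and sends the results back (step 3), while the slave side posts the receive (step 1) and adds the received buffers to its internal field (step 11). The driver below is only an assumed illustration of how such a step-based method could be polled each time step; the actual caller lives elsewhere in phasicFlow and is not part of this diff.

// Hypothetical driver; BoundaryInteraction stands for processorBoundarySphereInteraction.
template <typename BoundaryInteraction, typename ForceModel>
void pollBoundarySteps(BoundaryInteraction& bi, double dt, const ForceModel& cfModel)
{
    // Each call returns true when that stage has nothing more to do for this iteration;
    // the return value is ignored here for brevity.
    for (unsigned step : {1u, 2u, 3u, 11u})
    {
        bi.sphereSphereInteraction(dt, cfModel, step);
    }
}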
@@ -22,6 +22,7 @@ Licence:

#include "boundarySphereInteraction.hpp"
#include "processorBoundaryField.hpp"
#include "boundaryProcessor.hpp"

namespace pFlow::MPI
{

@@ -57,12 +58,6 @@ private:

    bool masterInteraction_;

    Timer inter_;
    Timer send_;
    Timer recv_;
    Timer add_;
    uint32 iter=0;

public:

    TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel);
@@ -0,0 +1,46 @@
#include "processorBoundarySphereParticles.hpp"
#include "sphereParticles.hpp"
#include "boundaryProcessor.hpp"

pFlow::processorBoundarySphereParticles::processorBoundarySphereParticles(
    const boundaryBase &boundary,
    sphereParticles &prtcls
)
:
    boundarySphereParticles(boundary, prtcls)
{

}

bool pFlow::processorBoundarySphereParticles::acceleration(const timeInfo &ti, const realx3& g)
{

#ifndef BoundaryModel1

    if(isBoundaryMaster())
    {
        auto thisIndex = thisBoundaryIndex();
        auto mass = Particles().mass().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto I = Particles().I().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto cf = Particles().contactForce().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto ct = Particles().contactTorque().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto acc = Particles().accelertion().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto rAcc = Particles().rAcceleration().BoundaryField(thisIndex).neighborProcField().deviceView();

        Kokkos::parallel_for(
            "processorBoundary::acceleration."+this->boundaryName(),
            deviceRPolicyStatic(0,mass.size()),
            LAMBDA_HD(uint32 i){
                acc[i] = cf[i]/mass[i] + g;
                rAcc[i] = ct[i]/I[i];
            });
        Kokkos::fence();
    }

#endif

    return true;
}
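The Kokkos kernel above applies Newton's second law to the particles mirrored from the neighbor processor: linear acceleration is contact force over mass plus gravity, and angular acceleration is contact torque over the (scalar) moment of inertia. A standalone Kokkos sketch of the same pattern is given below; the view layouts and function name are assumptions, not the pFlow types used in the diff.

#include <Kokkos_Core.hpp>

void boundaryAcceleration(double gx, double gy, double gz,
                          Kokkos::View<double*>    mass,
                          Kokkos::View<double*>    inertia,
                          Kokkos::View<double*[3]> cf,
                          Kokkos::View<double*[3]> ct,
                          Kokkos::View<double*[3]> acc,
                          Kokkos::View<double*[3]> rAcc)
{
    Kokkos::parallel_for(
        "boundaryAcceleration",
        Kokkos::RangePolicy<>(0, mass.extent(0)),
        KOKKOS_LAMBDA(const int i) {
            acc(i, 0)  = cf(i, 0) / mass(i) + gx;   // a = F/m + g
            acc(i, 1)  = cf(i, 1) / mass(i) + gy;
            acc(i, 2)  = cf(i, 2) / mass(i) + gz;
            rAcc(i, 0) = ct(i, 0) / inertia(i);     // alpha = T/I
            rAcc(i, 1) = ct(i, 1) / inertia(i);
            rAcc(i, 2) = ct(i, 2) / inertia(i);
        });
    Kokkos::fence();
}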
@@ -0,0 +1,38 @@
#ifndef __processorBoundarySphereParticles_hpp__
#define __processorBoundarySphereParticles_hpp__

#include "boundarySphereParticles.hpp"

namespace pFlow
{

class processorBoundarySphereParticles
:
    public boundarySphereParticles
{

public:

    /// type info
    TypeInfo("boundarySphereParticles<MPI,processor>");

    processorBoundarySphereParticles(
        const boundaryBase &boundary,
        sphereParticles& prtcls
    );

    add_vCtor(
        boundarySphereParticles,
        processorBoundarySphereParticles,
        boundaryBase
    );

    bool acceleration(const timeInfo& ti, const realx3& g)override;

};

}

#endif
@@ -17,7 +17,6 @@ Licence:
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__


@@ -37,6 +36,8 @@ extern DataType realx4Type__;

extern DataType int32x3Type__;

extern DataType uint32x3Type__;

template<typename T>
auto constexpr Type()
{

@@ -190,6 +191,20 @@ auto constexpr sFactor<int32x3>()
    return 1;
}

template<>
inline
auto Type<uint32x3>()
{
    return uint32x3Type__;
}

template<>
auto constexpr sFactor<uint32x3>()
{
    return 1;
}

/*inline
auto createByteSequence(int sizeOfElement)
{

@@ -211,6 +226,7 @@ auto TypeFree(DataType* type)
    return MPI_Type_free(type);

}

template<typename T>
inline auto getCount(Status* status, int& count)
{

@@ -440,11 +456,6 @@ inline auto Wait(Request* request, Status* status)
    return MPI_Wait(request, status);
}

inline auto typeFree(DataType& type)
{
    return MPI_Type_free(&type);
}

}
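The Type<uint32x3>() and sFactor<uint32x3>() specializations added above extend the existing trait pair that maps a pFlow type to a registered MPI datatype plus an element-count factor. A hypothetical generic send built on that pair could look like the following; sendBuffer is illustrative only and assumes mpiCommunication.hpp is included, while the trait names themselves come from the diff.

#include <mpi.h>

template<typename T>
int sendBuffer(const T* data, int n, int dest, int tag, MPI_Comm comm)
{
    // For uint32x3 this resolves to MPI_Send(data, n*1, uint32x3Type__, dest, tag, comm).
    return MPI_Send(data, n * sFactor<T>(), Type<T>(), dest, tag, comm);
}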
@@ -55,6 +55,8 @@ namespace pFlow::MPI
inline const auto ErrOp = MPI_ERR_OP;

inline const auto SumOp = MPI_SUM;
inline const auto MaxOp = MPI_MAX;
inline const auto MinOp = MPI_MIN;

inline const size_t MaxNoProcessors = 2048;
@@ -35,46 +35,25 @@ pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control)

bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
{

    if(!prepareBoundaryDicts())
    {
        return false;
    }

    auto& boundaries = this->subDict("boundaries");

    this->addDict("MPIBoundaries", boundaries);
    auto& mpiBoundaries = this->subDict("MPIBoundaries");

    real neighborLength = boundaries.getVal<real>("neighborLength");
    auto boundaryExtntionLengthRatio = max(
        boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1),
        0.0);
    auto updateIntercal = max(
        boundaries.getValOrSet<uint32>("updateInterval", 1u),
        1u);

    auto& thisBoundaries = this->subDict(thisBoundariesDictName());

    auto neighbors = findPlaneNeighbors();

    for(uint32 i=0; i<sizeOfBoundaries(); i++)
    {
        word bName = bundaryName(i);
        if( !boundaries.containsDictionay(bName) )
        {
            fatalErrorInFunction<<"dictionary "<< bName<<
                "does not exist in "<< boundaries.globalName()<<endl;
            return false;
        }
        auto& bDict = mpiBoundaries.subDict(bName);

        if(!bDict.addOrKeep("neighborLength", neighborLength))
        {
            fatalErrorInFunction<<"error in adding neighborLength to "<< bName <<
                "in dictionary "<< boundaries.globalName()<<endl;
            return false;
        }

        if(!bDict.addOrReplace("updateInterval", updateIntercal))
        {
            fatalErrorInFunction<<"error in adding updateIntercal to "<< bName <<
                "in dictionary "<< boundaries.globalName()<<endl;
        }

        bDict.addOrReplace("boundaryExtntionLengthRatio", boundaryExtntionLengthRatio);

        word bName = bundaryName(i);
        auto& bDict = thisBoundaries.subDict(bName);

        if( thisDomainActive_ )
        {

@@ -276,11 +255,6 @@ std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
    return neighbors;
}

const pFlow::dictionary &
pFlow::MPI::MPISimulationDomain::thisBoundaryDict() const
{
    return this->subDict("MPIBoundaries");
}

bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{


@@ -72,8 +72,6 @@ public:
        systemControl
    );

    const dictionary& thisBoundaryDict() const final;

    /// @brief
    /// @param pointPos
    /// @return
@@ -25,13 +25,17 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
        uint32 nRecv = reciever_.waitBufferForUse();
        uint32 nRecv = neighborProcField_.waitBufferForUse();
        dataRecieved_ = true;
        if (nRecv != this->neighborProcSize())
        {
            fatalErrorInFunction;
            fatalErrorInFunction<<
                "number of recived data is "<< nRecv <<" and expected number is "<<
                this->neighborProcSize()<< " in "<<this->name() <<endl;
            fatalExit;
        }

        //pOutput<<"field data "<< this->name()<<" has recieved with size "<< nRecv<<endl;
    }
}


@@ -42,6 +46,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
    DataDirection direction
)
{
#ifndef BoundaryModel1
    if(!this->boundary().performBoundarytUpdate())
        return true;
#endif

    if (step == 1)
    {
        // Isend

@@ -49,9 +58,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
        ( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
        (!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
            sender_.sendData(pFlowProcessors(), this->thisField());
            thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
            dataRecieved_ = false;
            //pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
        }

    }
    else if (step == 2)
    {

@@ -60,8 +71,9 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
        (!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
        ( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
            reciever_.recieveData(pFlowProcessors(), this->neighborProcSize());
            neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
            dataRecieved_ = false;
            //pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
        }
    }
    else

@@ -80,13 +92,13 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
    InternalFieldType& internal
)
: BoundaryFieldType(boundary, pStruct, internal),
  sender_(
      groupNames("sendBufferField", boundary.name()),
  thisFieldInNeighbor_(
      groupNames("sendBuffer", this->name()),
      boundary.neighborProcessorNo(),
      boundary.thisBoundaryIndex()
  ),
  reciever_(
      groupNames("neighborProcField", boundary.name()),
  neighborProcField_(
      groupNames("recieveBuffer", boundary.name()),
      boundary.neighborProcessorNo(),
      boundary.mirrorBoundaryIndex()
  )

@@ -102,7 +114,7 @@ typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
{
    checkDataRecieved();
    return reciever_.buffer();
    return neighborProcField_.buffer();
}

template<class T, class MemorySpace>

@@ -111,7 +123,7 @@ const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
{
    checkDataRecieved();
    return reciever_.buffer();
    return neighborProcField_.buffer();
}

template<class T, class MemorySpace>

@@ -127,7 +139,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
    if(msg.equivalentTo(message::BNDR_PROC_SIZE_CHANGED))
    {
        auto newProcSize = varList.getObject<uint32>("size");
        reciever_.resize(newProcSize);
        neighborProcField_.resize(newProcSize);
    }

    if(msg.equivalentTo(message::BNDR_PROCTRANSFER_SEND))

@@ -144,7 +156,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
                this->internal().deviceViewAll()
            );

            sender_.sendData(pFlowProcessors(),transferData);
            thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
        }
        else
        {

@@ -154,7 +166,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
                this->internal().deviceViewAll()
            );

            sender_.sendData(pFlowProcessors(),transferData);
            thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
        }


@@ -164,12 +176,12 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
        uint32 numRecieved = varList.getObject<uint32>(
            message::eventName(message::BNDR_PROCTRANSFER_RECIEVE)
        );
        reciever_.recieveData(pFlowProcessors(), numRecieved);
        neighborProcField_.recieveData(pFlowProcessors(), numRecieved);
    }
    else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_WAITFILL))
    {

        uint32 numRecieved = reciever_.waitBufferForUse();
        uint32 numRecieved = neighborProcField_.waitBufferForUse();

        if(msg.equivalentTo(message::CAP_CHANGED))
        {

@@ -188,7 +200,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
        const auto& indices = varList.getObject<uint32IndexContainer>(
            message::eventName(message::ITEM_INSERT));

        this->internal().field().insertSetElement(indices, reciever_.buffer().deviceView());
        this->internal().field().insertSetElement(indices, neighborProcField_.buffer().deviceView());

        return true;
    }

@@ -198,14 +210,14 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::sendBackData() const
{
    reciever_.sendBackData(pFlowProcessors());
    neighborProcField_.sendBackData(pFlowProcessors());
    dataRecieved_ = false;
}

template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::recieveBackData() const
{
    sender_.recieveBackData(pFlowProcessors(), this->size());
    thisFieldInNeighbor_.recieveBackData(pFlowProcessors(), this->size());
}

template <class T, class MemorySpace>

@@ -216,16 +228,17 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
        Kokkos::Schedule<Kokkos::Static>,
        Kokkos::IndexType<pFlow::uint32>>;

    sender_.waitBufferForUse();
    //pOutput<<"waiting for buffer to be recived in addBufferToInternalField "<<this->name()<<endl;
    thisFieldInNeighbor_.waitBufferForUse();

    const auto& buffView = sender_.buffer().deviceViewAll();
    const auto& buffView = thisFieldInNeighbor_.buffer().deviceViewAll();
    const auto& field = this->internal().deviceViewAll();

    if constexpr( isDeviceAccessible<execution_space> )
    {
        const auto& indices = this->indexList().deviceViewAll();
        Kokkos::parallel_for(
            "dataSender::recieveBackData",
            "recieveBackData::"+this->name(),
            RPolicy(0,this->size()),
            LAMBDA_HD(uint32 i)
            {

@@ -238,7 +251,7 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
    {
        const auto& indices = this->boundary().indexListHost().deviceViewAll();
        Kokkos::parallel_for(
            "dataSender::recieveBackData",
            "recieveBackData::"+this->name(),
            RPolicy(0,this->size()),
            LAMBDA_HD(uint32 i)
            {

@@ -247,4 +260,25 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
        );
        Kokkos::fence();
    }
}
}


template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryToMaster()const
{
    if (!this->isBoundaryMaster() )
    {
        thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
        dataRecieved_ = false;
    }
}

template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryFromSlave()const
{
    if( this->isBoundaryMaster() )
    {
        neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
        dataRecieved_ = false;
    }
}
@@ -23,6 +23,7 @@ Licence:
#include "boundaryField.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryProcessor.hpp"

namespace pFlow::MPI
{

@@ -50,9 +51,9 @@ public:

private:

    dataSender<T, MemorySpace> sender_;
    mutable dataSender<T, MemorySpace> thisFieldInNeighbor_;

    dataReciever<T, MemorySpace> reciever_;
    mutable dataReciever<T, MemorySpace> neighborProcField_;

    mutable bool dataRecieved_ = true;

@@ -86,7 +87,7 @@ public:

    void fill(const T& val)override
    {
        reciever_.fill(val);
        neighborProcField_.fill(val);
    }

    bool hearChanges(

@@ -103,6 +104,10 @@ public:

    void addBufferToInternalField()const;

    void updateBoundaryToMaster()const;

    void updateBoundaryFromSlave()const;

};

}
@@ -33,11 +33,13 @@ pFlow::MPI::boundaryProcessor::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
        uint32 nRecv = reciever_.waitBufferForUse();
        uint32 nRecv = neighborProcPoints_.waitBufferForUse();
        dataRecieved_ = true;
        if (nRecv != neighborProcSize())
        {
            fatalErrorInFunction;
            fatalErrorInFunction<<"In boundary "<<this->name()<<
                " ,number of recieved data is "<< nRecv<<
                " and neighborProcSize is "<<neighborProcSize()<<endl;
            fatalExit;
        }
    }

@@ -51,12 +53,12 @@ pFlow::MPI::boundaryProcessor::boundaryProcessor(
    uint32 thisIndex
)
: boundaryBase(dict, bplane, internal, bndrs, thisIndex),
  sender_(
  thisPointsInNeighbor_(
      groupNames("sendBuffer", name()),
      neighborProcessorNo(),
      thisBoundaryIndex()
  ),
  reciever_(
  neighborProcPoints_(
      groupNames("neighborProcPoints", name()),
      neighborProcessorNo(),
      mirrorBoundaryIndex()

@@ -67,57 +69,94 @@ pFlow::MPI::boundaryProcessor::boundaryProcessor(
bool
pFlow::MPI::boundaryProcessor::beforeIteration(
    uint32 step,
    uint32 iterNum,
    real t,
    real dt)
    const timeInfo& ti,
    bool updateIter,
    bool iterBeforeUpdate ,
    bool& callAgain
)
{
    if(step == 1 )
    if(step == 1)
    {
        thisNumPoints_ = size();

        uint32 oldNeighborProcNumPoints = neighborProcNumPoints_;

        MPI_Isend(
            &thisNumPoints_,
            1,
            MPI_UNSIGNED,
            neighborProcessorNo(),
            thisBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            &numPointsRequest0_);

        MPI_Irecv(
            &neighborProcNumPoints_,
            1,
            MPI_UNSIGNED,
            neighborProcessorNo(),
            mirrorBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            &numPointsRequest_
        );

        boundaryBase::beforeIteration(step, ti, updateIter, iterBeforeUpdate, callAgain);
        callAgain = true;
    }
    else if(step == 2 )
    {

#ifdef BoundaryModel1
        callAgain = true;
#else
        if(!performBoundarytUpdate())
        {
            callAgain = false;
            return true;
        }
#endif

        thisNumPoints_ = size();

        MPI_Isend(
            &thisNumPoints_,
            1,
            MPI_UNSIGNED,
            neighborProcessorNo(),
            thisBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            &numPointsRequest0_);

        MPI_Irecv(
            &neighborProcNumPoints_,
            1,
            MPI_UNSIGNED,
            neighborProcessorNo(),
            mirrorBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            &numPointsRequest_
        );

    }
    else if(step == 3 )
    {
        callAgain = true;

        if(numPointsRequest_ != RequestNull)
        {
            MPI_Wait(&numPointsRequest_, MPI_STATUS_IGNORE);
            if(numPointsRequest0_!= RequestNull)
            {
                MPI_Request_free(&numPointsRequest0_);
                MPI_Wait(&numPointsRequest0_, MPI_STATUS_IGNORE);
            }
        }

        // Size has not been changed. Notification is not required.
        if(neighborProcNumPoints_ == neighborProcPoints_.size()) return true;

        anyList varList;
        message msg;

        varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);

        if( !notify(iterNum, t, dt, msg, varList) )
        if( !notify(ti.iter(), ti.t(), ti.dt(), msg, varList) )
        {
            fatalErrorInFunction;
            callAgain = false;
            return false;
        }

    }
    else if(step == 4)
    {
        dataRecieved_ = false;
        if ( !isBoundaryMaster())
        {
            thisPointsInNeighbor_.sendData(pFlowProcessors(), thisPoints(),"positions");
        }
        else if (isBoundaryMaster())
        {
            neighborProcPoints_.recieveData(pFlowProcessors(), neighborProcSize(), "positions");
        }

        callAgain = false;
    }

    return true;
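Steps 1-3 above exchange only the point counts with non-blocking calls, so the send and receive can be posted early and completed later with MPI_Wait. Stripped of the pFlow wrappers, the pattern is the standard one sketched below; function and variable names are illustrative.

#include <mpi.h>

void exchangeCounts(unsigned thisCount, unsigned& neighborCount,
                    int neighborRank, int sendTag, int recvTag, MPI_Comm comm)
{
    MPI_Request sendReq = MPI_REQUEST_NULL;
    MPI_Request recvReq = MPI_REQUEST_NULL;

    // Post both operations without blocking.
    MPI_Isend(&thisCount, 1, MPI_UNSIGNED, neighborRank, sendTag, comm, &sendReq);
    MPI_Irecv(&neighborCount, 1, MPI_UNSIGNED, neighborRank, recvTag, comm, &recvReq);

    // ... unrelated work can overlap with the communication here ...

    MPI_Wait(&recvReq, MPI_STATUS_IGNORE);   // neighborCount is valid after this
    MPI_Wait(&sendReq, MPI_STATUS_IGNORE);   // thisCount may be reused after this
}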
@@ -133,62 +172,46 @@ pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints()
{
    checkDataRecieved();
    return reciever_.buffer();
    return neighborProcPoints_.buffer();
}

const pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints() const
{
    checkDataRecieved();
    return reciever_.buffer();
    return neighborProcPoints_.buffer();
}

bool
pFlow::MPI::boundaryProcessor::updataBoundaryData(int step)
{
    if (step == 1)
    {
        sender_.sendData(pFlowProcessors(), thisPoints());
        dataRecieved_ = false;
    }
    else if (step == 2)
    {
        reciever_.recieveData(pFlowProcessors(), neighborProcSize());
        dataRecieved_ = false;
    }
    return true;
}

bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
bool pFlow::MPI::boundaryProcessor::transferData(
    uint32 iter,
    int step,
    bool& callAgain
)
{
    if(!boundaryListUpdate(iter))return false;

    if(step==1)
    if( !iterBeforeBoundaryUpdate() )
    {
        uint32 s = size();
        uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
        transferFlags.fill(0u);

        const auto& transferD = transferFlags.deviceViewAll();
        deviceScatteredFieldAccess<realx3> points = thisPoints();
        auto p = boundaryPlane().infPlane();

        numToTransfer_ = 0;
        callAgain = false;
        return true;
    }

    if(step == 1)
    {

        Kokkos::parallel_reduce
        (
            "boundaryProcessor::afterIteration",
            deviceRPolicyStatic(0,s),
            boundaryProcessorKernels::markNegative(
                boundaryPlane().infPlane(),
                transferFlags.deviceViewAll(),
                thisPoints()
            ),
            numToTransfer_
        );
        uint32Vector_D transferFlags("transferFlags"+this->name());

        numToTransfer_ = markInNegativeSide(
            "transferData::markToTransfer"+this->name(),
            transferFlags);

        uint32Vector_D keepIndices("keepIndices");

        if(numToTransfer_ != 0u)
        {
            pFlow::boundaryBaseKernels::createRemoveKeepIndices

@@ -200,6 +223,7 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
                keepIndices,
                false
            );

            // delete transfer point from this processor
            if( !setRemoveKeepIndices(transferIndices_, keepIndices))
            {

@@ -212,60 +236,80 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
        {
            transferIndices_.clear();
        }

        auto req = RequestNull;

        CheckMPI( Isend(
            numToTransfer_,
            neighborProcessorNo(),
            thisBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            &req), true );
            //pOutput<<"sent "<< numToTransfer_<<endl;
            &numTransferRequest_), true );

        CheckMPI(Irecv(
            numToRecieve_,
            neighborProcessorNo(),
            mirrorBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
            StatusesIgnore), true);
            &numRecieveRequest_), true);

        //pOutput<<"recieved "<<numToRecieve_<<endl;

        MPI_Request_free(&req);
        callAgain = true;
        return true;
    }
    else if(step ==2 )
    else if(step ==2) // to transferData to neighbor
    {
        if( transferIndices_.empty() )return true;
        if(numTransferRequest_!= RequestNull)
        {
            Wait(&numTransferRequest_, StatusIgnore);
        }

        if( numToTransfer_ == 0u)
        {
            callAgain = true;
            return true;
        }

        pointFieldAccessType transferPoints(
            transferIndices_.size(),
            transferIndices_.deviceViewAll(),
            internal().pointPositionDevice());
            transferIndices_.size(),
            transferIndices_.deviceViewAll(),
            internal().pointPositionDevice()
        );

        sender_.sendData(pFlowProcessors(), transferPoints);
        // this buffer is used temporarily
        thisPointsInNeighbor_.sendData(pFlowProcessors(), transferPoints);

        message msg;
        anyList varList;
        varList.emplaceBack(
            msg.addAndName(message::BNDR_PROCTRANSFER_SEND),
            transferIndices_);

        if(!notify(
            internal().time().currentIter(),
            internal().time().currentTime(),
            internal().time().dt(),
            msg,
            varList))
        const auto ti = internal().time().TimeInfo();

        if(!notify(ti, msg, varList)
        )
        {
            fatalErrorInFunction;
            callAgain = false;
            return false;
        }

        return true;
    }
    else if(step == 3)
    else if(step == 3) // to recieve data
    {
        if(numToRecieve_ == 0u) return false;
        reciever_.recieveData(pFlowProcessors(), numToRecieve_);

        if(numRecieveRequest_ != RequestNull)
        {
            Wait(&numRecieveRequest_, StatusIgnore);
        }

        if(numToRecieve_ == 0u)
        {
            callAgain = false;
            return true;
        }

        // this buffer is being used temporarily
        neighborProcPoints_.recieveData(pFlowProcessors(), numToRecieve_);

        message msg;
        anyList varList;

@@ -273,65 +317,70 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
            msg.addAndName(message::BNDR_PROCTRANSFER_RECIEVE),
            numToRecieve_);

        if(!notify(
            internal().time().currentIter(),
            internal().time().currentTime(),
            internal().time().dt(),
            msg,
            varList))
        const auto ti = internal().time().TimeInfo();
        if(!notify( ti, msg, varList))
        {
            fatalErrorInFunction;
            callAgain = false;
            return false;
        }

        callAgain = true;
        return true;
    }
    else if(step == 4)
    else if(step == 4) // to insert data
    {
        if(numToRecieve_ == 0u) return false;
        reciever_.waitBufferForUse();
        if(numToRecieve_ == 0u)
        {
            callAgain = false;
            return true;
        }

        // points should be inserted first
        message msg(message::BNDR_PROCTRANSFER_WAITFILL);
        anyList varList;

        internal().insertPointsOnly(reciever_.buffer(), msg, varList);
        neighborProcPoints_.waitBufferForUse();
        internal().insertPointsOnly(neighborProcPoints_.buffer(), msg, varList);

        const auto& indices = varList.getObject<uint32IndexContainer>(message::eventName(message::ITEM_INSERT));

        auto indView = deviceViewType1D<uint32>(indices.deviceView().data(), indices.deviceView().size());

        uint32Vector_D newIndices("newIndices", indView);

        if(! appendNewIndices(newIndices))
        {
            fatalErrorInFunction;
            callAgain = false;
            return false;
        }

        if(!notify(
            internal().time().currentIter(),
            internal().time().currentTime(),
            internal().time().dt(),
            msg,
            varList))
        const auto ti = internal().time().TimeInfo();
        if(!notify(ti, msg, varList))
        {
            fatalErrorInFunction;
            callAgain = false;
            return false;
        }

        return false;
        callAgain = false;
        return true;
    }

    return false;
    callAgain = false;
    return true;

}

bool
pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
pFlow::MPI::boundaryProcessor::iterate(const timeInfo& ti)
{
    return true;
}

bool
pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
pFlow::MPI::boundaryProcessor::afterIteration(const timeInfo& ti)
{

    uint32 s = size();
@@ -25,6 +25,7 @@ Licence:
#include "mpiTypes.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryConfigs.hpp"

namespace pFlow::MPI
{

@@ -46,11 +47,9 @@ namespace pFlow::MPI

    Request numPointsRequest0_ = RequestNull;

    realx3Vector_D neighborProcPoints_;
    dataSender<realx3> thisPointsInNeighbor_;

    dataSender<realx3> sender_;

    dataReciever<realx3> reciever_;
    dataReciever<realx3> neighborProcPoints_;

    mutable bool dataRecieved_ = true;

@@ -60,6 +59,10 @@ namespace pFlow::MPI

    uint32Vector_D transferIndices_{"transferIndices"};

    Request numTransferRequest_ = RequestNull;

    Request numRecieveRequest_ = RequestNull;

    void checkDataRecieved() const;

    /// @brief Update processor boundary data for this processor

@@ -72,7 +75,7 @@ namespace pFlow::MPI
    /// step is non-blocking recieve to get data.
    bool updataBoundaryData(int step) override;

    bool transferData(uint32 iter, int step) override;
    bool transferData(uint32 iter, int step, bool& callAgain) override;

public:

    TypeInfo("boundary<processor>");

@@ -91,11 +94,17 @@ namespace pFlow::MPI
        boundaryProcessor,
        dictionary);

    bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;
    bool beforeIteration(
        uint32 step,
        const timeInfo& ti,
        bool updateIter,
        bool iterBeforeUpdate ,
        bool& callAgain
    ) override;

    bool iterate(uint32 iterNum, real t, real dt) override;
    bool iterate(const timeInfo& ti) override;

    bool afterIteration(uint32 iterNum, real t, real dt) override;
    bool afterIteration(const timeInfo& ti) override;

    /// @brief Return number of points in the neighbor processor boundary.
    /// This is overriden from boundaryBase.
@@ -76,7 +76,8 @@ public:

    void recieveData(
        const localProcessors& processors,
        uint32 numToRecv
        uint32 numToRecv,
        const word& name = "dataReciver"
    )
    {
        resize(numToRecv);
@@ -1,3 +1,23 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
    This file is part of phasicFlow code. It is a free software for simulating
    granular and multiphase flows. You can redistribute it and/or modify it under
    the terms of GNU General Public License v3 or any other later versions.

    phasicFlow is distributed to help others in their research in the field of
    granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __dataSender_hpp__
#define __dataSender_hpp__


@@ -61,7 +81,8 @@ public:

    void sendData(
        const localProcessors& processors,
        const scatteredFieldAccess<T, memory_space>& scatterField
        const scatteredFieldAccess<T, memory_space>& scatterField,
        const word& name = "dataSender::sendData"
    )
    {
        using RPolicy = Kokkos::RangePolicy<

@@ -79,10 +100,10 @@ public:
        buffer_.clear();
        buffer_.resize(n);

        const auto& buffView = buffer_.deviceViewAll();
        const auto& buffView = buffer_.deviceViewAll();

        Kokkos::parallel_for(
            "dataSender::sendData",
            "packDataForSend::"+name,
            RPolicy(0,n),
            LAMBDA_HD(uint32 i)
            {
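The renamed kernel above gathers the scattered field elements into a contiguous buffer before that buffer is handed to MPI. A self-contained sketch of the pack step, using plain Kokkos views instead of pFlow's scatteredFieldAccess, is shown below; the names are illustrative only.

#include <Kokkos_Core.hpp>

template<class T>
Kokkos::View<T*> packForSend(Kokkos::View<const T*>   field,
                             Kokkos::View<const int*> indices)
{
    const int n = static_cast<int>(indices.extent(0));
    Kokkos::View<T*> buffer(
        Kokkos::view_alloc(Kokkos::WithoutInitializing, "sendBuffer"), n);

    // One thread per selected element: copy field(indices(i)) into slot i of the buffer.
    Kokkos::parallel_for(
        "packForSend",
        Kokkos::RangePolicy<>(0, n),
        KOKKOS_LAMBDA(const int i) { buffer(i) = field(indices(i)); });

    Kokkos::fence();   // the buffer must be filled before MPI reads from it
    return buffer;
}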