commit 3d0f31a2b2

AB2Kernels.hpp
@@ -0,0 +1,63 @@
#ifndef __AB2Kernels_hpp__
#define __AB2Kernels_hpp__

#include "KokkosTypes.hpp"
#include "types.hpp"

namespace pFlow::AB2Kernels
{

inline
bool intAllActive(
    const word& name,
    real dt,
    rangeU32 activeRng,
    const deviceViewType1D<realx3>& y,
    const deviceViewType1D<realx3>& dy,
    const deviceViewType1D<realx3>& dy1
)
{
    Kokkos::parallel_for(
        name,
        deviceRPolicyStatic (activeRng.start(), activeRng.end()),
        LAMBDA_HD(uint32 i){
            y[i] += dt*(static_cast<real>(1.5) * dy[i] - static_cast<real>(0.5) * dy1[i]);
            dy1[i] = dy[i];
        });
    Kokkos::fence();

    return true;
}

inline
bool intScattered
(
    const word& name,
    real dt,
    const pFlagTypeDevice& activePoints,
    const deviceViewType1D<realx3>& y,
    const deviceViewType1D<realx3>& dy,
    const deviceViewType1D<realx3>& dy1
)
{
    Kokkos::parallel_for(
        name,
        deviceRPolicyStatic (activePoints.activeRange().start(), activePoints.activeRange().end()),
        LAMBDA_HD(uint32 i){
            if( activePoints(i))
            {
                y[i] += dt*(static_cast<real>(1.5) * dy[i] - static_cast<real>(0.5) * dy1[i]);
                dy1[i] = dy[i];
            }
        });
    Kokkos::fence();

    return true;
}

}

#endif
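
The two kernels above apply the second-order Adams-Bashforth update y(n+1) = y(n) + dt*(1.5*f(n) - 0.5*f(n-1)) to every point and then shift the derivative history (dy1 = dy); intAllActive sweeps a contiguous range while intScattered masks out inactive points. A minimal scalar sketch of the same update, not part of the commit, for dy/dt = -y:

    #include <cstdio>

    // Integrate dy/dt = -y with AB2, mirroring the per-point update in the
    // kernels above: y += dt*(1.5*dy - 0.5*dy1); dy1 = dy;
    int main()
    {
        double y   = 1.0;   // state, like y[i]
        double dy1 = -y;    // previous-step derivative, like dy1[i]
        const double dt = 0.01;

        for (int n = 0; n < 100; ++n)
        {
            const double dy = -y;               // current derivative, like dy[i]
            y  += dt * (1.5 * dy - 0.5 * dy1);  // AB2 step
            dy1 = dy;                           // shift history
        }
        std::printf("y(1) = %f, exact exp(-1) = %f\n", y, 0.367879);
        return 0;
    }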
processorAB2BoundaryIntegration.cpp
@@ -0,0 +1,71 @@
#include "processorAB2BoundaryIntegration.hpp"
#include "AdamsBashforth2.hpp"
#include "AB2Kernels.hpp"
#include "boundaryConfigs.hpp"

pFlow::processorAB2BoundaryIntegration::processorAB2BoundaryIntegration(
    const boundaryBase &boundary,
    const pointStructure &pStruct,
    const word &method,
    integration& intgrtn
)
:
    boundaryIntegration(boundary, pStruct, method, intgrtn)
{}

bool pFlow::processorAB2BoundaryIntegration::correct(
    real dt,
    const realx3PointField_D& y,
    const realx3PointField_D& dy
)
{
#ifndef BoundaryModel1
    if(this->isBoundaryMaster())
    {
        const uint32 thisIndex = thisBoundaryIndex();
        const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
        const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& dyView = dy.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& yView = y.BoundaryField(thisIndex).neighborProcField().deviceView();
        const rangeU32 aRange(0u, dy1View.size());
        return AB2Kernels::intAllActive(
            "AB2Integration::correct."+this->boundaryName(),
            dt,
            aRange,
            yView,
            dyView,
            dy1View
        );
    }
#endif //BoundaryModel1

    return true;
}

bool pFlow::processorAB2BoundaryIntegration::correctPStruct(real dt, const realx3PointField_D &vel)
{
#ifndef BoundaryModel1
    if(this->isBoundaryMaster())
    {
        const uint32 thisIndex = thisBoundaryIndex();
        const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
        const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& velView = vel.BoundaryField(thisIndex).neighborProcField().deviceView();
        const auto& xposView = boundary().neighborProcPoints().deviceView();
        const rangeU32 aRange(0u, dy1View.size());
        return AB2Kernels::intAllActive(
            "AB2Integration::correctPStruct."+this->boundaryName(),
            dt,
            aRange,
            xposView,
            velView,
            dy1View
        );
    }
#endif //BoundaryModel1

    return true;
}
processorAB2BoundaryIntegration.hpp
@@ -0,0 +1,51 @@
#ifndef __processorAB2BoundaryIntegration_hpp__
#define __processorAB2BoundaryIntegration_hpp__

#include "boundaryIntegration.hpp"

namespace pFlow
{

class processorAB2BoundaryIntegration
:
    public boundaryIntegration
{
public:

    TypeInfo("boundaryIntegration<processor,AdamsBashforth2>");

    processorAB2BoundaryIntegration(
        const boundaryBase& boundary,
        const pointStructure& pStruct,
        const word& method,
        integration& intgrtn
    );

    ~processorAB2BoundaryIntegration()override=default;

    bool correct(
        real dt,
        const realx3PointField_D& y,
        const realx3PointField_D& dy)override;

    bool correctPStruct(real dt, const realx3PointField_D& vel)override;

    add_vCtor(
        boundaryIntegration,
        processorAB2BoundaryIntegration,
        boundaryBase
    );

};

}

#endif
boundaryIntegration.cpp
@@ -0,0 +1,55 @@
#include "boundaryIntegration.hpp"
#include "pointStructure.hpp"


pFlow::boundaryIntegration::boundaryIntegration(
    const boundaryBase &boundary,
    const pointStructure &pStruct,
    const word &method,
    integration& intgrtn
)
:
    generalBoundary(boundary, pStruct, "", method),
    integration_(intgrtn)
{}

pFlow::uniquePtr<pFlow::boundaryIntegration> pFlow::boundaryIntegration::create(
    const boundaryBase &boundary,
    const pointStructure &pStruct,
    const word &method,
    integration& intgrtn
)
{
    word bType = angleBracketsNames2(
        "boundaryIntegration",
        boundary.type(),
        method);

    word altBType{"boundaryIntegration<none>"};

    if( boundaryBasevCtorSelector_.search(bType) )
    {
        pOutput.space(4)<<"Creating boundary "<< Green_Text(bType)<<
        " for "<<boundary.name()<<endl;
        return boundaryBasevCtorSelector_[bType](boundary, pStruct, method, intgrtn);
    }
    else if(boundaryBasevCtorSelector_.search(altBType))
    {
        pOutput.space(4)<<"Creating boundary "<< Green_Text(altBType)<<
        " for "<<boundary.name()<<endl;
        return boundaryBasevCtorSelector_[altBType](boundary, pStruct, method, intgrtn);
    }
    else
    {
        printKeys(
            fatalError << "Ctor Selector "<< bType<<
            " and "<< altBType << " do not exist. \n"
            <<"Avaiable ones are: \n",
            boundaryBasevCtorSelector_
        );
        fatalExit;
    }

    return nullptr;
}
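
create() resolves the handler through the virtual-constructor selector: it first tries the fully specified key built by angleBracketsNames2 (for example boundaryIntegration<processor,AdamsBashforth2>) and falls back to boundaryIntegration<none> when no specialized type is registered. A sketch of a call site, not part of the commit (the hypothetical helper assumes pStruct and intgrtn come from the surrounding solver setup):

    // Hypothetical call site: build one handler per domain face.
    void makeBoundaryHandlers(const pFlow::pointStructure& pStruct,
                              pFlow::integration& intgrtn)
    {
        for(pFlow::uint32 i = 0; i < 6; i++)
        {
            auto handler = pFlow::boundaryIntegration::create(
                pStruct.boundaries()[i],  // boundaryBase of face i
                pStruct,
                "AdamsBashforth2",        // method part of the selector key
                intgrtn);
            // on a processor boundary this resolves to
            // boundaryIntegration<processor,AdamsBashforth2>; otherwise the
            // do-nothing boundaryIntegration<none> fallback is created
        }
    }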
boundaryIntegration.hpp
@@ -0,0 +1,91 @@
#ifndef __boundaryIntegration_hpp__
#define __boundaryIntegration_hpp__


#include "generalBoundary.hpp"
#include "virtualConstructor.hpp"
#include "pointFields.hpp"

namespace pFlow
{

class integration;

class boundaryIntegration
:
    public generalBoundary
{
private:

    const integration& integration_;

public:

    TypeInfo("boundaryIntegration<none>");

    boundaryIntegration(
        const boundaryBase& boundary,
        const pointStructure& pStruct,
        const word& method,
        integration& intgrtn);

    ~boundaryIntegration()override = default;

    create_vCtor(
        boundaryIntegration,
        boundaryBase,
        (
            const boundaryBase& boundary,
            const pointStructure& pStruct,
            const word& method,
            integration& intgrtn
        ),
        (boundary, pStruct, method, intgrtn)
    );

    add_vCtor(
        boundaryIntegration,
        boundaryIntegration,
        boundaryBase
    );

    bool hearChanges(
        real t,
        real dt,
        uint32 iter,
        const message &msg,
        const anyList &varList) override
    {
        return true;
    }

    const integration& Integration()const
    {
        return integration_;
    }

    virtual
    bool correct(real dt, const realx3PointField_D& y, const realx3PointField_D& dy)
    {
        return true;
    }

    virtual
    bool correctPStruct(real dt, const realx3PointField_D& vel)
    {
        return true;
    }

    static
    uniquePtr<boundaryIntegration> create(
        const boundaryBase& boundary,
        const pointStructure& pStruct,
        const word& method,
        integration& intgrtn);

};

}

#endif
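
The create_vCtor/add_vCtor pair declares and populates a name-keyed registry of constructor wrappers, which is what the create() factory above queries through boundaryBasevCtorSelector_. A stripped-down conceptual sketch, not the actual macro expansion (the real phasicFlow macros generate more machinery):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>

    struct Base
    {
        using Ctor = std::function<std::unique_ptr<Base>(int /*ctor args*/)>;

        // the registry a create_vCtor-style macro declares:
        // type name -> constructor wrapper
        static std::map<std::string, Ctor>& table()
        {
            static std::map<std::string, Ctor> t;
            return t;
        }
    };

    struct Derived : Base
    {
        explicit Derived(int) {}
        // registration an add_vCtor-style macro performs via a static object
        static bool registered_;
    };

    bool Derived::registered_ = (Base::table()["Derived"] =
        [](int a) { return std::unique_ptr<Base>(new Derived(a)); }, true);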
boundaryIntegrationList.cpp
@@ -0,0 +1,41 @@
#include "boundaryIntegrationList.hpp"
#include "integration.hpp"

pFlow::boundaryIntegrationList::boundaryIntegrationList(
    const pointStructure &pStruct,
    const word &method,
    integration &intgrtn
)
:
    ListPtr<boundaryIntegration>(6),
    boundaries_(pStruct.boundaries())
{
    for(uint32 i=0; i<6; i++)
    {
        this->set(
            i,
            boundaryIntegration::create(
                boundaries_[i],
                pStruct,
                method,
                intgrtn
            )
        );
    }
}

bool pFlow::boundaryIntegrationList::correct(real dt, realx3PointField_D &y, realx3PointField_D &dy)
{
    for(auto& bndry:*this)
    {
        if(!bndry->correct(dt, y, dy))
        {
            fatalErrorInFunction;
            return false;
        }
    }
    return true;
}
boundaryIntegrationList.hpp
@@ -0,0 +1,45 @@
#ifndef __boundaryIntegrationList_hpp__
#define __boundaryIntegrationList_hpp__


#include "boundaryList.hpp"
#include "ListPtr.hpp"
#include "boundaryIntegration.hpp"


namespace pFlow
{

class integration;

class boundaryIntegrationList
:
    public ListPtr<boundaryIntegration>
{
private:

    const boundaryList& boundaries_;

public:

    boundaryIntegrationList(
        const pointStructure& pStruct,
        const word& method,
        integration& intgrtn
    );

    ~boundaryIntegrationList()=default;

    bool correct(
        real dt,
        realx3PointField_D& y,
        realx3PointField_D& dy);
};

}

#endif //__boundaryIntegrationList_hpp__
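
boundaryIntegrationList owns one handler per domain face (six for a box) and forwards correct() to each, stopping at the first failure. A sketch, not part of the commit, of how an integration scheme could drive it each time step (hypothetical caller; the real AdamsBashforth2 correct() may differ):

    bool correctStep(pFlow::real dt,
                     pFlow::realx3PointField_D& y,
                     pFlow::realx3PointField_D& dy,
                     pFlow::boundaryIntegrationList& boundaries)
    {
        // internal (non-boundary) points are integrated elsewhere; here each
        // of the six boundary handlers updates its own points, and correct()
        // stops at the first failing boundary and reports a fatal error
        return boundaries.correct(dt, y, dy);
    }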
CMakeLists.txt
@@ -7,8 +7,7 @@ contactSearch/methods/cellBased/NBS/NBS.cpp
contactSearch/methods/cellBased/NBS/cellsWallLevel0.cpp

contactSearch/boundaries/boundaryContactSearch/boundaryContactSearch.cpp
#contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp
#contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp

contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearchKernels.cpp
contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.cpp
contactSearch/boundaries/periodicBoundaryContactSearch/wallBoundaryContactSearch.cpp

@@ -28,6 +27,8 @@ if(pFlow_Build_MPI)
list(APPEND SourceFiles
    contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp
    sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp
+   contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp
+   contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp
)
endif()
@@ -99,7 +99,7 @@ bool pFlow::processorBoundaryContactSearch::broadSearch
        thisDiams,
        neighborProcPoints,
        neighborProcDiams,
-       name()
+       boundaryName()
    );
    //pOutput<<"ppSize "<< ppPairs.size()<<endl;
    return true;
@@ -9,6 +9,7 @@ template<typename ContactListType, typename ContactForceModel>
inline
void sphereSphereInteraction
(
+   const word& kernalName,
    real dt,
    const ContactListType& cntctList,
    const ContactForceModel& forceModel,

@@ -36,7 +37,7 @@ void sphereSphereInteraction
    uint32 lastItem = cntctList.loopCount();

    Kokkos::parallel_for(
-       "pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction",
+       kernalName,
        deviceRPolicyDynamic(0,lastItem),
        LAMBDA_HD(uint32 n)
        {
@@ -32,11 +32,6 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySpher
        geomMotion
    ),
    masterInteraction_(boundary.isBoundaryMaster())
-   ,
-   inter_("inter"),
-   send_("send"),
-   recv_("recv"),
-   add_("add")
{
    if(masterInteraction_)
    {

@@ -46,6 +41,9 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySpher
    }
}

+#ifdef BoundaryModel1

template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(

@@ -74,8 +72,8 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
        if(step == 2 )
        {
-           iter++;
-           inter_.start();
            pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
+               "ppBoundaryInteraction."+this->boundaryName(),
                dt,
                this->ppPairs(),
                cfModel,

@@ -94,64 +92,165 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
                cfBndry.neighborProcField().deviceViewAll(),
                ctBndry.neighborProcField().deviceViewAll()
            );
-           inter_.end();

            return true;
        }
        else if(step == 3 )
        {
-           send_.start();
            cfBndry.sendBackData();
            ctBndry.sendBackData();
-           send_.end();

            return true;
        }

-       if(iter % 1000 == 0u)
-       {
-           pOutput<<"inter "<< inter_.totalTime()<<endl;
-           pOutput<<"send "<< send_.totalTime()<<endl<<endl;;
-       }

        return false;
    }
    else
    {
+       const auto & sphPar = this->sphParticles();
+       uint32 thisIndex = this->boundary().thisBoundaryIndex();
+       const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
+           sphPar.contactForce().BoundaryField(thisIndex));
+       const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
+           sphPar.contactTorque().BoundaryField(thisIndex));
-       if(step==1)
+       if(step == 1 )
        {
-           recv_.start();
-           const auto & sphPar = this->sphParticles();
-           uint32 thisIndex = this->boundary().thisBoundaryIndex();
-           const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
-               sphPar.contactForce().BoundaryField(thisIndex));
-           const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
-               sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.recieveBackData();
            ctBndry.recieveBackData();
-           recv_.end();

            return false;
        }
-       else if(step == 2)
+       else if(step == 11)
        {
-           iter++;
            return true;
        }
        else if(step == 3)
        {
-           add_.start();
-           const auto & sphPar = this->sphParticles();
-           uint32 thisIndex = this->boundary().thisBoundaryIndex();
-           const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
-               sphPar.contactForce().BoundaryField(thisIndex));
-           const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
-               sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.addBufferToInternalField();
            ctBndry.addBufferToInternalField();
-           add_.end();

            return true;
        }

-       if(iter % 1000 == 0u)
-       {
-           pOutput<<"recive "<< recv_.totalTime()<<endl;
-           pOutput<<"add "<< add_.totalTime()<<endl<<endl;
-       }

        return false;
    }

    return false;
}
+#else

template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
    real dt,
    const ContactForceModel &cfModel,
    uint32 step
)
{
    // master processor calculates the contact force/torque and sends data back to the
    // neighbor processor (slave processor).
    // slave processor recieves the data and adds the data to the internalField
    if(masterInteraction_)
    {
        if(step==1)return true;

        const auto & sphPar = this->sphParticles();
        uint32 thisIndex = this->boundary().thisBoundaryIndex();

        const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
            sphPar.contactForce().BoundaryField(thisIndex));

        const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
            sphPar.contactTorque().BoundaryField(thisIndex));

        if(step == 2 )
        {
            pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
                "ppBoundaryInteraction."+this->boundaryName(),
                dt,
                this->ppPairs(),
                cfModel,
                this->boundary().thisPoints(),
                sphPar.diameter().deviceViewAll(),
                sphPar.propertyId().deviceViewAll(),
                sphPar.velocity().deviceViewAll(),
                sphPar.rVelocity().deviceViewAll(),
                sphPar.contactForce().deviceViewAll(),
                sphPar.contactTorque().deviceViewAll(),
                this->boundary().neighborProcPoints().deviceViewAll(),
                sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
                cfBndry.neighborProcField().deviceViewAll(),
                ctBndry.neighborProcField().deviceViewAll()
            );

            return true;
        }
        else if(step == 3 )
        {
            cfBndry.sendBackData();
            ctBndry.sendBackData();
            return true;
        }
        else if(step == 11 )
        {
            cfBndry.updateBoundaryFromSlave();
            ctBndry.updateBoundaryFromSlave();
            return true;
        }

        return false;
    }
    else
    {
        if(step == 1 )
        {
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.recieveBackData();
            ctBndry.recieveBackData();

            return false;
        }
        else if(step == 11)
        {
            const auto & sphPar = this->sphParticles();
            uint32 thisIndex = this->boundary().thisBoundaryIndex();
            const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
                sphPar.contactForce().BoundaryField(thisIndex));
            const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
                sphPar.contactTorque().BoundaryField(thisIndex));

            cfBndry.addBufferToInternalField();
            cfBndry.updateBoundaryToMaster();

            ctBndry.addBufferToInternalField();
            ctBndry.updateBoundaryToMaster();

            return true;
        }

        return false;
    }

    return false;
}

#endif
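
The rewritten sphereSphereInteraction is a small state machine over step: on the master, step 2 computes forces against received neighbor data, step 3 sends results back, and step 11 refreshes boundary data; the slave mirrors this with recieveBackData and addBufferToInternalField. A sketch, not part of the commit, of a driver for this protocol (hypothetical; the real sphereInteraction loop may differ, and boundary, dt, cfModel are assumed to exist in the caller):

    template<class BoundaryInteraction, class ForceModel>
    void driveInteraction(BoundaryInteraction& boundary, pFlow::real dt,
                          const ForceModel& cfModel)
    {
        // steps 1..3 and 11 are meaningful; other steps simply return false
        for(pFlow::uint32 step = 1; step <= 11; ++step)
        {
            bool did = boundary.sphereSphereInteraction(dt, cfModel, step);
            (void)did; // true: completed a unit of work; false: waiting / nothing to do
        }
    }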
@@ -22,6 +22,7 @@ Licence:

#include "boundarySphereInteraction.hpp"
#include "processorBoundaryField.hpp"
+#include "boundaryProcessor.hpp"

namespace pFlow::MPI
{

@@ -57,12 +58,6 @@ private:

    bool masterInteraction_;

-   Timer inter_;
-   Timer send_;
-   Timer recv_;
-   Timer add_;
-   uint32 iter=0;

public:

    TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel);
boundarySphereParticles.cpp
@@ -0,0 +1,64 @@
#include "boundarySphereParticles.hpp"
#include "boundaryBase.hpp"
#include "sphereParticles.hpp"


pFlow::boundarySphereParticles::boundarySphereParticles(
    const boundaryBase &boundary,
    sphereParticles &prtcls
)
:
    generalBoundary(boundary, prtcls.pStruct(), "", ""),
    particles_(prtcls)
{}

pFlow::sphereParticles &pFlow::boundarySphereParticles::Particles()
{
    return particles_;
}

const pFlow::sphereParticles &pFlow::boundarySphereParticles::Particles() const
{
    return particles_;
}

pFlow::uniquePtr<pFlow::boundarySphereParticles> pFlow::boundarySphereParticles::create(
    const boundaryBase &boundary,
    sphereParticles &prtcls
)
{
    word bType = angleBracketsNames2(
        "boundarySphereParticles",
        pFlowProcessors().localRunTypeName(),
        boundary.type());

    word altBType{"boundarySphereParticles<none>"};

    if( boundaryBasevCtorSelector_.search(bType) )
    {
        pOutput.space(4)<<"Creating boundary "<< Green_Text(bType)<<
        " for "<<boundary.name()<<endl;
        return boundaryBasevCtorSelector_[bType](boundary, prtcls);
    }
    else if(boundaryBasevCtorSelector_.search(altBType))
    {
        pOutput.space(4)<<"Creating boundary "<< Green_Text(altBType)<<
        " for "<<boundary.name()<<endl;
        return boundaryBasevCtorSelector_[altBType](boundary, prtcls);
    }
    else
    {
        printKeys(
            fatalError << "Ctor Selector "<< bType<<
            " and "<< altBType << " do not exist. \n"
            <<"Avaiable ones are: \n",
            boundaryBasevCtorSelector_
        );
        fatalExit;
    }

    return nullptr;
}
boundarySphereParticles.hpp
@@ -0,0 +1,80 @@
#ifndef __boundarySphereParticles_hpp__
#define __boundarySphereParticles_hpp__

#include "generalBoundary.hpp"
#include "virtualConstructor.hpp"
#include "timeInfo.hpp"

namespace pFlow
{

class sphereParticles;

class boundarySphereParticles
    : public generalBoundary
{
private:

    sphereParticles& particles_;

public:

    /// type info
    TypeInfo("boundarySphereParticles<none>");

    boundarySphereParticles(
        const boundaryBase &boundary,
        sphereParticles& prtcls
    );

    create_vCtor(
        boundarySphereParticles,
        boundaryBase,
        (
            const boundaryBase &boundary,
            sphereParticles& prtcls
        ),
        (boundary, prtcls)
    );

    add_vCtor(
        boundarySphereParticles,
        boundarySphereParticles,
        boundaryBase
    );

    sphereParticles& Particles();

    const sphereParticles& Particles()const;

    bool hearChanges(
        real t,
        real dt,
        uint32 iter,
        const message &msg,
        const anyList &varList) override
    {
        return true;
    }

    virtual
    bool acceleration(const timeInfo& ti, const realx3& g)
    {
        return true;
    }

    static
    uniquePtr<boundarySphereParticles> create(
        const boundaryBase &boundary,
        sphereParticles& prtcls);

};

}

#endif
boundarySphereParticlesList.cpp
@@ -0,0 +1,19 @@
#include "boundarySphereParticlesList.hpp"

pFlow::boundarySphereParticlesList::boundarySphereParticlesList(
    const boundaryList &bndrs,
    sphereParticles &prtcls
)
:
    ListPtr(bndrs.size()),
    boundaries_(bndrs)
{
    for(auto i=0; i<boundaries_.size(); i++)
    {
        this->set
        (
            i,
            boundarySphereParticles::create(boundaries_[i], prtcls)
        );
    }
}
boundarySphereParticlesList.hpp
@@ -0,0 +1,36 @@
#ifndef __boundarySphereParticlesList_hpp__
#define __boundarySphereParticlesList_hpp__

#include "ListPtr.hpp"
#include "boundaryList.hpp"
#include "boundarySphereParticles.hpp"

namespace pFlow
{

class boundarySphereParticlesList
:
    public ListPtr<boundarySphereParticles>
{
private:

    const boundaryList& boundaries_;

public:

    boundarySphereParticlesList(
        const boundaryList& bndrs,
        sphereParticles& prtcls
    );

    ~boundarySphereParticlesList()=default;

};

}

#endif
processorBoundarySphereParticles.cpp
@@ -0,0 +1,46 @@
#include "processorBoundarySphereParticles.hpp"
#include "sphereParticles.hpp"
#include "boundaryProcessor.hpp"

pFlow::processorBoundarySphereParticles::processorBoundarySphereParticles(
    const boundaryBase &boundary,
    sphereParticles &prtcls
)
:
    boundarySphereParticles(boundary, prtcls)
{}

bool pFlow::processorBoundarySphereParticles::acceleration(const timeInfo &ti, const realx3& g)
{
#ifndef BoundaryModel1

    if(isBoundaryMaster())
    {
        auto thisIndex = thisBoundaryIndex();
        auto mass = Particles().mass().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto I = Particles().I().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto cf = Particles().contactForce().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto ct = Particles().contactTorque().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto acc = Particles().accelertion().BoundaryField(thisIndex).neighborProcField().deviceView();
        auto rAcc = Particles().rAcceleration().BoundaryField(thisIndex).neighborProcField().deviceView();

        Kokkos::parallel_for(
            "processorBoundary::acceleration."+this->boundaryName(),
            deviceRPolicyStatic(0,mass.size()),
            LAMBDA_HD(uint32 i){
                acc[i] = cf[i]/mass[i] + g;
                rAcc[i] = ct[i]/I[i];
            });
        Kokkos::fence();
    }

#endif

    return true;
}
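
The kernel above is per-particle Newton's second law on the neighbor-processor buffer: acc = cf/mass + g for translation and rAcc = ct/I for rotation. The same arithmetic for a single sphere, a sketch with made-up values that is not part of the commit:

    #include <cstdio>

    int main()
    {
        double m = 0.001, I = 4.0e-9;   // mass [kg], moment of inertia [kg m^2]
        double cf = 0.02, ct = 1.0e-8;  // contact force [N], contact torque [N m]
        double g = -9.81;               // one gravity component [m/s^2]

        double acc  = cf / m + g;       // linear acceleration, like acc[i]
        double rAcc = ct / I;           // angular acceleration, like rAcc[i]

        std::printf("acc = %g m/s^2, rAcc = %g rad/s^2\n", acc, rAcc);
        return 0;
    }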
processorBoundarySphereParticles.hpp
@@ -0,0 +1,38 @@
#ifndef __processorBoundarySphereParticles_hpp__
#define __processorBoundarySphereParticles_hpp__

#include "boundarySphereParticles.hpp"

namespace pFlow
{

class processorBoundarySphereParticles
:
    public boundarySphereParticles
{

public:

    /// type info
    TypeInfo("boundarySphereParticles<MPI,processor>");

    processorBoundarySphereParticles(
        const boundaryBase &boundary,
        sphereParticles& prtcls
    );

    add_vCtor(
        boundarySphereParticles,
        processorBoundarySphereParticles,
        boundaryBase
    );

    bool acceleration(const timeInfo& ti, const realx3& g)override;

};

}

#endif
mpiCommunication.H
@@ -17,7 +17,6 @@ Licence:
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__

@@ -37,6 +36,8 @@ extern DataType realx4Type__;

extern DataType int32x3Type__;

+extern DataType uint32x3Type__;
+
template<typename T>
auto constexpr Type()
{

@@ -190,6 +191,20 @@ auto constexpr sFactor<int32x3>()
    return 1;
}

+template<>
+inline
+auto Type<uint32x3>()
+{
+    return uint32x3Type__;
+}
+
+template<>
+auto constexpr sFactor<uint32x3>()
+{
+    return 1;
+}
+
/*inline
auto createByteSequence(int sizeOfElement)
{

@@ -211,6 +226,7 @@ auto TypeFree(DataType* type)
    return MPI_Type_free(type);
}

+template<typename T>
inline auto getCount(Status* status, int& count)
{

@@ -440,11 +456,6 @@ inline auto Wait(Request* request, Status* status)
    return MPI_Wait(request, status);
}

-inline auto typeFree(DataType& type)
-{
-    return MPI_Type_free(&type);
-}

}
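
Type<T>() and sFactor<T>() together translate a compound value type into an MPI datatype plus an element-count multiplier. A sketch, not part of the commit, of how a send wrapper would combine them (illustrative only; the actual helpers in this header may differ):

    #include <mpi.h>

    template<typename T>
    int sendBuffer(const T* data, int n, int dest, int tag, MPI_Comm comm)
    {
        // uint32x3 now maps to the committed uint32x3Type__ with sFactor == 1;
        // types mapped onto scalar MPI datatypes use a sFactor > 1 instead.
        return MPI_Send(
            data,
            n * pFlow::MPI::sFactor<T>(),
            pFlow::MPI::Type<T>(),
            dest,
            tag,
            comm);
    }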
@@ -55,6 +55,8 @@ namespace pFlow::MPI
    inline const auto ErrOp = MPI_ERR_OP;

    inline const auto SumOp = MPI_SUM;
+   inline const auto MaxOp = MPI_MAX;
+   inline const auto MinOp = MPI_MIN;

    inline const size_t MaxNoProcessors = 2048;
@@ -35,46 +35,18 @@ pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control)

bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
{
-   auto& boundaries = this->subDict("boundaries");
+   dictionary& boundaries = this->subDict("boundaries");
+
+   dictionary& thisBoundaries = this->subDict(thisBoundariesDictName());

-   this->addDict("MPIBoundaries", boundaries);
-   auto& mpiBoundaries = this->subDict("MPIBoundaries");
-
-   real neighborLength = boundaries.getVal<real>("neighborLength");
-   auto boundaryExtntionLengthRatio = max(
-       boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1),
-       0.0);
-   auto updateIntercal = max(
-       boundaries.getValOrSet<uint32>("updateInterval", 1u),
-       1u);

    auto neighbors = findPlaneNeighbors();

    for(uint32 i=0; i<sizeOfBoundaries(); i++)
    {
-       word bName = bundaryName(i);
-       if( !boundaries.containsDictionay(bName) )
-       {
-           fatalErrorInFunction<<"dictionary "<< bName<<
-           "does not exist in "<< boundaries.globalName()<<endl;
-           return false;
-       }
-       auto& bDict = mpiBoundaries.subDict(bName);
-
-       if(!bDict.addOrKeep("neighborLength", neighborLength))
-       {
-           fatalErrorInFunction<<"error in adding neighborLength to "<< bName <<
-           "in dictionary "<< boundaries.globalName()<<endl;
-           return false;
-       }
-
-       if(!bDict.addOrReplace("updateInterval", updateIntercal))
-       {
-           fatalErrorInFunction<<"error in adding updateIntercal to "<< bName <<
-           "in dictionary "<< boundaries.globalName()<<endl;
-       }
-
-       bDict.addOrReplace("boundaryExtntionLengthRatio", boundaryExtntionLengthRatio);
+       word bName = bundaryName(i);
+       auto& bDict = thisBoundaries.subDict(bName);

        if( thisDomainActive_ )
        {

@@ -154,8 +126,7 @@ bool pFlow::MPI::MPISimulationDomain::setThisDomain()
        fatalErrorInFunction<< "Failed to distributed domains"<<endl;
        return false;
    }

    return true;
}

@@ -276,11 +247,6 @@ std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
    return neighbors;
}

-const pFlow::dictionary &
-pFlow::MPI::MPISimulationDomain::thisBoundaryDict() const
-{
-    return this->subDict("MPIBoundaries");
-}

bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{
@@ -72,8 +72,6 @@ public:
        systemControl
    );

-   const dictionary& thisBoundaryDict() const final;

    /// @brief
    /// @param pointPos
    /// @return
@@ -25,13 +25,17 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
-       uint32 nRecv = reciever_.waitBufferForUse();
+       uint32 nRecv = neighborProcField_.waitBufferForUse();
        dataRecieved_ = true;
        if (nRecv != this->neighborProcSize())
        {
-           fatalErrorInFunction;
+           fatalErrorInFunction<<
+           "number of recived data is "<< nRecv <<" and expected number is "<<
+           this->neighborProcSize()<< " in "<<this->name() <<endl;
            fatalExit;
        }
+       //pOutput<<"field data "<< this->name()<<" has recieved with size "<< nRecv<<endl;
    }
}

@@ -42,6 +46,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
    DataDirection direction
)
{
+#ifndef BoundaryModel1
+   if(!this->boundary().performBoundarytUpdate())
+       return true;
+#endif

    if (step == 1)
    {
        // Isend

@@ -49,9 +58,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
        ( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
        (!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
-           sender_.sendData(pFlowProcessors(), this->thisField());
+           thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
            dataRecieved_ = false;
+           //pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
        }
    }
    else if (step == 2)
    {

@@ -60,8 +71,9 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
        (!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
        ( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
-           reciever_.recieveData(pFlowProcessors(), this->neighborProcSize());
+           neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
            dataRecieved_ = false;
+           //pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
        }
    }
    else

@@ -80,13 +92,13 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
    InternalFieldType& internal
)
: BoundaryFieldType(boundary, pStruct, internal),
-   sender_(
-       groupNames("sendBufferField", boundary.name()),
+   thisFieldInNeighbor_(
+       groupNames("sendBuffer", this->name()),
        boundary.neighborProcessorNo(),
        boundary.thisBoundaryIndex()
    ),
-   reciever_(
-       groupNames("neighborProcField", boundary.name()),
+   neighborProcField_(
+       groupNames("recieveBuffer", boundary.name()),
        boundary.neighborProcessorNo(),
        boundary.mirrorBoundaryIndex()
    )

@@ -102,7 +114,7 @@ typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
{
    checkDataRecieved();
-   return reciever_.buffer();
+   return neighborProcField_.buffer();
}

template<class T, class MemorySpace>

@@ -111,7 +123,7 @@ const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
{
    checkDataRecieved();
-   return reciever_.buffer();
+   return neighborProcField_.buffer();
}

template<class T, class MemorySpace>

@@ -127,7 +139,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
    if(msg.equivalentTo(message::BNDR_PROC_SIZE_CHANGED))
    {
        auto newProcSize = varList.getObject<uint32>("size");
-       reciever_.resize(newProcSize);
+       neighborProcField_.resize(newProcSize);
    }

    if(msg.equivalentTo(message::BNDR_PROCTRANSFER_SEND))

@@ -144,7 +156,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
            this->internal().deviceViewAll()
        );

-       sender_.sendData(pFlowProcessors(),transferData);
+       thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
    }
    else
    {

@@ -154,7 +166,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
            this->internal().deviceViewAll()
        );

-       sender_.sendData(pFlowProcessors(),transferData);
+       thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
    }

@@ -164,12 +176,12 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
        uint32 numRecieved = varList.getObject<uint32>(
            message::eventName(message::BNDR_PROCTRANSFER_RECIEVE)
        );
-       reciever_.recieveData(pFlowProcessors(), numRecieved);
+       neighborProcField_.recieveData(pFlowProcessors(), numRecieved);
    }
    else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_WAITFILL))
    {
-       uint32 numRecieved = reciever_.waitBufferForUse();
+       uint32 numRecieved = neighborProcField_.waitBufferForUse();

        if(msg.equivalentTo(message::CAP_CHANGED))
        {

@@ -188,7 +200,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
        const auto& indices = varList.getObject<uint32IndexContainer>(
            message::eventName(message::ITEM_INSERT));

-       this->internal().field().insertSetElement(indices, reciever_.buffer().deviceView());
+       this->internal().field().insertSetElement(indices, neighborProcField_.buffer().deviceView());

        return true;
    }

@@ -198,14 +210,14 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::sendBackData() const
{
-   reciever_.sendBackData(pFlowProcessors());
+   neighborProcField_.sendBackData(pFlowProcessors());
    dataRecieved_ = false;
}

template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::recieveBackData() const
{
-   sender_.recieveBackData(pFlowProcessors(), this->size());
+   thisFieldInNeighbor_.recieveBackData(pFlowProcessors(), this->size());
}

template <class T, class MemorySpace>

@@ -216,16 +228,17 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
        Kokkos::Schedule<Kokkos::Static>,
        Kokkos::IndexType<pFlow::uint32>>;

-   sender_.waitBufferForUse();
+   //pOutput<<"waiting for buffer to be recived in addBufferToInternalField "<<this->name()<<endl;
+   thisFieldInNeighbor_.waitBufferForUse();

-   const auto& buffView = sender_.buffer().deviceViewAll();
+   const auto& buffView = thisFieldInNeighbor_.buffer().deviceViewAll();
    const auto& field = this->internal().deviceViewAll();

    if constexpr( isDeviceAccessible<execution_space> )
    {
        const auto& indices = this->indexList().deviceViewAll();
        Kokkos::parallel_for(
-           "dataSender::recieveBackData",
+           "recieveBackData::"+this->name(),
            RPolicy(0,this->size()),
            LAMBDA_HD(uint32 i)
            {

@@ -238,7 +251,7 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
    {
        const auto& indices = this->boundary().indexListHost().deviceViewAll();
        Kokkos::parallel_for(
-           "dataSender::recieveBackData",
+           "recieveBackData::"+this->name(),
            RPolicy(0,this->size()),
            LAMBDA_HD(uint32 i)
            {

@@ -247,4 +260,25 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
            );
            Kokkos::fence();
        }
}

+template <class T, class MemorySpace>
+void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryToMaster()const
+{
+    if (!this->isBoundaryMaster() )
+    {
+        thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
+        dataRecieved_ = false;
+    }
+}
+
+template <class T, class MemorySpace>
+void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryFromSlave()const
+{
+    if( this->isBoundaryMaster() )
+    {
+        neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
+        dataRecieved_ = false;
+    }
+}
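
The renamed buffers make the data flow explicit: thisFieldInNeighbor_ carries this field's values toward the neighbor process, and neighborProcField_ receives the neighbor's values. updateBoundary stays split into a send step and a receive step, and checkDataRecieved() defers the blocking wait until the buffer is first read. A sketch, not part of the commit, of the two-phase exchange these members support (hypothetical field list; real call sites live in the boundary synchronization layer):

    template<class FieldList>
    void syncProcessorBoundaries(FieldList& fields)
    {
        using pFlow::DataDirection;

        for(auto& bf : fields)   // phase 1: every boundary posts its sends
            bf.updateBoundary(1, DataDirection::MasterToSlave);

        for(auto& bf : fields)   // phase 2: every boundary posts its recieves
            bf.updateBoundary(2, DataDirection::MasterToSlave);

        // no blocking wait here: neighborProcField() triggers
        // checkDataRecieved(), which waits only when the data is used
    }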
@@ -23,6 +23,7 @@ Licence:
#include "boundaryField.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
+#include "boundaryProcessor.hpp"

namespace pFlow::MPI
{

@@ -50,9 +51,9 @@ public:

private:

-   dataSender<T, MemorySpace> sender_;
+   mutable dataSender<T, MemorySpace> thisFieldInNeighbor_;

-   dataReciever<T, MemorySpace> reciever_;
+   mutable dataReciever<T, MemorySpace> neighborProcField_;

    mutable bool dataRecieved_ = true;

@@ -86,7 +87,7 @@ public:

    void fill(const T& val)override
    {
-       reciever_.fill(val);
+       neighborProcField_.fill(val);
    }

    bool hearChanges(

@@ -103,6 +104,10 @@ public:

    void addBufferToInternalField()const;

+   void updateBoundaryToMaster()const;
+
+   void updateBoundaryFromSlave()const;

};

}
@@ -33,11 +33,13 @@ pFlow::MPI::boundaryProcessor::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
-       uint32 nRecv = reciever_.waitBufferForUse();
+       uint32 nRecv = neighborProcPoints_.waitBufferForUse();
        dataRecieved_ = true;
        if (nRecv != neighborProcSize())
        {
-           fatalErrorInFunction;
+           fatalErrorInFunction<<"In boundary "<<this->name()<<
+           " ,number of recieved data is "<< nRecv<<
+           " and neighborProcSize is "<<neighborProcSize()<<endl;
            fatalExit;
        }
    }

@@ -51,12 +53,12 @@ pFlow::MPI::boundaryProcessor::boundaryProcessor(
    uint32 thisIndex
)
: boundaryBase(dict, bplane, internal, bndrs, thisIndex),
-   sender_(
+   thisPointsInNeighbor_(
        groupNames("sendBuffer", name()),
        neighborProcessorNo(),
        thisBoundaryIndex()
    ),
-   reciever_(
+   neighborProcPoints_(
        groupNames("neighborProcPoints", name()),
        neighborProcessorNo(),
        mirrorBoundaryIndex()

@@ -65,12 +67,34 @@ pFlow::MPI::boundaryProcessor::boundaryProcessor(
}

bool
-pFlow::MPI::boundaryProcessor::beforeIteration(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::beforeIteration(
+   uint32 step,
+   const timeInfo& ti,
+   bool updateIter,
+   bool iterBeforeUpdate ,
+   bool& callAgain
+)
{
+   if(step == 1)
+   {
+       boundaryBase::beforeIteration(step, ti, updateIter, iterBeforeUpdate, callAgain);
+       callAgain = true;
+   }
+   else if(step == 2 )
+   {
+#ifdef BoundaryModel1
+       callAgain = true;
+#else
+       if(!performBoundarytUpdate())
+       {
+           callAgain = false;
+           return true;
+       }
+#endif

        thisNumPoints_ = size();

-       auto req = MPI_REQUEST_NULL;
        MPI_Isend(
            &thisNumPoints_,
            1,

@@ -78,30 +102,63 @@ pFlow::MPI::boundaryProcessor::beforeIteration(uint32 iterNum, real t, real dt)
            neighborProcessorNo(),
            thisBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
-           &req);
+           &numPointsRequest0_);

-       MPI_Recv(
+       MPI_Irecv(
            &neighborProcNumPoints_,
            1,
            MPI_UNSIGNED,
            neighborProcessorNo(),
            mirrorBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
-           MPI_STATUS_IGNORE
+           &numPointsRequest_
        );
-       MPI_Request_free(&req);
-
-       anyList varList;
-       message msg;
-
-       varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
-
-       if( !notify(iterNum, t, dt, msg, varList) )
-       {
-           fatalErrorInFunction;
-           return false;
-       }
+   }
+   else if(step == 3 )
+   {
+       callAgain = true;
+
+       if(numPointsRequest_ != RequestNull)
+       {
+           MPI_Wait(&numPointsRequest_, MPI_STATUS_IGNORE);
+           if(numPointsRequest0_!= RequestNull)
+           {
+               MPI_Wait(&numPointsRequest0_, MPI_STATUS_IGNORE);
+           }
+       }
+
+       // Size has not been changed. Notification is not required.
+       if(neighborProcNumPoints_ == neighborProcPoints_.size()) return true;
+
+       anyList varList;
+       message msg;
+
+       varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
+
+       if( !notify(ti.iter(), ti.t(), ti.dt(), msg, varList) )
+       {
+           fatalErrorInFunction;
+           callAgain = false;
+           return false;
+       }
+   }
+   else if(step == 4)
+   {
+       dataRecieved_ = false;
+       if ( !isBoundaryMaster())
+       {
+           thisPointsInNeighbor_.sendData(pFlowProcessors(), thisPoints(),"positions");
+       }
+       else if (isBoundaryMaster())
+       {
+           neighborProcPoints_.recieveData(pFlowProcessors(), neighborProcSize(), "positions");
+       }
+
+       callAgain = false;
+   }

    return true;
}

@@ -115,62 +172,46 @@ pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints()
{
    checkDataRecieved();
-   return reciever_.buffer();
+   return neighborProcPoints_.buffer();
}

const pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints() const
{
    checkDataRecieved();
-   return reciever_.buffer();
+   return neighborProcPoints_.buffer();
}

-bool
-pFlow::MPI::boundaryProcessor::updataBoundaryData(int step)
-{
-   if (step == 1)
-   {
-       sender_.sendData(pFlowProcessors(), thisPoints());
-       dataRecieved_ = false;
-   }
-   else if (step == 2)
-   {
-       reciever_.recieveData(pFlowProcessors(), neighborProcSize());
-       dataRecieved_ = false;
-   }
-   return true;
-}

-bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
+bool pFlow::MPI::boundaryProcessor::transferData(
+   uint32 iter,
+   int step,
+   bool& callAgain
+)
{
-   if(!boundaryListUpdate(iter))return false;
-
-   if(step==1)
+   if( !iterBeforeBoundaryUpdate() )
    {
-       uint32 s = size();
-       uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
-       transferFlags.fill(0u);
-
-       const auto& transferD = transferFlags.deviceViewAll();
-       deviceScatteredFieldAccess<realx3> points = thisPoints();
-       auto p = boundaryPlane().infPlane();
-
-       numToTransfer_ = 0;
+       callAgain = false;
+       return true;
    }

+   if(step == 1)
+   {
-       Kokkos::parallel_reduce
-       (
-           "boundaryProcessor::afterIteration",
-           deviceRPolicyStatic(0,s),
-           boundaryProcessorKernels::markNegative(
-               boundaryPlane().infPlane(),
-               transferFlags.deviceViewAll(),
-               thisPoints()
-           ),
-           numToTransfer_
-       );
+       uint32Vector_D transferFlags("transferFlags"+this->name());
+
+       numToTransfer_ = markInNegativeSide(
+           "transferData::markToTransfer"+this->name(),
+           transferFlags);

        uint32Vector_D keepIndices("keepIndices");

        if(numToTransfer_ != 0u)
        {
            pFlow::boundaryBaseKernels::createRemoveKeepIndices

@@ -182,6 +223,7 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
                keepIndices,
                false
            );

+           // delete transfer point from this processor
            if( !setRemoveKeepIndices(transferIndices_, keepIndices))
            {

@@ -194,60 +236,80 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
        {
            transferIndices_.clear();
        }

-       auto req = RequestNull;
        CheckMPI( Isend(
            numToTransfer_,
            neighborProcessorNo(),
            thisBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
-           &req), true );
-       //pOutput<<"sent "<< numToTransfer_<<endl;
-       CheckMPI(recv(
+           &numTransferRequest_), true );

+       CheckMPI(Irecv(
            numToRecieve_,
            neighborProcessorNo(),
            mirrorBoundaryIndex(),
            pFlowProcessors().localCommunicator(),
-           StatusesIgnore), true);
+           &numRecieveRequest_), true);

-       //pOutput<<"recieved "<<numToRecieve_<<endl;
-
-       MPI_Request_free(&req);
+       callAgain = true;
        return true;
    }
-   else if(step ==2 )
+   else if(step ==2) // to transferData to neighbor
    {
-       if( transferIndices_.empty() )return true;
+       if(numTransferRequest_!= RequestNull)
+       {
+           Wait(&numTransferRequest_, StatusIgnore);
+       }
+
+       if( numToTransfer_ == 0u)
+       {
+           callAgain = true;
+           return true;
+       }

        pointFieldAccessType transferPoints(
            transferIndices_.size(),
            transferIndices_.deviceViewAll(),
-           internal().pointPositionDevice());
+           internal().pointPositionDevice()
+       );

-       sender_.sendData(pFlowProcessors(), transferPoints);
+       // this buffer is used temporarily
+       thisPointsInNeighbor_.sendData(pFlowProcessors(), transferPoints);

        message msg;
        anyList varList;
        varList.emplaceBack(
            msg.addAndName(message::BNDR_PROCTRANSFER_SEND),
            transferIndices_);

-       if(!notify(
-           internal().time().currentIter(),
-           internal().time().currentTime(),
-           internal().time().dt(),
-           msg,
-           varList))
+       const auto ti = internal().time().TimeInfo();
+
+       if(!notify(ti, msg, varList))
        {
            fatalErrorInFunction;
+           callAgain = false;
            return false;
        }

+       callAgain = true;
        return true;
    }
-   else if(step == 3)
+   else if(step == 3) // to recieve data
    {
-       if(numToRecieve_ == 0u) return false;
-       reciever_.recieveData(pFlowProcessors(), numToRecieve_);
+       if(numRecieveRequest_ != RequestNull)
+       {
+           Wait(&numRecieveRequest_, StatusIgnore);
+       }
+
+       if(numToRecieve_ == 0u)
+       {
+           callAgain = false;
+           return true;
+       }
+
+       // this buffer is being used temporarily
+       neighborProcPoints_.recieveData(pFlowProcessors(), numToRecieve_);

        message msg;
        anyList varList;

@@ -255,65 +317,67 @@ bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
            msg.addAndName(message::BNDR_PROCTRANSFER_RECIEVE),
            numToRecieve_);

-       if(!notify(
-           internal().time().currentIter(),
-           internal().time().currentTime(),
-           internal().time().dt(),
-           msg,
-           varList))
+       const auto ti = internal().time().TimeInfo();
+       if(!notify( ti, msg, varList))
        {
            fatalErrorInFunction;
+           callAgain = false;
            return false;
        }

+       callAgain = true;
        return true;
    }
-   else if(step == 4)
+   else if(step == 4) // to insert data
    {
-       if(numToRecieve_ == 0u) return false;
-       reciever_.waitBufferForUse();
+       if(numToRecieve_ == 0u)
+       {
+           callAgain = false;
+           return true;
+       }

        // points should be inserted first
        message msg(message::BNDR_PROCTRANSFER_WAITFILL);
        anyList varList;

-       internal().insertPointsOnly(reciever_.buffer(), msg, varList);
+       neighborProcPoints_.waitBufferForUse();
+       internal().insertPointsOnly(neighborProcPoints_.buffer(), msg, varList);

        const auto& indices = varList.getObject<uint32IndexContainer>(message::eventName(message::ITEM_INSERT));

        auto indView = deviceViewType1D<uint32>(indices.deviceView().data(), indices.deviceView().size());

        uint32Vector_D newIndices("newIndices", indView);

        if(! appendNewIndices(newIndices))
        {
            fatalErrorInFunction;
+           callAgain = false;
            return false;
        }

-       if(!notify(
-           internal().time().currentIter(),
-           internal().time().currentTime(),
-           internal().time().dt(),
-           msg,
-           varList))
+       const auto ti = internal().time().TimeInfo();
+       if(!notify(ti, msg, varList))
        {
            fatalErrorInFunction;
+           callAgain = false;
            return false;
        }

-       return false;
+       callAgain = false;
+       return true;
    }

-   return false;
+   return true;
}

bool
-pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::iterate(const timeInfo& ti)
{
    return true;
}

bool
-pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::afterIteration(const timeInfo& ti)
{
    uint32 s = size();
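
beforeIteration now advances through numbered sub-steps and reports through callAgain whether it must be invoked again, which lets the caller interleave the sub-steps of all boundaries and overlap the non-blocking size exchange with other work. A sketch, not part of the commit, of the implied driver loop (hypothetical; the real boundaryList driver may differ):

    void driveBeforeIteration(pFlow::MPI::boundaryProcessor& b,
                              const pFlow::timeInfo& ti,
                              bool updateIter, bool iterBeforeUpdate)
    {
        pFlow::uint32 step = 1;
        bool callAgain = true;
        while(callAgain)
        {
            // step 1: base bookkeeping, step 2: Isend/Irecv of point counts,
            // step 3: wait and notify on size change, step 4: exchange positions
            if(!b.beforeIteration(step, ti, updateIter, iterBeforeUpdate, callAgain))
                break; // error already reported via fatalErrorInFunction
            ++step;
        }
    }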
@@ -22,9 +22,11 @@ Licence:
#define __boundaryProcessor_hpp__

#include "boundaryBase.hpp"
+#include "timeInfo.hpp"
#include "mpiTypes.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
+#include "boundaryConfigs.hpp"

namespace pFlow::MPI
{

@@ -42,11 +44,13 @@ namespace pFlow::MPI

    uint32 thisNumPoints_ = 0;

-   realx3Vector_D neighborProcPoints_;
+   Request numPointsRequest_ = RequestNull;

-   dataSender<realx3> sender_;
+   Request numPointsRequest0_ = RequestNull;

-   dataReciever<realx3> reciever_;
+   dataSender<realx3> thisPointsInNeighbor_;
+
+   dataReciever<realx3> neighborProcPoints_;

    mutable bool dataRecieved_ = true;

@@ -56,6 +60,10 @@ namespace pFlow::MPI

    uint32Vector_D transferIndices_{"transferIndices"};

+   Request numTransferRequest_ = RequestNull;
+
+   Request numRecieveRequest_ = RequestNull;

    void checkDataRecieved() const;

    /// @brief Update processor boundary data for this processor

@@ -68,7 +76,7 @@ namespace pFlow::MPI
    /// step is non-blocking recieve to get data.
    bool updataBoundaryData(int step) override;

-   bool transferData(uint32 iter, int step) override;
+   bool transferData(uint32 iter, int step, bool& callAgain) override;

public:

    TypeInfo("boundary<processor>");

@@ -87,11 +95,17 @@ namespace pFlow::MPI
        boundaryProcessor,
        dictionary);

-   bool beforeIteration(uint32 iterNum, real t, real dt) override;
+   bool beforeIteration(
+       uint32 step,
+       const timeInfo& ti,
+       bool updateIter,
+       bool iterBeforeUpdate ,
+       bool& callAgain
+   ) override;

-   bool iterate(uint32 iterNum, real t, real dt) override;
+   bool iterate(const timeInfo& ti) override;

-   bool afterIteration(uint32 iterNum, real t, real dt) override;
+   bool afterIteration(const timeInfo& ti) override;

    /// @brief Return number of points in the neighbor processor boundary.
    /// This is overriden from boundaryBase.
@@ -76,7 +76,8 @@ public:

    void recieveData(
        const localProcessors& processors,
-       uint32 numToRecv
+       uint32 numToRecv,
+       const word& name = "dataReciver"
    )
    {
        resize(numToRecv);
@@ -1,3 +1,23 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __dataSender_hpp__
#define __dataSender_hpp__
@@ -61,7 +81,8 @@ public:
 	void sendData(
 		const localProcessors& processors,
-		const scatteredFieldAccess<T, memory_space>& scatterField
+		const scatteredFieldAccess<T, memory_space>& scatterField,
+		const word& name = "dataSender::sendData"
 	)
 	{
 		using RPolicy = Kokkos::RangePolicy<

@@ -79,10 +100,10 @@ public:
 		buffer_.clear();
 		buffer_.resize(n);

 		const auto& buffView = buffer_.deviceViewAll();

 		Kokkos::parallel_for(
-			"dataSender::sendData",
+			"packDataForSend::"+name,
 			RPolicy(0,n),
 			LAMBDA_HD(uint32 i)
 			{
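Passing the caller's name into the kernel label, as "packDataForSend::"+name does above, makes each pack kernel individually identifiable in profiling output (Kokkos tools otherwise report a generic or auto-generated name). A self-contained sketch of the same labeling pattern (function and names are illustrative, not from the commit):

#include <Kokkos_Core.hpp>
#include <string>

void scaleArray(const std::string& name,
                const Kokkos::View<double*>& v, double factor)
{
	Kokkos::parallel_for(
		"scaleArray::" + name,                  // appears in profiler output
		Kokkos::RangePolicy<>(0, v.extent(0)),
		KOKKOS_LAMBDA(const int i) { v(i) *= factor; });
	Kokkos::fence();
}

Since the new name parameters are defaulted, existing call sites compile unchanged and only callers that want a distinct label need to pass one.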
@@ -21,13 +21,17 @@ Licence:
 template<class T, class MemorySpace>
 bool pFlow::Field<T, MemorySpace>::read
 (
-	iIstream& is
+	iIstream& is,
+	bool resume
 )
 {
 	bool tokenFound = true;

-	tokenFound = is.findToken(fieldKey_);
+	if(resume)
+		tokenFound = is.findTokenResume(fieldKey_);
+	else
+		tokenFound = is.findToken(fieldKey_);

 	if( !tokenFound )
 	{

@@ -53,14 +57,21 @@ template<class T, class MemorySpace>
 bool pFlow::Field<T, MemorySpace>::read
 (
 	iIstream& is,
-	const IOPattern& iop
+	const IOPattern& iop,
+	bool resume
 )
 {
 	bool tokenFound = true;

 	if(iop.thisProcReadData())
-		tokenFound = is.findToken(fieldKey_);
+	{
+		if(resume)
+			tokenFound = is.findTokenResume(fieldKey_);
+		else
+			tokenFound = is.findToken(fieldKey_);
+	}

 	if( !tokenFound )
 	{
@@ -197,12 +197,12 @@ public:
 	//// - IO operations

-	bool read(iIstream& is);
+	bool read(iIstream& is, bool resume = false);

 	bool write(iOstream& os)const;

-	bool read(iIstream& is, const IOPattern& iop);
+	bool read(iIstream& is, const IOPattern& iop, bool resume = false);

 	bool write(iOstream& os, const IOPattern& iop )const;
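A sketch of the intended call pattern (object names hypothetical): when two fields are stored back to back in one stream, reading with resume = true makes the second token search start where the first read stopped, instead of rescanning from the top of the file, which both saves a scan and avoids re-matching an identical key that appeared earlier.

// fields appear in this order inside the stream
if( !pointsField.read(is, iop, true) )   return false;
if( !verticesField.read(is, iop, true) ) return false; // continues after points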
@@ -21,3 +21,80 @@ Licence:
#include "wordVectorHost.hpp"

bool pFlow::VectorSingle<pFlow::word,pFlow::HostSpace>::insertSetElement(
	const uint32IndexContainer& indices,
	const word& val
)
{
	if(indices.empty()) return true;

	auto maxInd = indices.max();

	if(this->empty() || maxInd > size()-1 )
	{
		resize(maxInd+1);
	}

	auto ind = indices.hostView();
	auto s = indices.size();

	for(uint32 i=0; i<s; i++)
	{
		container_[ind[i]] = val;
	}

	return true;
}

bool pFlow::VectorSingle<pFlow::word,pFlow::HostSpace>::insertSetElement(
	const uint32IndexContainer& indices,
	const std::vector<word>& vals
)
{
	if(indices.empty()) return true;
	if(indices.size() != vals.size()) return false;

	auto maxInd = indices.max();

	if(this->empty() || maxInd > size()-1 )
	{
		resize(maxInd+1);
	}

	auto ind = indices.hostView();
	auto s = indices.size();

	for(uint32 i=0; i<s; i++)
	{
		container_[ind[i]] = vals[i];
	}

	return true;
}

bool pFlow::VectorSingle<pFlow::word,pFlow::HostSpace>::insertSetElement(
	const uint32IndexContainer& indices,
	const ViewType1D<word, memory_space> vals
)
{
	if(indices.empty()) return true;
	if(indices.size() != vals.size()) return false;

	auto maxInd = indices.max();

	if(this->empty() || maxInd > size()-1 )
	{
		resize(maxInd+1);
	}

	auto ind = indices.hostView();
	auto s = indices.size();

	for(uint32 i=0; i<s; i++)
	{
		container_[ind[i]] = vals[i];
	}

	return true;
}
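These overloads exist because word (a string type) cannot go through the generic device-side path: a string cannot be constructed or assigned inside a GPU kernel, so the specialization takes the host view of the indices and assigns element by element on the host. The same scatter-assign pattern in plain C++ terms (an illustrative sketch, not code from the commit):

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

void scatterAssign(std::vector<std::string>& container,
                   const std::vector<std::uint32_t>& indices,
                   const std::string& val)
{
	if (indices.empty()) return;

	// grow the container if an index points past the current end
	std::uint32_t maxInd = 0;
	for (auto i : indices) maxInd = std::max(maxInd, i);
	if (container.size() <= maxInd) container.resize(maxInd + 1);

	for (auto i : indices) container[i] = val; // host loop, no kernel launch
}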
@@ -372,28 +372,20 @@ public:
 		return span<word>(const_cast<word*>(container_.data()), container_.size());
 	}

-	INLINE_FUNCTION_H
-	bool insertSetElement(const uint32IndexContainer& indices, const word& val)
-	{
-		notImplementedFunction;
-		return false;
-	}
+	bool insertSetElement(const uint32IndexContainer& indices, const word& val);

-	INLINE_FUNCTION_H
-	bool insertSetElement(const uint32IndexContainer& indices, const std::vector<word>& vals)
-	{
-		notImplementedFunction;
-		return false;
-	}
+	bool insertSetElement(const uint32IndexContainer& indices, const std::vector<word>& vals);

 	INLINE_FUNCTION_H
 	bool insertSetElement(
 		const uint32IndexContainer& indices,
-		const ViewType1D<word, memory_space> vals)
-	{
-		notImplementedFunction;
-		return false;
-	}
+		const ViewType1D<word, memory_space> vals
+	);

 	INLINE_FUNCTION_H
 	bool reorderItems(const uint32IndexContainer& indices)
@@ -0,0 +1,10 @@
#ifndef __boundaryConfigs_hpp__
#define __boundaryConfigs_hpp__

#ifndef BoundaryModel1
//#define BoundaryModel1
#endif

#endif //__boundaryConfigs_hpp__
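boundaryConfigs.hpp is a central compile-time switch: BoundaryModel1 stays undefined by default, and uncommenting the #define selects the alternative model-1 boundary handling everywhere the macro is tested. Consumers branch on it like this (a sketch of the pattern, not code from the commit):

#include "boundaryConfigs.hpp"

#ifndef BoundaryModel1
	// default boundary data model is compiled
#else
	// alternative model-1 path is compiled
#endif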
@@ -0,0 +1,88 @@
#include "MPITimer.hpp"

#ifdef pFlow_Build_MPI
#include "pFlowProcessors.hpp"
#include "procCommunication.hpp"
#endif

pFlow::real pFlow::MPITimer::totalTimeMax() const
{
	return accTimersTotalMax();
}

std::vector<pFlow::real> pFlow::MPITimer::totalTimeAllToAll() const
{
#ifdef pFlow_Build_MPI
	MPI::procCommunication comm(pFlowProcessors());
	auto [totTime, succs] = comm.collectAllToAll(totalTime());
	return totTime;
#else
	return std::vector<real>(1, totalTime());
#endif
}

std::vector<pFlow::real> pFlow::MPITimer::totalTimeAllToMaster() const
{
#ifdef pFlow_Build_MPI
	MPI::procCommunication comm(pFlowProcessors());
	auto [totTime, succs] = comm.collectAllToMaster(totalTime());
	return totTime;
#else
	return std::vector<real>(1, totalTime());
#endif
}

pFlow::real pFlow::MPITimer::averageTimeMax() const
{
	return Timer::averageTimeMax();
}

std::vector<pFlow::real> pFlow::MPITimer::averageTimeAllToAll() const
{
#ifdef pFlow_Build_MPI
	MPI::procCommunication comm(pFlowProcessors());
	auto [totTime, succs] = comm.collectAllToAll(averageTime());
	return totTime;
#else
	return std::vector<real>(1, averageTime());
#endif
}

std::vector<pFlow::real> pFlow::MPITimer::averageTimeAllAtoMaster() const
{
#ifdef pFlow_Build_MPI
	MPI::procCommunication comm(pFlowProcessors());
	auto [totTime, succs] = comm.collectAllToMaster(averageTime());
	return totTime;
#else
	return std::vector<real>(1, averageTime());
#endif
}

bool pFlow::MPITimer::write(iOstream &os) const
{
	const auto ts = totalTimeAllToAll();
	auto maxloc = std::distance(ts.begin(), std::max_element(ts.begin(), ts.end()));
	os<<'(';
	for(auto i=0; i<ts.size(); i++)
	{
		if(maxloc == i)
			os<<Red_Text(ts[i]);
		else
			os<<ts[i];

		if(i != ts.size()-1)
			os<<' ';
		else
			os<<')'<<endl;
	}

	return true;
}

static pFlow::MPITimer ComputationTimer__{"ComputationTimer"};

pFlow::MPITimer &pFlow::ComputationTimer()
{
	return ComputationTimer__;
}
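A hedged usage sketch (it assumes Timer's start()/end() accumulation interface and pFlow's global output stream; both are assumptions here, not shown in this commit). Note that write() must be reached by every processor in pFlowProcessors, since totalTimeAllToAll() is a collective exchange:

void reportCommTime()
{
	pFlow::MPITimer commTimer("boundaryComm");

	commTimer.start();
	// ... timed region ...
	commTimer.end();

	// collective call: all processors must reach it; the slowest
	// processor's total is highlighted in the printed list
	commTimer.write(pFlow::output);
}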
@@ -0,0 +1,55 @@
#ifndef __MPITimer_hpp__
#define __MPITimer_hpp__

#include "Timer.hpp"

namespace pFlow
{

class MPITimer
:
	public Timer
{
private:

	// hiding methods
	using Timer::accTimersTotal;
	using Timer::averageTime;

public:

	TypeInfo("MPITimer");

	explicit MPITimer(const word& name)
	:
		Timer(name)
	{}

	real totalTimeMax()const;

	std::vector<real> totalTimeAllToAll()const;

	std::vector<real> totalTimeAllToMaster()const;

	real averageTimeMax()const;

	std::vector<real> averageTimeAllToAll()const;

	std::vector<real> averageTimeAllAtoMaster()const;

	// call this from all processors in pFlowProcessors
	bool write(iOstream& os)const;

};

MPITimer& ComputationTimer();

}

#endif
@@ -0,0 +1,50 @@
#ifndef __boundariesMask_hpp__
#define __boundariesMask_hpp__

#include <array>

namespace pFlow
{

template<size_t N>
class boundariesMask
:
	public std::array<bool,N>
{
public:

	boundariesMask()=default;

	boundariesMask(bool val)
	{
		this->fill(val);
	}

	boundariesMask(std::initializer_list<bool> l)
	:
		std::array<bool,N>(l)
	{}

	inline
	bool allElements(bool val)
	{
		return std::all_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
	}

	inline
	bool anyElement(bool val)
	{
		return std::any_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
	}

	inline
	bool noElement(bool val)
	{
		return std::none_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
	}
};

}

#endif //__boundariesMask_hpp__
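A usage sketch (the six-face interpretation is an assumption for illustration). Note that std::all_of and friends live in <algorithm>, which the header as committed does not include itself, so it presumably relies on a transitive include:

#include "boundariesMask.hpp"

void example()
{
	pFlow::boundariesMask<6> faces(false); // one flag per face of a box domain
	faces[0] = true;                       // mark, say, the x-min face

	if( faces.anyElement(true) )
	{
		// at least one boundary requires treatment this iteration
	}
}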
@@ -339,7 +339,7 @@ public:
 	virtual
-	bool beforeIteration(uint32 iterNum, real t, real dt) = 0 ;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) = 0 ;

 	virtual
 	bool iterate(uint32 iterNum, real t, real dt) = 0;
@@ -42,12 +42,14 @@ pFlow::boundaryExit::boundaryExit
 bool pFlow::boundaryExit::beforeIteration
 (
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt
 )
 {
+	if(step!= 2 )return true;

 	if( !boundaryListUpdate(iterNum))return true;

 	// nothing have to be done
@@ -63,7 +63,7 @@ public:
 		dictionary
 	);

-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;

 	bool iterate(uint32 iterNum, real t, real dt) override;
@@ -92,9 +92,12 @@ pFlow::boundaryList::boundaryList(pointStructure& pStruct)
 	: ListPtr<boundaryBase>(pStruct.simDomain().sizeOfBoundaries()),
 	  pStruct_(pStruct),
 	  neighborListUpdateInterval_(
-		pStruct.simDomain().subDict("boundaries").getVal<uint32>(
-			"neighborListUpdateInterval"
-		)
+		max(
+			pStruct.simDomain().subDict("boundaries").getVal<uint32>(
+				"neighborListUpdateInterval"
+			),
+			1u
+		)
 	  )
 {
 }
@@ -169,7 +172,17 @@ pFlow::boundaryList::beforeIteration(uint32 iter, real t, real dt, bool force)
 	for (auto bdry : *this)
 	{
-		if (!bdry->beforeIteration(iter, t, dt))
+		if (!bdry->beforeIteration(1, iter, t, dt))
 		{
 			fatalErrorInFunction << "Error in beforeIteration in boundary "
 				<< bdry->name() << endl;
 			return false;
 		}
 	}

+	for (auto bdry : *this)
+	{
+		if (!bdry->beforeIteration(2, iter, t, dt))
+		{
+			fatalErrorInFunction << "Error in beforeIteration in boundary "
+				<< bdry->name() << endl;
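The two passes give every boundary a chance to do its step-1 work (for example, posting non-blocking sends of neighbor data) before any boundary runs its step-2 work (completing the matching receives). A hypothetical boundary implementation would dispatch on the step argument like this (a sketch of the pattern, not code from the commit):

bool myBoundary::beforeIteration(uint32 step, uint32 iterNum, real t, real dt)
{
	if(step == 1)
	{
		// pass 1: initiate work that other boundaries may depend on,
		// e.g. post non-blocking MPI sends/receives
		return true;
	}

	if(step == 2)
	{
		// pass 2: complete the work, e.g. wait on the requests and
		// consume the received data
		return true;
	}

	return true; // other steps: nothing to do
}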
@@ -34,6 +34,7 @@ pFlow::boundaryNone::boundaryNone
 bool pFlow::boundaryNone::beforeIteration
 (
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt
@@ -52,7 +52,7 @@ public:
 		dictionary
 	);

-	bool beforeIteration(uint32 iterNum, real t, real dt) final;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) final;

 	bool iterate(uint32 iterNum, real t, real dt) final;
@@ -51,10 +51,12 @@ pFlow::realx3 pFlow::boundaryPeriodic::boundaryExtensionLength() const
 bool pFlow::boundaryPeriodic::beforeIteration(
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt)
 {
+	if(step!=2)return true;
 	// nothing have to be done
 	if(empty())
 	{
@@ -64,7 +64,7 @@ public:
 	//const plane& boundaryPlane()const override;*/

-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;

 	bool iterate(uint32 iterNum, real t, real dt) override;
@@ -46,6 +46,7 @@ pFlow::boundaryReflective::boundaryReflective
 }

 bool pFlow::boundaryReflective::beforeIteration(
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt)
@@ -59,7 +59,7 @@ public:
 		dictionary
 	);

-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;

 	bool iterate(uint32 iterNum, real t, real dt) override;
@@ -32,9 +32,12 @@ bool pFlow::regularSimulationDomain::createBoundaryDicts()
 	auto& rbBoundaries = this->subDict("regularBoundaries");

 	auto neighborLength = boundaries.getVal<real>("neighborLength");
-	auto boundaryExtntionLengthRatio =
-		boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1);
-	auto updateIntercal = boundaries.getValOrSet<uint32>("updateInterval", 1u);
+	auto boundaryExtntionLengthRatio = max(
+		boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1),
+		0.0);
+	auto updateIntercal = max(
+		boundaries.getValOrSet<uint32>("updateInterval", 1u),
+		1u);

 	for(uint32 i=0; i<sizeOfBoundaries(); i++)
 	{
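This is the same read-then-clamp sanitization as in the boundaryList constructor: read the value with a default, then force it into a safe range so that a user-supplied updateInterval of 0 or a negative extension ratio cannot break the update logic. In isolation the pattern is (dict standing for any pFlow dictionary):

auto ratio    = max(dict.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1), 0.0);
auto interval = max(dict.getValOrSet<uint32>("updateInterval", 1u), 1u);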
@@ -257,7 +257,7 @@ pFlow::triSurface::triSurface
 bool pFlow::triSurface::read(iIstream &is, const IOPattern &iop)
 {
 	points_.clear();
-	if(!points_.read(is, iop))
+	if(!points_.read(is, iop, true))
 	{
 		ioErrorInFile(is.name(), is.lineNumber())<<
 			" when reading field "<< points_.name()<<endl;

@@ -265,7 +265,7 @@ bool pFlow::triSurface::read(iIstream &is, const IOPattern &iop)
 	}

 	vertices_.clear();
-	if(!vertices_.read(is, iop))
+	if(!vertices_.read(is, iop, true))
 	{
 		ioErrorInFile(is.name(), is.lineNumber())<<
 			" when reading field "<< vertices_.name()<<endl;
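These two calls are where the resume flag added to Field::read pays off: points_ and vertices_ are stored consecutively in the same surface file, so each read continues the token search from the current stream position rather than from the top of the file.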