diff --git a/src/Integration/AdamsBashforth2/AB2Kernels.hpp b/src/Integration/AdamsBashforth2/AB2Kernels.hpp
new file mode 100644
index 00000000..578cf07e
--- /dev/null
+++ b/src/Integration/AdamsBashforth2/AB2Kernels.hpp
@@ -0,0 +1,63 @@
+
+
+#ifndef __AB2Kernels_hpp__
+#define __AB2Kernels_hpp__
+
+#include "KokkosTypes.hpp"
+#include "types.hpp"
+
+namespace pFlow::AB2Kernels
+{
+inline
+bool intAllActive(
+	const word& name,
+	real dt,
+	rangeU32 activeRng,
+	const deviceViewType1D<realx3>& y,
+	const deviceViewType1D<realx3>& dy,
+	const deviceViewType1D<realx3>& dy1
+)
+{
+	Kokkos::parallel_for(
+		name,
+		deviceRPolicyStatic (activeRng.start(), activeRng.end()),
+		LAMBDA_HD(uint32 i){
+			y[i] += dt*(static_cast<real>(1.5) * dy[i] - static_cast<real>(0.5) * dy1[i]);
+			dy1[i] = dy[i];
+		});
+	Kokkos::fence();
+
+	return true;
+}
+
+inline
+bool intScattered
+(
+	const word& name,
+	real dt,
+	const pFlagTypeDevice& activePoints,
+	const deviceViewType1D<realx3>& y,
+	const deviceViewType1D<realx3>& dy,
+	const deviceViewType1D<realx3>& dy1
+)
+{
+
+	Kokkos::parallel_for(
+		name,
+		deviceRPolicyStatic (activePoints.activeRange().start(), activePoints.activeRange().end()),
+		LAMBDA_HD(uint32 i){
+			if( activePoints(i))
+			{
+				y[i] += dt*(static_cast<real>(1.5) * dy[i] - static_cast<real>(0.5) * dy1[i]);
+				dy1[i] = dy[i];
+			}
+		});
+	Kokkos::fence();
+
+
+	return true;
+}
+
+}
+
+#endif
\ No newline at end of file
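The kernels above apply the second-order Adams-Bashforth update, y(n+1) = y(n) + dt*(3/2*dy(n) - 1/2*dy(n-1)), and then shift the current derivative into the "old" slot. A minimal host-side sketch of the same rule on plain std::vector, without the Kokkos machinery (illustration only, not part of the patch):

```cpp
#include <vector>
#include <cstddef>

// y_{n+1} = y_n + dt * (3/2 * dy_n - 1/2 * dy_{n-1})
void ab2Step(std::vector<double>& y,
             const std::vector<double>& dy,  // derivative at step n
             std::vector<double>& dy1,       // derivative at step n-1
             double dt)
{
    for (std::size_t i = 0; i < y.size(); ++i)
    {
        y[i] += dt * (1.5 * dy[i] - 0.5 * dy1[i]);
        dy1[i] = dy[i]; // current derivative becomes "old" for the next step
    }
}
```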
diff --git a/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.cpp b/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.cpp
new file mode 100644
index 00000000..2fd9dbc1
--- /dev/null
+++ b/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.cpp
@@ -0,0 +1,71 @@
+#include "processorAB2BoundaryIntegration.hpp"
+#include "AdamsBashforth2.hpp"
+#include "AB2Kernels.hpp"
+#include "boundaryConfigs.hpp"
+
+pFlow::processorAB2BoundaryIntegration::processorAB2BoundaryIntegration(
+	const boundaryBase &boundary,
+	const pointStructure &pStruct,
+	const word &method,
+	integration& intgrtn
+)
+:
+	boundaryIntegration(boundary, pStruct, method, intgrtn)
+{}
+
+bool pFlow::processorAB2BoundaryIntegration::correct(
+	real dt,
+	const realx3PointField_D& y,
+	const realx3PointField_D& dy
+)
+{
+
+#ifndef BoundaryModel1
+	if(this->isBoundaryMaster())
+	{
+		const uint32 thisIndex = thisBoundaryIndex();
+		const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
+		const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
+		const auto& dyView = dy.BoundaryField(thisIndex).neighborProcField().deviceView();
+		const auto& yView = y.BoundaryField(thisIndex).neighborProcField().deviceView();
+		const rangeU32 aRange(0u, dy1View.size());
+		return AB2Kernels::intAllActive(
+			"AB2Integration::correct."+this->boundaryName(),
+			dt,
+			aRange,
+			yView,
+			dyView,
+			dy1View
+		);
+	}
+#endif //BoundaryModel1
+
+
+	return true;
+}
+
+bool pFlow::processorAB2BoundaryIntegration::correctPStruct(real dt, const realx3PointField_D &vel)
+{
+
+#ifndef BoundaryModel1
+	if(this->isBoundaryMaster())
+	{
+		const uint32 thisIndex = thisBoundaryIndex();
+		const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
+		const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
+		const auto& velView = vel.BoundaryField(thisIndex).neighborProcField().deviceView();
+		const auto& xposView = boundary().neighborProcPoints().deviceView();
+		const rangeU32 aRange(0u, dy1View.size());
+		return AB2Kernels::intAllActive(
+			"AB2Integration::correctPStruct."+this->boundaryName(),
+			dt,
+			aRange,
+			xposView,
+			velView,
+			dy1View
+		);
+	}
+#endif //BoundaryModel1
+
+	return true;
+}
diff --git a/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.hpp b/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.hpp
new file mode 100644
index 00000000..7ff6aacb
--- /dev/null
+++ b/src/Integration/AdamsBashforth2/processorAB2BoundaryIntegration.hpp
@@ -0,0 +1,51 @@
+
+
+#ifndef __processorAB2BoundaryIntegration_hpp__
+#define __processorAB2BoundaryIntegration_hpp__
+
+#include "boundaryIntegration.hpp"
+
+namespace pFlow
+{
+
+class processorAB2BoundaryIntegration
+:
+	public boundaryIntegration
+{
+public:
+
+	TypeInfo("boundaryIntegration<processor,AdamsBashforth2>");
+
+	processorAB2BoundaryIntegration(
+		const boundaryBase& boundary,
+		const pointStructure& pStruct,
+		const word& method,
+		integration& intgrtn
+	);
+
+	~processorAB2BoundaryIntegration()override=default;
+
+
+	bool correct(
+		real dt,
+		const realx3PointField_D& y,
+		const realx3PointField_D& dy)override;
+
+
+
+	bool correctPStruct(real dt, const realx3PointField_D& vel)override;
+
+
+	add_vCtor(
+		boundaryIntegration,
+		processorAB2BoundaryIntegration,
+		boundaryBase
+	);
+
+
+
+};
+
+}
+
+#endif
\ No newline at end of file
diff --git a/src/Integration/boundaries/boundaryIntegration.cpp b/src/Integration/boundaries/boundaryIntegration.cpp
new file mode 100644
index 00000000..e13a5624
--- /dev/null
+++ b/src/Integration/boundaries/boundaryIntegration.cpp
@@ -0,0 +1,55 @@
+#include "boundaryIntegration.hpp"
+#include "pointStructure.hpp"
+
+
+pFlow::boundaryIntegration::boundaryIntegration(
+	const boundaryBase &boundary,
+	const pointStructure &pStruct,
+	const word &method,
+	integration& intgrtn
+)
+:
+	generalBoundary(boundary, pStruct, "", method),
+	integration_(intgrtn)
+{}
+
+pFlow::uniquePtr<pFlow::boundaryIntegration> pFlow::boundaryIntegration::create(
+	const boundaryBase &boundary,
+	const pointStructure &pStruct,
+	const word &method,
+	integration& intgrtn
+)
+{
+
+	word bType = angleBracketsNames2(
+		"boundaryIntegration",
+		boundary.type(),
+		method);
+
+	word altBType{"boundaryIntegration"};
+
+	if( boundaryBasevCtorSelector_.search(bType) )
+	{
+		pOutput.space(4)<<"Creating boundary "<< Green_Text(bType)<<
+		" for "<<boundary.name()<<endl;
+		return boundaryBasevCtorSelector_[bType](boundary, pStruct, method, intgrtn);
+	}
+	else if( boundaryBasevCtorSelector_.search(altBType) )
+	{
+		pOutput.space(4)<<"Creating boundary "<< Green_Text(altBType)<<
+		" for "<<boundary.name()<<endl;
+		return boundaryBasevCtorSelector_[altBType](boundary, pStruct, method, intgrtn);
+	}
+	else
+	{
+		printKeys(
+			fatalError << "Ctor Selector "<< bType <<" and "<< altBType <<
+			" do not exist. \nAvaiable ones are: \n",
+			boundaryBasevCtorSelector_
+		);
+		fatalExit;
+	}
+	return nullptr;
+}
diff --git a/src/Integration/boundaries/boundaryIntegration.hpp b/src/Integration/boundaries/boundaryIntegration.hpp
new file mode 100644
--- /dev/null
+++ b/src/Integration/boundaries/boundaryIntegration.hpp
@@ -0,0 +1,95 @@
+
+#ifndef __boundaryIntegration_hpp__
+#define __boundaryIntegration_hpp__
+
+#include "generalBoundary.hpp"
+#include "virtualConstructor.hpp"
+
+namespace pFlow
+{
+
+class integration;
+
+class boundaryIntegration
+:
+	public generalBoundary
+{
+private:
+
+	/// ref to the integration method
+	integration& integration_;
+
+public:
+
+	TypeInfo("boundaryIntegration<none>");
+
+	boundaryIntegration(
+		const boundaryBase& boundary,
+		const pointStructure& pStruct,
+		const word& method,
+		integration& intgrtn);
+
+	~boundaryIntegration()override = default;
+
+	create_vCtor(
+		boundaryIntegration,
+		boundaryBase,
+		(
+			const boundaryBase& boundary,
+			const pointStructure& pStruct,
+			const word& method,
+			integration& intgrtn
+		),
+		(boundary, pStruct, method, intgrtn)
+	);
+
+	add_vCtor(
+		boundaryIntegration,
+		boundaryIntegration,
+		boundaryBase
+	);
+
+	bool hearChanges(
+		real t,
+		real dt,
+		uint32 iter,
+		const message &msg,
+		const anyList &varList) override
+	{
+		return true;
+	}
+
+	const integration& Integration()const
+	{
+		return integration_;
+	}
+
+	virtual
+	bool correct(real dt, const realx3PointField_D& y, const realx3PointField_D& dy)
+	{
+		return true;
+	}
+
+	virtual
+	bool correctPStruct(real dt, const realx3PointField_D& vel)
+	{
+		return true;
+	}
+
+	static
+	uniquePtr<boundaryIntegration> create(
+		const boundaryBase& boundary,
+		const pointStructure& pStruct,
+		const word& method,
+		integration& intgrtn);
+
+};
+
+}
+
+#endif
\ No newline at end of file
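boundaryIntegration::create above resolves a concrete boundary class at run time from a composed key ("boundaryIntegration<type,method>"), falling back to the plain base key when no specialization is registered. A conceptual sketch of that lookup with std::function in place of phasicFlow's create_vCtor/add_vCtor macros (names here are illustrative, not the library API):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <string>

struct Base { virtual ~Base() = default; };
struct ProcessorAB2 : Base {};

using Factory = std::function<std::unique_ptr<Base>()>;

// stands in for the vCtor selector table populated by add_vCtor
static std::map<std::string, Factory> registry = {
    { "boundaryIntegration<processor,AdamsBashforth2>",
      [] { return std::make_unique<ProcessorAB2>(); } }
};

// compose the key the way angleBracketsNames2 does, then fall back
std::unique_ptr<Base> create(const std::string& bType, const std::string& fallback)
{
    if (auto it = registry.find(bType); it != registry.end()) return it->second();
    if (auto it = registry.find(fallback); it != registry.end()) return it->second();
    return nullptr; // the real code raises a fatal error with the available keys
}
```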
"boundaryIntegrationList.hpp" +#include "integration.hpp" + +pFlow::boundaryIntegrationList::boundaryIntegrationList( + const pointStructure &pStruct, + const word &method, + integration &intgrtn +) +: + ListPtr(6), + boundaries_(pStruct.boundaries()) +{ + + for(uint32 i=0; i<6; i++) + { + this->set( + i, + boundaryIntegration::create( + boundaries_[i], + pStruct, + method, + intgrtn + ) + ); + } + +} + +bool pFlow::boundaryIntegrationList::correct(real dt, realx3PointField_D &y, realx3PointField_D &dy) +{ + for(auto& bndry:*this) + { + if(!bndry->correct(dt, y, dy)) + { + fatalErrorInFunction; + return false; + } + + } + return true; +} diff --git a/src/Integration/boundaries/boundaryIntegrationList.hpp b/src/Integration/boundaries/boundaryIntegrationList.hpp new file mode 100644 index 00000000..a4e262a0 --- /dev/null +++ b/src/Integration/boundaries/boundaryIntegrationList.hpp @@ -0,0 +1,45 @@ + +#ifndef __boundaryIntegrationList_hpp__ +#define __boundaryIntegrationList_hpp__ + + +#include "boundaryList.hpp" +#include "ListPtr.hpp" +#include "boundaryIntegration.hpp" + + +namespace pFlow +{ + +class integration; + +class boundaryIntegrationList +: + public ListPtr +{ +private: + + const boundaryList& boundaries_; + +public: + + boundaryIntegrationList( + const pointStructure& pStruct, + const word& method, + integration& intgrtn + ); + + ~boundaryIntegrationList()=default; + + bool correct( + real dt, + realx3PointField_D& y, + realx3PointField_D& dy); +}; + + + +} + + +#endif //__boundaryIntegrationList_hpp__ \ No newline at end of file diff --git a/src/Interaction/CMakeLists.txt b/src/Interaction/CMakeLists.txt index 96197f6a..3b985647 100644 --- a/src/Interaction/CMakeLists.txt +++ b/src/Interaction/CMakeLists.txt @@ -7,8 +7,7 @@ contactSearch/methods/cellBased/NBS/NBS.cpp contactSearch/methods/cellBased/NBS/cellsWallLevel0.cpp contactSearch/boundaries/boundaryContactSearch/boundaryContactSearch.cpp -#contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp -#contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp + contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearchKernels.cpp contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.cpp contactSearch/boundaries/periodicBoundaryContactSearch/wallBoundaryContactSearch.cpp @@ -28,6 +27,8 @@ if(pFlow_Build_MPI) list(APPEND SourceFiles contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp + contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp + contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp ) endif() diff --git a/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp b/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp index 8281c55c..323f23f7 100644 --- a/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp +++ b/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp @@ -99,7 +99,7 @@ bool pFlow::processorBoundaryContactSearch::broadSearch thisDiams, neighborProcPoints, neighborProcDiams, - name() + boundaryName() ); //pOutput<<"ppSize "<< ppPairs.size()< inline void sphereSphereInteraction ( + const word& kernalName, real dt, const ContactListType& cntctList, 
const ContactForceModel& forceModel, @@ -36,7 +37,7 @@ void sphereSphereInteraction uint32 lastItem = cntctList.loopCount(); Kokkos::parallel_for( - "pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction", + kernalName, deviceRPolicyDynamic(0,lastItem), LAMBDA_HD(uint32 n) { diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp index f0e2a9a1..231260da 100644 --- a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp @@ -32,11 +32,6 @@ pFlow::MPI::processorBoundarySphereInteraction::processorBoundarySpher geomMotion ), masterInteraction_(boundary.isBoundaryMaster()) - , - inter_("inter"), - send_("send"), - recv_("recv"), - add_("add") { if(masterInteraction_) { @@ -46,6 +41,9 @@ pFlow::MPI::processorBoundarySphereInteraction::processorBoundarySpher } + +#ifdef BoundaryModel1 + template bool pFlow::MPI::processorBoundarySphereInteraction::sphereSphereInteraction ( @@ -74,8 +72,8 @@ bool pFlow::MPI::processorBoundarySphereInteraction::sphereSphereInter if(step == 2 ) { iter++; - inter_.start(); pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction( + "ppBoundaryInteraction."+this->boundaryName(), dt, this->ppPairs(), cfModel, @@ -94,64 +92,165 @@ bool pFlow::MPI::processorBoundarySphereInteraction::sphereSphereInter cfBndry.neighborProcField().deviceViewAll(), ctBndry.neighborProcField().deviceViewAll() ); - inter_.end(); + return true; } else if(step == 3 ) { - send_.start(); cfBndry.sendBackData(); ctBndry.sendBackData(); - send_.end(); + return true; } - if(iter % 1000 == 0u) - { - pOutput<<"inter "<< inter_.totalTime()<sphParticles(); - uint32 thisIndex = this->boundary().thisBoundaryIndex(); - const auto& cfBndry = static_cast&>( - sphPar.contactForce().BoundaryField(thisIndex)); - const auto& ctBndry = static_cast&> ( - sphPar.contactTorque().BoundaryField(thisIndex)); - if(step==1) + if(step == 1 ) { - recv_.start(); + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + const auto& cfBndry = static_cast&>( + sphPar.contactForce().BoundaryField(thisIndex)); + const auto& ctBndry = static_cast&> ( + sphPar.contactTorque().BoundaryField(thisIndex)); + cfBndry.recieveBackData(); ctBndry.recieveBackData(); - recv_.end(); + return false; } - else if(step == 2) + else if(step == 11) { - iter++; - return true; - } - else if(step == 3) - { - add_.start(); + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + const auto& cfBndry = static_cast&>( + sphPar.contactForce().BoundaryField(thisIndex)); + const auto& ctBndry = static_cast&> ( + sphPar.contactTorque().BoundaryField(thisIndex)); + cfBndry.addBufferToInternalField(); ctBndry.addBufferToInternalField(); - add_.end(); + return true; } - if(iter % 1000 == 0u) - { - pOutput<<"recive "<< recv_.totalTime()< +bool pFlow::MPI::processorBoundarySphereInteraction::sphereSphereInteraction +( + real dt, + const ContactForceModel &cfModel, + uint32 step +) +{ + + // master processor calculates the contact force/torque and sends data back to the + // neighbor processor (slave processor). 
+ // slave processor recieves the data and adds the data to the internalField + if(masterInteraction_) + { + if(step==1)return true; + + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + + const auto& cfBndry = static_cast&> ( + sphPar.contactForce().BoundaryField(thisIndex)); + + const auto& ctBndry = static_cast&> ( + sphPar.contactTorque().BoundaryField(thisIndex)); + + if(step == 2 ) + { + + pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction( + "ppBoundaryInteraction."+this->boundaryName(), + dt, + this->ppPairs(), + cfModel, + this->boundary().thisPoints(), + sphPar.diameter().deviceViewAll(), + sphPar.propertyId().deviceViewAll(), + sphPar.velocity().deviceViewAll(), + sphPar.rVelocity().deviceViewAll(), + sphPar.contactForce().deviceViewAll(), + sphPar.contactTorque().deviceViewAll(), + this->boundary().neighborProcPoints().deviceViewAll(), + sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + cfBndry.neighborProcField().deviceViewAll(), + ctBndry.neighborProcField().deviceViewAll() + ); + + return true; + } + else if(step == 3 ) + { + cfBndry.sendBackData(); + ctBndry.sendBackData(); + return true; + } + else if(step == 11 ) + { + cfBndry.updateBoundaryFromSlave(); + ctBndry.updateBoundaryFromSlave(); + return true; + } + + return false; + } + else + { + + if(step == 1 ) + { + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + const auto& cfBndry = static_cast&>( + sphPar.contactForce().BoundaryField(thisIndex)); + const auto& ctBndry = static_cast&> ( + sphPar.contactTorque().BoundaryField(thisIndex)); + + cfBndry.recieveBackData(); + ctBndry.recieveBackData(); + + return false; + } + else if(step == 11) + { + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + const auto& cfBndry = static_cast&>( + sphPar.contactForce().BoundaryField(thisIndex)); + const auto& ctBndry = static_cast&> ( + sphPar.contactTorque().BoundaryField(thisIndex)); + + cfBndry.addBufferToInternalField(); + cfBndry.updateBoundaryToMaster(); + + ctBndry.addBufferToInternalField(); + ctBndry.updateBoundaryToMaster(); + + return true; + } + + return false; + } + + return false; +} + +#endif \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp index a07d434a..5d27bd76 100644 --- a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp @@ -22,6 +22,7 @@ Licence: #include "boundarySphereInteraction.hpp" #include "processorBoundaryField.hpp" +#include "boundaryProcessor.hpp" namespace pFlow::MPI { @@ -57,12 +58,6 @@ private: bool masterInteraction_; - Timer inter_; - Timer send_; - Timer recv_; - Timer add_; - uint32 iter=0; - public: TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel); diff --git 
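The step numbers above (1, 2, 3, 11) sequence one non-blocking exchange round: the slave posts its receives early, the master computes into its neighbor buffers and sends them back, and both sides consume the data at the final step. A bare-MPI sketch of the same round, with illustrative names and MPI_DOUBLE standing in for the realx3 buffers:

```cpp
#include <mpi.h>
#include <vector>

// One force-exchange round between the master rank (owner of the contact
// computation) and its slave neighbor; the steps mirror the hooks above.
void exchangeRound(int rank, int master, int slave, std::vector<double>& forces)
{
    MPI_Request req = MPI_REQUEST_NULL;

    if (rank == slave)   // step 1: post the receive before any computation
        MPI_Irecv(forces.data(), static_cast<int>(forces.size()), MPI_DOUBLE,
                  master, 0, MPI_COMM_WORLD, &req);

    if (rank == master)
    {
        // step 2: compute contact forces into the send buffer (omitted)
        // step 3: send results back without blocking the time loop
        MPI_Isend(forces.data(), static_cast<int>(forces.size()), MPI_DOUBLE,
                  slave, 0, MPI_COMM_WORLD, &req);
    }

    // step 11: both sides complete the transfer before using the buffer
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}
```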
a/src/Particles/SphereParticles/boundarySphereParticles.cpp b/src/Particles/SphereParticles/boundarySphereParticles.cpp
new file mode 100644
index 00000000..8eb5eaa0
--- /dev/null
+++ b/src/Particles/SphereParticles/boundarySphereParticles.cpp
@@ -0,0 +1,64 @@
+#include "boundarySphereParticles.hpp"
+#include "boundaryBase.hpp"
+#include "sphereParticles.hpp"
+
+
+pFlow::boundarySphereParticles::boundarySphereParticles(
+	const boundaryBase &boundary,
+	sphereParticles &prtcls
+)
+:
+	generalBoundary(boundary, prtcls.pStruct(), "", ""),
+	particles_(prtcls)
+{
+
+}
+
+pFlow::sphereParticles &pFlow::boundarySphereParticles::Particles()
+{
+	return particles_;
+}
+
+const pFlow::sphereParticles &pFlow::boundarySphereParticles::Particles() const
+{
+	return particles_;
+}
+
+pFlow::uniquePtr<pFlow::boundarySphereParticles> pFlow::boundarySphereParticles::create(
+	const boundaryBase &boundary,
+	sphereParticles &prtcls
+)
+{
+
+	word bType = angleBracketsNames2(
+		"boundarySphereParticles",
+		pFlowProcessors().localRunTypeName(),
+		boundary.type());
+
+	word altBType{"boundarySphereParticles"};
+
+	if( boundaryBasevCtorSelector_.search(bType) )
+	{
+		pOutput.space(4)<<"Creating boundary "<< Green_Text(bType)<<
+		" for "<<boundary.name()<<endl;
+		return boundaryBasevCtorSelector_[bType](boundary, prtcls);
+	}
+	else if( boundaryBasevCtorSelector_.search(altBType) )
+	{
+		pOutput.space(4)<<"Creating boundary "<< Green_Text(altBType)<<
+		" for "<<boundary.name()<<endl;
+		return boundaryBasevCtorSelector_[altBType](boundary, prtcls);
+	}
+	else
+	{
+		printKeys(
+			fatalError << "Ctor Selector "<< bType <<" and "<< altBType <<
+			" do not exist. \nAvaiable ones are: \n",
+			boundaryBasevCtorSelector_
+		);
+		fatalExit;
+	}
+	return nullptr;
+}
diff --git a/src/Particles/SphereParticles/boundarySphereParticles.hpp b/src/Particles/SphereParticles/boundarySphereParticles.hpp
new file mode 100644
--- /dev/null
+++ b/src/Particles/SphereParticles/boundarySphereParticles.hpp
@@ -0,0 +1,90 @@
+
+#ifndef __boundarySphereParticles_hpp__
+#define __boundarySphereParticles_hpp__
+
+#include "generalBoundary.hpp"
+#include "timeInfo.hpp"
+
+namespace pFlow
+{
+
+class sphereParticles;
+
+class boundarySphereParticles
+:
+	public generalBoundary
+{
+private:
+
+	/// ref to the sphere particles
+	sphereParticles& particles_;
+
+public:
+
+	TypeInfo("boundarySphereParticles<none>");
+
+	boundarySphereParticles(
+		const boundaryBase &boundary,
+		sphereParticles& prtcls
+	);
+
+	create_vCtor(
+		boundarySphereParticles,
+		boundaryBase,
+		(
+			const boundaryBase &boundary,
+			sphereParticles& prtcls
+		),
+		(boundary, prtcls)
+	);
+
+	add_vCtor(
+		boundarySphereParticles,
+		boundarySphereParticles,
+		boundaryBase
+	);
+
+	sphereParticles& Particles();
+
+	const sphereParticles& Particles()const;
+
+	bool hearChanges(
+		real t,
+		real dt,
+		uint32 iter,
+		const message &msg,
+		const anyList &varList) override
+	{
+		return true;
+	}
+
+	virtual
+	bool acceleration(const timeInfo& ti, const realx3& g)
+	{
+		return true;
+	}
+
+	static
+	uniquePtr<boundarySphereParticles> create(
+		const boundaryBase &boundary,
+		sphereParticles& prtcls);
+
+};
+
+
+}
+
+
+
+#endif
diff --git a/src/Particles/SphereParticles/boundarySphereParticlesList.cpp b/src/Particles/SphereParticles/boundarySphereParticlesList.cpp
new file mode 100644
index 00000000..5cbebc32
--- /dev/null
+++ b/src/Particles/SphereParticles/boundarySphereParticlesList.cpp
@@ -0,0 +1,19 @@
+#include "boundarySphereParticlesList.hpp"
+
+pFlow::boundarySphereParticlesList::boundarySphereParticlesList(
+	const boundaryList &bndrs,
+	sphereParticles &prtcls
+)
+:
+	ListPtr<boundarySphereParticles>(bndrs.size()),
+	boundaries_(bndrs)
+{
+	for(auto i=0; i<bndrs.size(); i++)
+	{
+		this->set
+		(
+			i,
+			boundarySphereParticles::create(boundaries_[i], prtcls)
+		);
+	}
+}
\ No newline at end of file
diff --git a/src/Particles/SphereParticles/boundarySphereParticlesList.hpp b/src/Particles/SphereParticles/boundarySphereParticlesList.hpp
new file mode 100644
index 00000000..cd6770df
--- /dev/null
+++ b/src/Particles/SphereParticles/boundarySphereParticlesList.hpp
@@ -0,0 +1,36 @@
+
+
+#ifndef __boundarySphereParticlesList_hpp__
+#define __boundarySphereParticlesList_hpp__
+
+#include "ListPtr.hpp"
+#include "boundaryList.hpp"
+#include "boundarySphereParticles.hpp"
+
+namespace pFlow
+{
+
+class boundarySphereParticlesList
+:
+	public ListPtr<boundarySphereParticles>
+{
+private:
+
+	const boundaryList& boundaries_;
+
+public:
+
+	boundarySphereParticlesList(
+		const boundaryList& bndrs,
+		sphereParticles& prtcls
+	);
+
+	~boundarySphereParticlesList()=default;
+
+};
+
+}
+
+
+
+#endif
\ No newline at end of file
diff --git a/src/Particles/SphereParticles/processorBoundarySphereParticles.cpp b/src/Particles/SphereParticles/processorBoundarySphereParticles.cpp
new file mode 100644
index 00000000..c07b356c
--- /dev/null
+++ b/src/Particles/SphereParticles/processorBoundarySphereParticles.cpp
@@ -0,0 +1,46 @@
+#include "processorBoundarySphereParticles.hpp"
+#include "sphereParticles.hpp"
+#include "boundaryProcessor.hpp"
+
+pFlow::processorBoundarySphereParticles::processorBoundarySphereParticles(
+	const boundaryBase &boundary,
+	sphereParticles &prtcls
+)
+:
+	boundarySphereParticles(boundary, prtcls)
+{
+
+}
+
+bool pFlow::processorBoundarySphereParticles::acceleration(const timeInfo &ti, const realx3& g)
+{
+
+
+#ifndef BoundaryModel1
+
+
+	if(isBoundaryMaster())
+	{
+		auto thisIndex = thisBoundaryIndex();
+		auto mass = Particles().mass().BoundaryField(thisIndex).neighborProcField().deviceView();
+		auto I = Particles().I().BoundaryField(thisIndex).neighborProcField().deviceView();
+		auto cf = Particles().contactForce().BoundaryField(thisIndex).neighborProcField().deviceView();
+		auto ct = Particles().contactTorque().BoundaryField(thisIndex).neighborProcField().deviceView();
+		auto acc = Particles().accelertion().BoundaryField(thisIndex).neighborProcField().deviceView();
+		auto rAcc = Particles().rAcceleration().BoundaryField(thisIndex).neighborProcField().deviceView();
+
+		Kokkos::parallel_for(
+			"processorBoundary::acceleration."+this->boundaryName(),
+			deviceRPolicyStatic(0,mass.size()),
+			LAMBDA_HD(uint32 i){
+				acc[i] = cf[i]/mass[i] + g;
+				rAcc[i] = ct[i]/I[i];
+			});
+		Kokkos::fence();
+	}
+
+#endif
+
+
+	return true;
+}
diff --git a/src/Particles/SphereParticles/processorBoundarySphereParticles.hpp b/src/Particles/SphereParticles/processorBoundarySphereParticles.hpp
new file mode 100644
index 00000000..cf4b1955
--- /dev/null
+++ b/src/Particles/SphereParticles/processorBoundarySphereParticles.hpp
@@ -0,0 +1,38 @@
+#ifndef __processorBoundarySphereParticles_hpp__
+#define __processorBoundarySphereParticles_hpp__
+
+#include "boundarySphereParticles.hpp"
+
+namespace pFlow
+{
+
+class processorBoundarySphereParticles
+:
+	public boundarySphereParticles
+{
+
+public:
+
+	/// type info
+	TypeInfo("boundarySphereParticles<MPI,processor>");
+
+	processorBoundarySphereParticles(
+		const boundaryBase &boundary,
+		sphereParticles& prtcls
+	);
+
+	add_vCtor(
+		boundarySphereParticles,
+		processorBoundarySphereParticles,
+		boundaryBase
+	);
+
+	bool acceleration(const timeInfo& ti, const realx3& g)override;
+
+
+};
+
+}
+
+
+#endif
\ No newline at end of file
diff --git a/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
index 27d259eb..2ef1caed 100644
--- a/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
+++ b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
@@ -17,7 +17,6 @@ Licence:
   implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 
 -----------------------------------------------------------------------------*/
-
 #ifndef __mpiCommunication_H__
 #define __mpiCommunication_H__
 
@@ -37,6 +36,8 @@ extern DataType realx4Type__;
 
 extern DataType int32x3Type__;
 
+extern DataType uint32x3Type__;
+
 template<typename T>
 auto constexpr Type()
 {
@@ -190,6 +191,20 @@ auto constexpr sFactor()
 	return 1;
 }
 
+template<>
+inline
+auto Type<uint32x3>()
+{
+	return uint32x3Type__;
+}
+
+
+template<>
+auto constexpr sFactor<uint32x3>()
+{
+	return 1;
+}
+
 /*inline
 auto createByteSequence(int sizeOfElement)
 {
@@ -211,6 +226,7 @@ auto TypeFree(DataType* type)
 	return MPI_Type_free(type);
 }
 
+
 template<typename T>
 inline auto getCount(Status* status, int& count)
 {
@@ -440,11 +456,6 @@ inline auto Wait(Request* request, Status* status)
 	return MPI_Wait(request, status);
 }
 
-inline auto typeFree(DataType& type)
-{
-	return MPI_Type_free(&type);
-}
-
 
 }
diff --git a/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp b/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp
index 873dd7eb..05b45e93 100644
--- a/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp
+++ b/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp
@@ -55,6 +55,8 @@ namespace pFlow::MPI
 	inline const auto ErrOp  = MPI_ERR_OP;
 
 	inline const auto SumOp = MPI_SUM;
+	inline const auto MaxOp = MPI_MAX;
+	inline const auto MinOp = MPI_MIN;
 
 	inline const size_t MaxNoProcessors = 2048;
 
diff --git a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp
index 9a5ee76e..37ecb052 100644
--- a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp
+++ b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp
@@ -35,46 +35,18 @@ pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control)
 
 bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
 {
-	auto& boundaries = this->subDict("boundaries");
+
+	dictionary& boundaries = this->subDict("boundaries");
+
+	dictionary& thisBoundaries = this->subDict(thisBoundariesDictName());
 
-	this->addDict("MPIBoundaries", boundaries);
-	auto& mpiBoundaries = this->subDict("MPIBoundaries");
-
-	real neighborLength = boundaries.getVal<real>("neighborLength");
-	auto boundaryExtntionLengthRatio = max(
-		boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1),
-		0.0);
-	auto updateIntercal = max(
-		boundaries.getValOrSet<uint32>("updateInterval", 1u),
-		1u);
-
 	auto neighbors = findPlaneNeighbors();
 
 	for(uint32 i=0; i<sizeOfBoundaries(); i++)
 	{
@@ -141,7 +113,7 @@ std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
 	return neighbors;
 }
 
-const pFlow::dictionary &
-pFlow::MPI::MPISimulationDomain::thisBoundaryDict() const
-{
-	return this->subDict("MPIBoundaries");
-}
 
 bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
 {
diff --git a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp
index bab83611..8949409e 100644
--- a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp
+++ b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp
@@ -72,8 +72,6 @@ public:
 		systemControl
 	);
 
-	const dictionary& thisBoundaryDict() const final;
-
 	/// @brief
 	/// @param pointPos
 	/// @return
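The new uint32x3Type__ above is an MPI datatype handle for the three-component unsigned-integer vector. Such a triplet type is typically registered as three contiguous 32-bit unsigned integers and committed once at startup; a sketch under that assumption (the actual registration site in phasicFlow may differ):

```cpp
#include <mpi.h>

// matches the extern DataType uint32x3Type__ declared above
MPI_Datatype uint32x3Type__ = MPI_DATATYPE_NULL;

void registerUint32x3()
{
    // one block of three MPI_UINT32_T values, laid out contiguously
    MPI_Type_contiguous(3, MPI_UINT32_T, &uint32x3Type__);
    MPI_Type_commit(&uint32x3Type__);
}
```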
diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
index b7348a2a..d622a971 100644
--- a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
+++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
@@ -25,13 +25,17 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
 {
 	if (!dataRecieved_)
 	{
-		uint32 nRecv = reciever_.waitBufferForUse();
+		uint32 nRecv = neighborProcField_.waitBufferForUse();
 		dataRecieved_ = true;
 		if (nRecv != this->neighborProcSize())
 		{
-			fatalErrorInFunction;
+			fatalErrorInFunction<<
+			"number of received data is "<< nRecv <<" and expected number is "<<
+			this->neighborProcSize()<< " in "<<this->name()<<endl;
 			fatalExit;
 		}
+		//pOutput<<this->name()<<" has received with size "<< nRecv<<endl;
 	}
 }
 
@@ -41,6 +45,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
 	int step,
 	DataDirection direction
 )
 {
+#ifndef BoundaryModel1
+	if(!this->boundary().performBoundarytUpdate())
+		return true;
+#endif
+
 	if (step == 1)
 	{
 		// Isend
@@ -49,9 +58,11 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
 		(
 			this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
 			(!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
 		{
-			sender_.sendData(pFlowProcessors(), this->thisField());
+			thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
 			dataRecieved_ = false;
+			//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
 		}
 	}
@@ -59,8 +70,10 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
 		(!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
 		( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
 		{
-			reciever_.recieveData(pFlowProcessors(), this->neighborProcSize());
+			neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
 			dataRecieved_ = false;
+			//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
 		}
 	}
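The pattern behind checkDataRecieved()/waitBufferForUse() above: the receive is posted early (non-blocking), and the first consumer of the buffer pays the wait; later accesses are free. A stripped-down analogue in plain MPI, illustrative only and not the phasicFlow API:

```cpp
#include <mpi.h>
#include <vector>

class LazyRecvBuffer
{
    std::vector<double> buf_;
    MPI_Request req_ = MPI_REQUEST_NULL;
    bool ready_ = false;

public:
    // post the receive now; do not block
    void post(int src, int count)
    {
        buf_.resize(count);
        ready_ = false;
        MPI_Irecv(buf_.data(), count, MPI_DOUBLE, src, 0, MPI_COMM_WORLD, &req_);
    }

    // first access blocks until the buffer is filled; later accesses are free
    const std::vector<double>& buffer()
    {
        if (!ready_) { MPI_Wait(&req_, MPI_STATUS_IGNORE); ready_ = true; }
        return buf_;
    }
};
```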
@@ -74,14 +87,14 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
 	InternalFieldType& internal
 )
 :
 	BoundaryFieldType(boundary, pStruct, internal),
-	sender_(
-		groupNames("sendBufferField", boundary.name()),
+	thisFieldInNeighbor_(
+		groupNames("sendBuffer", this->name()),
 		boundary.neighborProcessorNo(),
 		boundary.thisBoundaryIndex()
 	),
-	reciever_(
-		groupNames("neighborProcField", boundary.name()),
+	neighborProcField_(
+		groupNames("recieveBuffer", boundary.name()),
 		boundary.neighborProcessorNo(),
 		boundary.mirrorBoundaryIndex()
 	)
@@ -102,7 +114,7 @@ typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
 pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
 {
 	checkDataRecieved();
-	return reciever_.buffer();
+	return neighborProcField_.buffer();
 }
 
 template<class T, class MemorySpace>
@@ -111,7 +123,7 @@ const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
 pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
 {
 	checkDataRecieved();
-	return reciever_.buffer();
+	return neighborProcField_.buffer();
 }
 
 template<class T, class MemorySpace>
@@ -127,7 +139,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 	if(msg.equivalentTo(message::BNDR_PROC_SIZE_CHANGED))
 	{
 		auto newProcSize = varList.getObject<uint32>("size");
-		reciever_.resize(newProcSize);
+		neighborProcField_.resize(newProcSize);
 	}
 
 	if(msg.equivalentTo(message::BNDR_PROCTRANSFER_SEND))
 	{
@@ -144,7 +156,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 				this->internal().deviceViewAll()
 			);
 
-			sender_.sendData(pFlowProcessors(),transferData);
+			thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
 		}
 		else
 		{
@@ -154,7 +166,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 				this->internal().deviceViewAll()
 			);
 
-			sender_.sendData(pFlowProcessors(),transferData);
+			thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
 		}
 
@@ -164,12 +176,12 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 		uint32 numRecieved = varList.getObject<uint32>(
 			message::eventName(message::BNDR_PROCTRANSFER_RECIEVE)
 		);
-		reciever_.recieveData(pFlowProcessors(), numRecieved);
+		neighborProcField_.recieveData(pFlowProcessors(), numRecieved);
 	}
 	else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_WAITFILL))
 	{
-		uint32 numRecieved = reciever_.waitBufferForUse();
+		uint32 numRecieved = neighborProcField_.waitBufferForUse();
 
 		if(msg.equivalentTo(message::CAP_CHANGED))
 		{
@@ -188,7 +200,7 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 		const auto& indices = varList.getObject<uint32IndexContainer>(
 			message::eventName(message::ITEM_INSERT));
 
-		this->internal().field().insertSetElement(indices, reciever_.buffer().deviceView());
+		this->internal().field().insertSetElement(indices, neighborProcField_.buffer().deviceView());
 
 		return true;
 	}
@@ -198,14 +210,14 @@ bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
 template<class T, class MemorySpace>
 void pFlow::MPI::processorBoundaryField<T, MemorySpace>::sendBackData() const
 {
-	reciever_.sendBackData(pFlowProcessors());
+	neighborProcField_.sendBackData(pFlowProcessors());
 	dataRecieved_ = false;
 }
 
 template<class T, class MemorySpace>
 void pFlow::MPI::processorBoundaryField<T, MemorySpace>::recieveBackData() const
 {
-	sender_.recieveBackData(pFlowProcessors(), this->size());
+	thisFieldInNeighbor_.recieveBackData(pFlowProcessors(), this->size());
 }
 
 template<class T, class MemorySpace>
@@ -216,16 +228,17 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
 		Kokkos::Schedule<Kokkos::Static>,
 		Kokkos::IndexType<pFlow::uint32>>;
 
-	sender_.waitBufferForUse();
+	//pOutput<<"waiting for buffer to be received in addBufferToInternalField "<<this->name()<<endl;
+	thisFieldInNeighbor_.waitBufferForUse();
 
 	const auto& field = this->internal().deviceViewAll();
 
 	if constexpr( isDeviceAccessible )
 	{
 		const auto& indices = this->indexList().deviceViewAll();
 		Kokkos::parallel_for(
-			"dataSender::recieveBackData",
+			"recieveBackData::"+this->name(),
 			RPolicy(0,this->size()),
 			LAMBDA_HD(uint32 i)
 			{
@@ -238,7 +251,7 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
 	{
 		const auto& indices = this->boundary().indexListHost().deviceViewAll();
 		Kokkos::parallel_for(
-			"dataSender::recieveBackData",
+			"recieveBackData::"+this->name(),
 			RPolicy(0,this->size()),
 			LAMBDA_HD(uint32 i)
 			{
@@ -247,4 +260,25 @@ void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalFiel
 		);
 		Kokkos::fence();
 	}
-}
\ No newline at end of file
+}
+
+
+template<class T, class MemorySpace>
+void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryToMaster()const
+{
+	if (!this->isBoundaryMaster() )
+	{
+		thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
+		dataRecieved_ = false;
+	}
+}
+
+template<class T, class MemorySpace>
+void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryFromSlave()const
+{
+	if( this->isBoundaryMaster() )
+	{
+		neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
+		dataRecieved_ = false;
+	}
+}
diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp
index fd2c72e0..bd32d59d 100644
--- a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp
+++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp
@@ -23,6 +23,7 @@ Licence:
 #include "boundaryField.hpp"
 #include "dataSender.hpp"
 #include "dataReciever.hpp"
+#include "boundaryProcessor.hpp"
 
 namespace pFlow::MPI
 {
@@ -50,9 +51,9 @@ public:
 
 private:
 
-	dataSender<T, MemorySpace>   sender_;
+	mutable dataSender<T, MemorySpace>   thisFieldInNeighbor_;
 
-	dataReciever<T, MemorySpace> reciever_;
+	mutable dataReciever<T, MemorySpace> neighborProcField_;
 
 	mutable bool dataRecieved_ = true;
 
@@ -86,7 +87,7 @@ public:
 
 	void fill(const T& val)override
 	{
-		reciever_.fill(val);
+		neighborProcField_.fill(val);
 	}
 
 	bool hearChanges(
@@ -103,6 +104,10 @@ public:
 
 	void addBufferToInternalField()const;
 
+	void updateBoundaryToMaster()const;
+
+	void updateBoundaryFromSlave()const;
+
 };
 
 }
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
index 54bed6ad..f781a00c 100644
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
@@ -33,11 +33,13 @@ pFlow::MPI::boundaryProcessor::checkDataRecieved() const
 {
 	if (!dataRecieved_)
 	{
-		uint32 nRecv = reciever_.waitBufferForUse();
+		uint32 nRecv = neighborProcPoints_.waitBufferForUse();
 		dataRecieved_ = true;
 		if (nRecv != neighborProcSize())
 		{
-			fatalErrorInFunction;
+			fatalErrorInFunction<<"In boundary "<<this->name()<<
+			" ,number of received data is "<< nRecv<<
+			" and neighborProcSize is "<<neighborProcSize()<<endl;
 			fatalExit;
 		}
 	}
 }
@@ -150,26 +168,40 @@ bool pFlow::MPI::boundaryProcessor::transferData
-bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step)
+bool pFlow::MPI::boundaryProcessor::transferData(uint32 iter, int step, bool& callAgain)
 {
-	uint32 s = size();
-	uint32Vector_D transferFlags("transferFlags");
-	span<realx3> points = thisPoints();
-	auto p = boundaryPlane().infPlane();
-
-	numToTransfer_ = 0;
-
-	Kokkos::parallel_reduce
-	(
-		"boundaryProcessor::afterIteration",
-		deviceRPolicyStatic(0,s),
-		boundaryProcessorKernels::markNegative(
-			boundaryPlane().infPlane(),
-			transferFlags.deviceViewAll(),
-			thisPoints()
-		),
-		numToTransfer_
-	);
+	if( !boundaryListUpdate(iter) )
+	{
+		callAgain = false;
+		return true;
+	}
+
+	if(step == 1)
+	{
+		uint32Vector_D transferFlags("transferFlags"+this->name());
+
+		numToTransfer_ = markInNegativeSide(
+			"transferData::markToTransfer"+this->name(),
+			transferFlags);
 
 	uint32Vector_D keepIndices("keepIndices");
+
 	if(numToTransfer_ != 0u)
 	{
 		pFlow::boundaryBaseKernels::createRemoveKeepIndices
 		(
@@ -182,6 +223,7 @@ bool pFlow::MPI::boundaryProcessor::transferData
 			keepIndices,
 			false
 		);
+
 		// delete transfer point from this processor
 		if( !setRemoveKeepIndices(transferIndices_, keepIndices))
 		{
@@ -194,60 +236,80 @@ bool pFlow::MPI::boundaryProcessor::transferData
 	{
 		transferIndices_.clear();
 	}
-
-	auto req = RequestNull;
-
-	CheckMPI( Isend(
-		numToTransfer_,
-		neighborProcessorNo(),
-		thisBoundaryIndex(),
-		pFlowProcessors().localCommunicator(),
-		&req), true );
-	//pOutput<<"sent "<< numToTransfer_<<endl;
+
+	CheckMPI( Isend(
+		numToTransfer_,
+		neighborProcessorNo(),
+		thisBoundaryIndex(),
+		pFlowProcessors().localCommunicator(),
+		&numTransferRequest_), true );
@@ -270,24 +330,38 @@ bool pFlow::MPI::boundaryProcessor::transferData
+		const auto& indices = varList.getObject<uint32IndexContainer>(
+			message::eventName(message::ITEM_INSERT));
+
+		auto indView = deviceViewType1D<uint32>(indices.deviceView().data(), indices.deviceView().size());
+
+		uint32Vector_D newIndices("newIndices", indView);
+
 		if(! appendNewIndices(newIndices))
 		{
 			fatalErrorInFunction;
+			callAgain = false;
 			return false;
 		}
 
-		if(!notify(
-			internal().time().currentIter(),
-			internal().time().currentTime(),
-			internal().time().dt(),
-			msg,
-			varList))
+		const auto ti = internal().time().TimeInfo();
+		if(!notify(ti, msg, varList))
 		{
 			fatalErrorInFunction;
+			callAgain = false;
 			return false;
 		}
 
-		return false;
+		callAgain = false;
+		return true;
 	}
 
-	return false;
-
+	return true;
 }
 
 bool
-pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::iterate(const timeInfo& ti)
 {
 	return true;
 }
 
 bool
-pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
+pFlow::MPI::boundaryProcessor::afterIteration(const timeInfo& ti)
 {
 	uint32 s = size();
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
index 8771869e..bf957541 100644
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
@@ -22,9 +22,11 @@ Licence:
 #define __boundaryProcessor_hpp__
 
 #include "boundaryBase.hpp"
+#include "timeInfo.hpp"
 #include "mpiTypes.hpp"
 #include "dataSender.hpp"
 #include "dataReciever.hpp"
+#include "boundaryConfigs.hpp"
 
 namespace pFlow::MPI
 {
@@ -42,11 +44,13 @@ namespace pFlow::MPI
 
 		uint32 thisNumPoints_ = 0;
 
-		realx3Vector_D neighborProcPoints_;
+		Request numPointsRequest_ = RequestNull;
 
-		dataSender<realx3> sender_;
+		Request numPointsRequest0_ = RequestNull;
 
-		dataReciever<realx3> reciever_;
+		dataSender<realx3> thisPointsInNeighbor_;
+
+		dataReciever<realx3> neighborProcPoints_;
 
 		mutable bool dataRecieved_ = true;
 
@@ -56,6 +60,10 @@ namespace pFlow::MPI
 
 		uint32Vector_D transferIndices_{"transferIndices"};
 
+		Request numTransferRequest_ = RequestNull;
+
+		Request numRecieveRequest_ = RequestNull;
+
 		void checkDataRecieved() const;
 
 		/// @brief Update processor boundary data for this processor
@@ -68,7 +76,7 @@ namespace pFlow::MPI
 		/// step is non-blocking recieve to get data.
 		bool updataBoundaryData(int step) override;
 
-		bool transferData(uint32 iter, int step) override;
+		bool transferData(uint32 iter, int step, bool& callAgain) override;
 
 	public:
 
		TypeInfo("boundary<processor>");
@@ -87,11 +95,17 @@ namespace pFlow::MPI
 			boundaryProcessor,
 			dictionary);
 
-		bool beforeIteration(uint32 iterNum, real t, real dt) override;
+		bool beforeIteration(
+			uint32 step,
+			const timeInfo& ti,
+			bool updateIter,
+			bool iterBeforeUpdate ,
+			bool& callAgain
+		) override;
 
-		bool iterate(uint32 iterNum, real t, real dt) override;
+		bool iterate(const timeInfo& ti) override;
 
-		bool afterIteration(uint32 iterNum, real t, real dt) override;
+		bool afterIteration(const timeInfo& ti) override;
 
 		/// @brief Return number of points in the neighbor processor boundary.
 		/// This is overriden from boundaryBase.
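The transferData(..., bool& callAgain) contract above is a small state machine: the caller keeps invoking the hook while callAgain stays true, which lets each step post or complete one non-blocking MPI operation before control returns to the time loop. A hypothetical driver loop for that protocol, with 'hook' standing in for boundaryProcessor::transferData bound to a boundary and iteration (illustration only):

```cpp
#include <functional>

// keep stepping until the boundary reports that no further call is needed
void runTransferSteps(const std::function<bool(int step, bool& callAgain)>& hook)
{
    bool callAgain = true;
    for (int step = 1; callAgain; ++step)
    {
        if (!hook(step, callAgain))
            break; // a hard failure aborts the multi-step exchange
    }
}
```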
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp
index 547e09f9..c547f112 100644
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp
@@ -76,7 +76,8 @@ public:
 
 	void recieveData(
 		const localProcessors& processors,
-		uint32 numToRecv
+		uint32 numToRecv,
+		const word& name = "dataReciever"
 	)
 	{
 		resize(numToRecv);
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp
index a4c5d39b..bb4538f8 100644
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp
@@ -1,3 +1,23 @@
+/*------------------------------- phasicFlow ---------------------------------
+      O        C enter of
+     O O       E ngineering and
+    O   O      M ultiscale modeling of
+   OOOOOOO     F luid flow
+------------------------------------------------------------------------------
+  Copyright (C): www.cemf.ir
+  email: hamid.r.norouzi AT gmail.com
+------------------------------------------------------------------------------
+Licence:
+  This file is part of phasicFlow code. It is a free software for simulating
+  granular and multiphase flows. You can redistribute it and/or modify it under
+  the terms of GNU General Public License v3 or any other later versions.
+
+  phasicFlow is distributed to help others in their research in the field of
+  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
+  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+-----------------------------------------------------------------------------*/
+
 #ifndef __dataSender_hpp__
 #define __dataSender_hpp__
 
@@ -61,7 +81,8 @@ public:
 
 	void sendData(
 		const localProcessors& processors,
-		const scatteredFieldAccess<T, memory_space>& scatterField
+		const scatteredFieldAccess<T, memory_space>& scatterField,
+		const word& name = "dataSender::sendData"
 	)
 	{
 		using RPolicy = Kokkos::RangePolicy<
@@ -79,10 +100,10 @@ public:
 		buffer_.clear();
 		buffer_.resize(n);
 
-		const auto& buffView = buffer_.deviceViewAll();
+		const auto& buffView = buffer_.deviceViewAll();
 
 		Kokkos::parallel_for(
-			"dataSender::sendData",
+			"packDataForSend::"+name,
 			RPolicy(0,n),
 			LAMBDA_HD(uint32 i)
 			{
diff --git a/src/phasicFlow/containers/Field/Field.cpp b/src/phasicFlow/containers/Field/Field.cpp
index 335fbe49..10e14df8 100644
--- a/src/phasicFlow/containers/Field/Field.cpp
+++ b/src/phasicFlow/containers/Field/Field.cpp
@@ -21,13 +21,17 @@ Licence:
 template<class T, class MemorySpace>
 bool pFlow::Field<T, MemorySpace>::read
 (
-	iIstream& is
+	iIstream& is,
+	bool resume
 )
 {
 	bool tokenFound = true;
 
-	tokenFound = is.findToken(fieldKey_);
+	if(resume)
+		tokenFound = is.findTokenResume(fieldKey_);
+	else
+		tokenFound = is.findToken(fieldKey_);
 
 	if( !tokenFound )
 	{
@@ -53,14 +57,21 @@ template<class T, class MemorySpace>
 bool pFlow::Field<T, MemorySpace>::read
 (
 	iIstream& is,
-	const IOPattern& iop
+	const IOPattern& iop,
+	bool resume
 )
 {
 	bool tokenFound = true;
 
 	if(iop.thisProcReadData())
-		tokenFound = is.findToken(fieldKey_);
+	{
+		if(resume)
+			tokenFound = is.findTokenResume(fieldKey_);
+		else
+			tokenFound = is.findToken(fieldKey_);
+	}
+
 	if( !tokenFound )
 	{
diff --git a/src/phasicFlow/containers/Field/Field.hpp b/src/phasicFlow/containers/Field/Field.hpp
index 738811bc..b38d3017 100644
--- a/src/phasicFlow/containers/Field/Field.hpp
+++ b/src/phasicFlow/containers/Field/Field.hpp
@@ -197,12 +197,12 @@ public:
 
 	//// - IO operations
 
-	bool read(iIstream& is);
+	bool read(iIstream& is, bool resume = false);
 
 	bool write(iOstream& os)const;
 
 
-	bool read(iIstream& is, const IOPattern& iop);
+	bool read(iIstream& is, const IOPattern& iop, bool resume = false);
 
 	bool write(iOstream& os, const IOPattern& iop )const;
 
diff --git a/src/phasicFlow/containers/VectorHD/wordVectorHost.cpp b/src/phasicFlow/containers/VectorHD/wordVectorHost.cpp
index 1941a239..eb1e8515 100644
--- a/src/phasicFlow/containers/VectorHD/wordVectorHost.cpp
+++ b/src/phasicFlow/containers/VectorHD/wordVectorHost.cpp
@@ -21,3 +21,80 @@ Licence:
 
 #include "wordVectorHost.hpp"
 
+bool pFlow::VectorSingle<pFlow::word>::insertSetElement(
+	const uint32IndexContainer& indices,
+	const word& val
+)
+{
+	if(indices.empty()) return true;
+
+	auto maxInd = indices.max();
+
+	if(this->empty() || maxInd > size()-1 )
+	{
+		resize(maxInd+1);
+	}
+
+	auto ind = indices.hostView();
+	auto s = indices.size();
+
+	for(uint32 i=0; i< s; i++)
+	{
+		container_[ind[i]] = val;
+	}
+
+	return true;
+}
+
+bool pFlow::VectorSingle<pFlow::word>::insertSetElement(
+	const uint32IndexContainer& indices,
+	const std::vector<word>& vals
+)
+{
+	if(indices.empty())return true;
+	if(indices.size() != vals.size())return false;
+
+	auto maxInd = indices.max();
+
+	if(this->empty() || maxInd > size()-1 )
+	{
+		resize(maxInd+1);
+	}
+
+	auto ind = indices.hostView();
+	auto s = indices.size();
+
+	for(uint32 i=0; i< s; i++)
+	{
+		container_[ind[i]] = vals[i];
+	}
+
+	return true;
+}
+
+bool pFlow::VectorSingle<pFlow::word>::insertSetElement(
+	const uint32IndexContainer& indices,
+	const ViewType1D<word, HostSpace> vals
+)
+{
+	if(indices.empty())return true;
+	if(indices.size() != vals.size())return false;
+
+	auto maxInd = indices.max();
+
+	if(this->empty() || maxInd > size()-1 )
+	{
+		resize(maxInd+1);
+	}
+
+	auto ind = indices.hostView();
+	auto s = indices.size();
+
+	for(uint32 i=0; i< s; i++)
+	{
+		container_[ind[i]] = vals[i];
+	}
+
+	return true;
+}
+
diff --git a/src/phasicFlow/containers/VectorHD/wordVectorHost.hpp b/src/phasicFlow/containers/VectorHD/wordVectorHost.hpp
index e638bd27..cb6d15b2 100644
--- a/src/phasicFlow/containers/VectorHD/wordVectorHost.hpp
+++ b/src/phasicFlow/containers/VectorHD/wordVectorHost.hpp
@@ -372,28 +372,20 @@ public:
 		return span(const_cast<word*>(container_.data()), container_.size());
 	}
 
-	INLINE_FUNCTION_H
-	bool insertSetElement(const uint32IndexContainer& indices, const word& val)
-	{
-		notImplementedFunction;
-		return false;
-	}
 
-	INLINE_FUNCTION_H
-	bool insertSetElement(const uint32IndexContainer& indices, const std::vector<word>& vals)
-	{
-		notImplementedFunction;
-		return false;
-	}
+	bool insertSetElement(const uint32IndexContainer& indices, const word& val);
+
+
+
+	bool insertSetElement(const uint32IndexContainer& indices, const std::vector<word>& vals);
+
+
 
-	INLINE_FUNCTION_H
 	bool insertSetElement(
 		const uint32IndexContainer& indices,
-		const ViewType1D<word, HostSpace> vals)
-	{
-		notImplementedFunction;
-		return false;
-	}
+		const ViewType1D<word, HostSpace> vals
+	);
+
 
 	INLINE_FUNCTION_H
 	bool reorderItems(const uint32IndexContainer& indices)
diff --git a/src/phasicFlow/globals/boundaryConfigs.hpp b/src/phasicFlow/globals/boundaryConfigs.hpp
new file mode 100644
index 00000000..11574957
--- /dev/null
+++ b/src/phasicFlow/globals/boundaryConfigs.hpp
@@ -0,0 +1,10 @@
+#ifndef __boundaryConfigs_hpp__
+#define __boundaryConfigs_hpp__
+
+
+#ifndef BoundaryModel1
+//#define BoundaryModel1
+#endif
+
+
+#endif //__boundaryConfigs_hpp__
\ No newline at end of file
diff --git a/src/phasicFlow/processors/MPITimer.cpp b/src/phasicFlow/processors/MPITimer.cpp
new file mode 100644
index 00000000..cfa55631
--- /dev/null
+++ b/src/phasicFlow/processors/MPITimer.cpp
@@ -0,0 +1,88 @@
+#include "MPITimer.hpp"
+
+#ifdef pFlow_Build_MPI
+	#include "pFlowProcessors.hpp"
+	#include "procCommunication.hpp"
+#endif
+
+pFlow::real pFlow::MPITimer::totalTimeMax() const
+{
+	return accTimersTotalMax();
+}
+
+std::vector<pFlow::real> pFlow::MPITimer::totalTimeAllToAll() const
+{
+#ifdef pFlow_Build_MPI
+	MPI::procCommunication comm(pFlowProcessors());
+	auto [totTime, succs] = comm.collectAllToAll(totalTime());
+	return totTime;
+#else
+	return std::vector<real>(1, totalTime());
+#endif
+}
+
+std::vector<pFlow::real> pFlow::MPITimer::totalTimeAllToMaster() const
+{
+#ifdef pFlow_Build_MPI
+	MPI::procCommunication comm(pFlowProcessors());
+	auto [totTime, succs] = comm.collectAllToMaster(totalTime());
+	return totTime;
+#else
+	return std::vector<real>(1, totalTime());
+#endif
+}
+
+pFlow::real pFlow::MPITimer::averageTimeMax() const
+{
+	return Timer::averageTimeMax();
+}
+
+std::vector<pFlow::real> pFlow::MPITimer::averageTimeAllToAll() const
+{
+#ifdef pFlow_Build_MPI
+	MPI::procCommunication comm(pFlowProcessors());
+	auto [totTime, succs] = comm.collectAllToAll(averageTime());
+	return totTime;
+#else
+	return std::vector<real>(1, averageTime());
+#endif
+}
+
+std::vector<pFlow::real> pFlow::MPITimer::averageTimeAllAtoMaster() const
+{
+#ifdef pFlow_Build_MPI
+	MPI::procCommunication comm(pFlowProcessors());
+	auto [totTime, succs] = comm.collectAllToMaster(averageTime());
+	return totTime;
+#else
+	return std::vector<real>(1, averageTime());
+#endif
+}
+
+bool pFlow::MPITimer::write(iOstream &os) const
+{
+	const auto ts = totalTimeAllToAll();
+	auto maxloc = std::distance(ts.begin(), std::max_element(ts.begin(), ts.end()));
+	os<<'(';
+	for(auto i=0; i<static_cast<int>(ts.size()); i++)
+	{
+		if(i == maxloc)
+			os<<Yellow_Text(ts[i]);
+		else
+			os<<ts[i];
+		if(i != static_cast<int>(ts.size())-1)
+			os<<' ';
+	}
+	os<<')'<<endl;
+
+	return true;
+}
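What totalTimeAllToAll() amounts to: every rank contributes its local timer value and gets the full per-rank vector back, so each processor can spot the slowest rank. A bare-MPI sketch of the same collection (collectAllToAll is phasicFlow's wrapper; names below are illustrative):

```cpp
#include <mpi.h>
#include <vector>

std::vector<double> gatherTimesAllToAll(double localTime)
{
    int nProcs = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

    std::vector<double> all(nProcs);
    // one value per rank, delivered to every rank
    MPI_Allgather(&localTime, 1, MPI_DOUBLE,
                  all.data(), 1, MPI_DOUBLE, MPI_COMM_WORLD);
    return all;
}
```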
diff --git a/src/phasicFlow/processors/MPITimer.hpp b/src/phasicFlow/processors/MPITimer.hpp
new file mode 100644
--- /dev/null
+++ b/src/phasicFlow/processors/MPITimer.hpp
@@ -0,0 +1,42 @@
+#ifndef __MPITimer_hpp__
+#define __MPITimer_hpp__
+
+#include <vector>
+
+#include "Timer.hpp"
+
+namespace pFlow
+{
+
+class MPITimer
+:
+	public Timer
+{
+public:
+
+	// inherit constructors from Timer
+	using Timer::Timer;
+
+	real totalTimeMax()const;
+
+	std::vector<real> totalTimeAllToAll()const;
+
+	std::vector<real> totalTimeAllToMaster()const;
+
+	real averageTimeMax()const;
+
+	std::vector<real> averageTimeAllToAll()const;
+
+	std::vector<real> averageTimeAllAtoMaster()const;
+
+	// call this from all processors in pFlowProcessors
+	bool write(iOstream& os)const;
+
+};
+
+MPITimer& ComputationTimer();
+
+}
+
+
+
+#endif
\ No newline at end of file
diff --git a/src/phasicFlow/structuredData/boundaries/boundariesMask.hpp b/src/phasicFlow/structuredData/boundaries/boundariesMask.hpp
new file mode 100644
index 00000000..13875f2b
--- /dev/null
+++ b/src/phasicFlow/structuredData/boundaries/boundariesMask.hpp
@@ -0,0 +1,50 @@
+
+#ifndef __boundariesMask_hpp__
+#define __boundariesMask_hpp__
+
+#include <algorithm>
+#include <array>
+
+namespace pFlow
+{
+
+template<size_t N>
+class boundariesMask
+:
+	public std::array<bool, N>
+{
+public:
+
+	boundariesMask()=default;
+
+	boundariesMask(bool val)
+	{
+		this->fill(val);
+	}
+
+	boundariesMask(std::initializer_list<bool> l)
+	{
+		std::copy_n(l.begin(), std::min(l.size(), N), this->begin());
+	}
+
+	inline
+	bool allElements(bool val)
+	{
+		return std::all_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
+	}
+
+	inline
+	bool anyElement(bool val)
+	{
+		return std::any_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
+	}
+
+	inline
+	bool noElement(bool val)
+	{
+		return std::none_of(this->begin(), this->end(), [val](bool v) { return v==val;} );
+	}
+};
+
+}
+
+#endif //__boundariesMask_hpp__
\ No newline at end of file
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp b/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp
index 978f674b..b1f8008a 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp
@@ -339,7 +339,7 @@ public:
 
 	virtual
-	bool beforeIteration(uint32 iterNum, real t, real dt) = 0 ;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) = 0 ;
 
 	virtual
 	bool iterate(uint32 iterNum, real t, real dt) = 0;
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.cpp b/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.cpp
index 9937433c..ce874e67 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.cpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.cpp
@@ -42,12 +42,14 @@ pFlow::boundaryExit::boundaryExit
 
 bool pFlow::boundaryExit::beforeIteration
 (
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt
 )
 {
-
+	if(step!= 2 )return true;
+
 	if( !boundaryListUpdate(iterNum))return true;
 
 	// nothing have to be done
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.hpp b/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.hpp
index f8aa61c5..b4763e26 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.hpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryExit/boundaryExit.hpp
@@ -63,7 +63,7 @@ public:
 		dictionary
 	);
 
-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;
 
 	bool iterate(uint32 iterNum, real t, real dt) override;
 
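boundariesMask is a fixed-size bool mask over the domain boundaries (six for a box) with convenience queries. A self-contained usage sketch, restating the class minimally under the template<size_t N> form reconstructed above (illustration only):

```cpp
#include <algorithm>
#include <array>

// minimal restatement of boundariesMask for illustration
template<std::size_t N>
struct BMask : std::array<bool, N>
{
    bool allElements(bool v) const
    { return std::all_of(this->begin(), this->end(), [v](bool b){ return b == v; }); }
    bool anyElement(bool v) const
    { return std::any_of(this->begin(), this->end(), [v](bool b){ return b == v; }); }
};

int main()
{
    BMask<6> mask{};   // value-initialized: all six flags false
    mask[0] = true;    // flag the first boundary

    bool any = mask.anyElement(true);   // true: at least one flag is set
    bool all = mask.allElements(true);  // false: not all flags are set
    return (any && !all) ? 0 : 1;
}
```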
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryList.cpp b/src/phasicFlow/structuredData/boundaries/boundaryList.cpp
index 473e523d..707e00ea 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryList.cpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryList.cpp
@@ -92,9 +92,12 @@ pFlow::boundaryList::boundaryList(pointStructure& pStruct)
 :
 	ListPtr<boundaryBase>(pStruct.simDomain().sizeOfBoundaries()),
 	pStruct_(pStruct),
 	neighborListUpdateInterval_(
-		pStruct.simDomain().subDict("boundaries").getVal<uint32>(
-			"neighborListUpdateInterval"
-		)
+		max(
+			pStruct.simDomain().subDict("boundaries").getVal<uint32>(
+				"neighborListUpdateInterval"
+			),
+			1u
+		)
 	)
 {
 }
@@ -169,7 +172,17 @@ pFlow::boundaryList::beforeIteration(uint32 iter, real t, real dt, bool force)
 
 	for (auto bdry : *this)
 	{
-		if (!bdry->beforeIteration(iter, t, dt))
+		if (!bdry->beforeIteration(1, iter, t, dt))
+		{
+			fatalErrorInFunction << "Error in beforeIteration in boundary "
+								 << bdry->name() << endl;
+			return false;
+		}
+	}
+
+	for (auto bdry : *this)
+	{
+		if (!bdry->beforeIteration(2, iter, t, dt))
 		{
 			fatalErrorInFunction << "Error in beforeIteration in boundary "
 								 << bdry->name() << endl;
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.cpp b/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.cpp
index 327330dd..7f228562 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.cpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.cpp
@@ -34,6 +34,7 @@ pFlow::boundaryNone::boundaryNone
 
 bool pFlow::boundaryNone::beforeIteration
 (
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.hpp b/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.hpp
index 8e103d39..68ddf33f 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.hpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryNone/boundaryNone.hpp
@@ -52,7 +52,7 @@ public:
 		dictionary
 	);
 
-	bool beforeIteration(uint32 iterNum, real t, real dt) final;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) final;
 
 	bool iterate(uint32 iterNum, real t, real dt) final;
 
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.cpp b/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.cpp
index cbdecf71..cf111eb5 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.cpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.cpp
@@ -51,10 +51,12 @@ pFlow::realx3 pFlow::boundaryPeriodic::boundaryExtensionLength() const
 
 bool pFlow::boundaryPeriodic::beforeIteration(
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt)
 {
+	if(step!=2)return true;
 	// nothing have to be done
 	if(empty())
 	{
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.hpp b/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.hpp
index 88983fa3..69b0413a 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.hpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryPeriodic/boundaryPeriodic.hpp
@@ -64,7 +64,7 @@ public:
 
 	//const plane& boundaryPlane()const override;*/
 
-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;
 
 	bool iterate(uint32 iterNum, real t, real dt) override;
 
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.cpp b/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.cpp
index 74ee4d75..8de9b74f 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.cpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.cpp
@@ -46,6 +46,7 @@ pFlow::boundaryReflective::boundaryReflective
 }
 
 bool pFlow::boundaryReflective::beforeIteration(
+	uint32 step,
 	uint32 iterNum,
 	real t,
 	real dt)
diff --git a/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.hpp b/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.hpp
index 43e1ce13..3f3bc64c 100644
--- a/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.hpp
+++ b/src/phasicFlow/structuredData/boundaries/boundaryReflective/boundaryReflective.hpp
@@ -59,7 +59,7 @@ public:
 		dictionary
 	);
 
-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+	bool beforeIteration(uint32 step, uint32 iterNum, real t, real dt) override;
 
 	bool iterate(uint32 iterNum, real t, real dt) override;
 
diff --git a/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp b/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp
index a84325a9..aef3c126 100644
--- a/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp
+++ b/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp
@@ -32,9 +32,12 @@ bool pFlow::regularSimulationDomain::createBoundaryDicts()
 	auto& rbBoundaries = this->subDict("regularBoundaries");
 
 	auto neighborLength = boundaries.getVal<real>("neighborLength");
-	auto boundaryExtntionLengthRatio =
-		boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1);
-	auto updateIntercal = boundaries.getValOrSet<uint32>("updateInterval", 1u);
+	auto boundaryExtntionLengthRatio = max(
+		boundaries.getValOrSet<real>("boundaryExtntionLengthRatio", 0.1),
+		0.0);
+	auto updateIntercal = max(
+		boundaries.getValOrSet<uint32>("updateInterval", 1u),
+		1u);
 
 	for(uint32 i=0; i
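The boundaryList change above splits beforeIteration into two ordered sweeps over all boundaries, so that every boundary can do its step-1 work (e.g. posting non-blocking receives) before any boundary starts step-2 work that may depend on it. A schematic driver for that two-pass dispatch, with Boundary standing in for boundaryBase (illustration only):

```cpp
#include <vector>

struct Boundary
{
    virtual bool beforeIteration(int step) = 0; // step 1 then step 2
    virtual ~Boundary() = default;
};

bool runBeforeIteration(std::vector<Boundary*>& boundaries)
{
    for (int step = 1; step <= 2; ++step)       // two ordered sweeps
        for (auto* b : boundaries)
            if (!b->beforeIteration(step))
                return false;                   // mirrors the fatal-error path
    return true;
}
```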