diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp
index 56243ae6..809b3c6f 100644
--- a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp
+++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp
@@ -32,9 +32,7 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySpher
 		geomMotion
 	),
 	masterInteraction_(boundary.isBoundaryMaster())
-{
-	pOutput<<"Processor boundayrCondition for "<< boundary.name()<<endl;
-}
+{}
 
 template<typename cFM, typename gMM>
 bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
@@ -43,6 +41,7 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInter
 (
 	const ContactForceModel &cfModel
 )
 {
+	return true;
 	if(!masterInteraction_) return true;
 	const auto & sphPar = this->sphParticles();
diff --git a/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
index 4fd5e260..27d259eb 100644
--- a/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
+++ b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp
@@ -238,6 +238,18 @@ inline auto send(span<T> data, int dest, int tag, Comm comm)
 		comm);
 }
 
+template<typename T>
+inline auto send(const T& data, int dest, int tag, Comm comm)
+{
+	return MPI_Send(
+		&data,
+		sFactor<T>(),
+		Type<T>(),
+		dest,
+		tag,
+		comm);
+}
+
 template<typename T>
 inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
 {
@@ -277,6 +289,19 @@ inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
 		status);
 }
 
+template<typename T>
+inline auto recv(T& data, int source, int tag, Comm comm, Status *status)
+{
+	return MPI_Recv(
+		&data,
+		sFactor<T>(),
+		Type<T>(),
+		source,
+		tag,
+		comm,
+		status);
+}
+
 template<typename T>
 inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
 {
diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
index ab0225e3..164a2fe6 100644
--- a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
+++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp
@@ -90,6 +90,8 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
 		boundary.mirrorBoundaryIndex()
 	)
 {
+	this->addEvent(message::BNDR_PROCTRANS1).
+		addEvent(message::BNDR_PROCTRANS2);
 }
 
 template<class T, class MemorySpace>
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
index 2648cc04..246959b1 100644
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
@@ -21,6 +21,8 @@ Licence:
 #include "boundaryProcessor.hpp"
 #include "dictionary.hpp"
 #include "mpiCommunication.hpp"
+#include "boundaryBaseKernels.hpp"
+#include "internalPoints.hpp"
 
 void
 pFlow::MPI::boundaryProcessor::checkSize() const
@@ -130,6 +132,105 @@ pFlow::MPI::boundaryProcessor::updataBoundary(int step)
 	return true;
 }
 
+bool pFlow::MPI::boundaryProcessor::transferData(int step)
+{
+	if(step==1)
+	{
+		uint32 s = size();
+		uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
+		transferFlags.fill(0u);
+
+		const auto& transferD = transferFlags.deviceViewAll();
+		auto points = thisPoints();
+		auto p = boundaryPlane().infPlane();
+
+		numToTransfer_ = 0;
+
+		Kokkos::parallel_reduce
+		(
+			"boundaryProcessor::afterIteration",
+			deviceRPolicyStatic(0,s),
+			LAMBDA_HD(uint32 i, uint32& transferToUpdate)
+			{
+				if(p.pointInNegativeSide(points(i)))
+				{
+					transferD(i)=1;
+					transferToUpdate++;
+				}
+			},
+			numToTransfer_
+		);
+
+		uint32Vector_D keepIndices("keepIndices");
+		if(numToTransfer_ != 0u)
+		{
+			pFlow::boundaryBaseKernels::createRemoveKeepIndices
+			(
+				indexList(),
+				numToTransfer_,
+				transferFlags,
+				transferIndices_,
+				keepIndices,
+				false
+			);
+			// delete transfer point from this processor
+			if( !setRemoveKeepIndices(transferIndices_, keepIndices))
+			{
+				fatalErrorInFunction<<
+					"error in setting transfer and keep points in boundary "<< name()<<endl;
diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
--- a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
+++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp
-	dataSender<realx3> sender_;
+	dataSender<realx3>   sender_;
 
-	dataReciever<realx3> reciever_;
+	dataReciever<realx3> reciever_;
 
-	mutable bool dataRecieved_ = true;
+	mutable bool         dataRecieved_ = true;
 
-	void checkSize()const;
+	uint32 numToTransfer_ = 0;
 
-	void checkDataRecieved()const;
-
-	/// @brief Update processor boundary data for this processor
-	/// @param step It is either 1 or 2 in the input to indicate
-	///  the update step
-	/// @return true if successful
-	/// @details This method is called by boundaryList two times to
-	///  allow processor boundaries to exchange data in two steps.
-	///  The first step is a buffered non-blocking send and the second
-	///  step is non-blocking recieve to get data.
-	bool updataBoundary(int step)override;
+	uint32 numToRecieve_ = 0;
 
-public:
+	uint32Vector_D transferIndices_{"transferIndices"};
 
-	TypeInfo("boundary");
+	void checkSize() const;
 
-	boundaryProcessor(
-		const dictionary& dict,
-		const plane& bplane,
-		internalPoints& internal,
-		boundaryList& bndrs,
-		uint32 thisIndex
-	);
+	void checkDataRecieved() const;
 
-	~boundaryProcessor() override = default;
+	/// @brief Update processor boundary data for this processor
+	/// @param step It is either 1 or 2 in the input to indicate
+	///  the update step
+	/// @return true if successful
+	/// @details This method is called by boundaryList two times to
+	///  allow processor boundaries to exchange data in two steps.
+	///  The first step is a buffered non-blocking send and the second
+	///  step is non-blocking recieve to get data.
+	bool updataBoundary(int step) override;
 
-	add_vCtor
-	(
-		boundaryBase,
-		boundaryProcessor,
-		dictionary
-	);
+	bool transferData(int step) override;
 
-	bool beforeIteration(uint32 iterNum, real t, real dt) override;
+public:
+
+	TypeInfo("boundary");
 
-	bool iterate(uint32 iterNum, real t, real dt) override;
+	boundaryProcessor(
+		const dictionary &dict,
+		const plane &bplane,
+		internalPoints &internal,
+		boundaryList &bndrs,
+		uint32 thisIndex);
 
-	bool afterIteration(uint32 iterNum, real t, real dt) override;
+	~boundaryProcessor() override = default;
 
-	/// @brief Return number of points in the neighbor processor boundary.
-	/// This is overriden from boundaryBase.
-	uint32 neighborProcSize() const override;
+	add_vCtor(
+		boundaryBase,
+		boundaryProcessor,
+		dictionary);
 
-	/// @brief Return a reference to point positions in the neighbor
-	/// processor boundary.
-	realx3Vector_D& neighborProcPoints() override;
+	bool beforeIteration(uint32 iterNum, real t, real dt) override;
 
-	/// @brief Return a const reference to point positions in the
-	/// neighbor processor boundary.
-	const realx3Vector_D& neighborProcPoints() const override;
+	bool iterate(uint32 iterNum, real t, real dt) override;
 
-};
+	bool afterIteration(uint32 iterNum, real t, real dt) override;
+
+	/// @brief Return number of points in the neighbor processor boundary.
+	/// This is overriden from boundaryBase.
+	uint32 neighborProcSize() const override;
+
+	/// @brief Return a reference to point positions in the neighbor
+	/// processor boundary.
+	realx3Vector_D &neighborProcPoints() override;
+
+	/// @brief Return a const reference to point positions in the
+	/// neighbor processor boundary.
+	const realx3Vector_D &neighborProcPoints() const override;
+};
 
 } // namespace pFlow::MPI
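
Note on the new scalar send/recv overloads added to mpiCommunication.hpp: unlike the existing span<T> versions, they transmit a single value of type T (count sFactor<T>(), datatype Type<T>()). The sketch below is only an illustration of that pattern in plain MPI, not code from this patch or from the phasicFlow API; the rank numbers, the tag, and the numToTransfer variable are assumptions chosen to mirror how a point count such as numToTransfer_ could be exchanged between neighbour processors.

// Illustrative sketch (assumed example, not part of the patch): one rank sends a
// single counter and its neighbour receives it into a plain variable, the same
// shape of call that send(const T&, ...) / recv(T&, ...) wrap around MPI_Send/MPI_Recv.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int tag = 0;
    unsigned int numToTransfer = 0; // hypothetical counter, mirrors numToTransfer_

    if (rank == 0)
    {
        numToTransfer = 42u;
        // counterpart of the new send(numToTransfer, 1, tag, comm) overload
        MPI_Send(&numToTransfer, 1, MPI_UNSIGNED, 1, tag, MPI_COMM_WORLD);
    }
    else if (rank == 1)
    {
        // counterpart of the new recv(numToTransfer, 0, tag, comm, &status) overload
        MPI_Recv(&numToTransfer, 1, MPI_UNSIGNED, 0, tag, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        std::printf("rank 1 expects %u transferred points\n", numToTransfer);
    }

    MPI_Finalize();
    return 0;
}

Run with at least two ranks (e.g. mpirun -np 2 ./a.out); the blocking pair here corresponds to the simplest use of the overloads, while boundaryProcessor itself drives its exchanges through the two-step updataBoundary/transferData protocol described in the doc comments above.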