boundaryProcessor -> transferData

- point data is now being transferred (no notification to fields yet).
- field data still needs to be transferred.
HRN 2024-05-05 22:54:12 +03:30
parent 525e972c20
commit 68b7d141fa
5 changed files with 244 additions and 65 deletions


@@ -32,9 +32,7 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySphereInteraction
     geomMotion
 ),
 masterInteraction_(boundary.isBoundaryMaster())
-{
-    pOutput<<"Processor boundayrCondition for "<< boundary.name()<<endl;
-}
+{}

 template <typename cFM, typename gMM>
 bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
@@ -43,6 +41,7 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
     const ContactForceModel &cfModel
 )
 {
+    return true;
     if(!masterInteraction_) return true;

     const auto & sphPar = this->sphParticles();


@@ -238,6 +238,18 @@ inline auto send(span<T> data, int dest, int tag, Comm comm)
         comm);
 }

+template<typename T>
+inline auto send(const T& data, int dest, int tag, Comm comm)
+{
+    return MPI_Send(
+        &data,
+        sFactor<T>(),
+        Type<T>(),
+        dest,
+        tag,
+        comm);
+}
+
 template<typename T>
 inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
 {
@@ -277,6 +289,19 @@ inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
         status);
 }

+template<typename T>
+inline auto recv(T& data, int source, int tag, Comm comm, Status *status)
+{
+    return MPI_Recv(
+        &data,
+        sFactor<T>(),
+        Type<T>(),
+        source,
+        tag,
+        comm,
+        status);
+}
+
 template<typename T>
 inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
 {
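The two overloads added here send and receive a single object of type T, with sFactor<T>() and Type<T>() supplying the element count and MPI datatype. A minimal usage sketch, assuming two neighbor ranks exchanging a single uint32 the way boundaryProcessor::transferData does below; thisRank and the tag values are illustrative placeholders:

// illustrative only: exchange one uint32 between two ranks using the new
// scalar overloads; comm and StatusesIgnore are taken from the code in this commit
uint32 numToSend   = 42;
uint32 numReceived = 0;
auto   comm        = pFlowProcessors().localCommunicator();

if(thisRank == 0)   // thisRank: hypothetical rank id
{
    send(numToSend, 1, 0, comm);
    recv(numReceived, 1, 1, comm, StatusesIgnore);
}
else
{
    recv(numReceived, 0, 0, comm, StatusesIgnore);
    send(numToSend, 0, 1, comm);
}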


@@ -90,6 +90,8 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
         boundary.mirrorBoundaryIndex()
     )
 {
+    this->addEvent(message::BNDR_PROCTRANS1).
+        addEvent(message::BNDR_PROCTRANS2);
 }

 template<class T, class MemorySpace>
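The two addEvent calls register this field for the notifications that the owning processor boundary will emit when it transfers points in the two phases of transferData. A rough sketch of how such a notification could be consumed; the handler name onBoundaryTransfer and the message query are hypothetical, since the actual observer interface is not part of this diff:

// hypothetical handler, for illustration only; not the phasicFlow observer API
template<class T, class MemorySpace>
bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::onBoundaryTransfer(const message& msg)
{
    if(msg.equivalentTo(message::BNDR_PROCTRANS1))
    {
        // phase 1: pack and send the field values of the points being transferred
    }
    else if(msg.equivalentTo(message::BNDR_PROCTRANS2))
    {
        // phase 2: receive and append the field values arriving from the neighbor
    }
    return true;
}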


@@ -21,6 +21,8 @@ Licence:
 #include "boundaryProcessor.hpp"
 #include "dictionary.hpp"
 #include "mpiCommunication.hpp"
+#include "boundaryBaseKernels.hpp"
+#include "internalPoints.hpp"

 void
 pFlow::MPI::boundaryProcessor::checkSize() const
@@ -130,6 +132,105 @@ pFlow::MPI::boundaryProcessor::updataBoundary(int step)
     return true;
 }
+bool pFlow::MPI::boundaryProcessor::transferData(int step)
+{
+    if(step == 1)
+    {
+        uint32 s = size();
+        uint32Vector_D transferFlags("transferFlags", s+1, s+1, RESERVE());
+        transferFlags.fill(0u);
+
+        const auto& transferD = transferFlags.deviceViewAll();
+        auto points = thisPoints();
+        auto p = boundaryPlane().infPlane();
+
+        numToTransfer_ = 0;
+
+        Kokkos::parallel_reduce
+        (
+            "boundaryProcessor::afterIteration",
+            deviceRPolicyStatic(0, s),
+            LAMBDA_HD(uint32 i, uint32& transferToUpdate)
+            {
+                if(p.pointInNegativeSide(points(i)))
+                {
+                    transferD(i) = 1;
+                    transferToUpdate++;
+                }
+            },
+            numToTransfer_
+        );
+
+        uint32Vector_D keepIndices("keepIndices");
+
+        if(numToTransfer_ != 0u)
+        {
+            pFlow::boundaryBaseKernels::createRemoveKeepIndices
+            (
+                indexList(),
+                numToTransfer_,
+                transferFlags,
+                transferIndices_,
+                keepIndices,
+                false
+            );
+
+            // delete transfer point from this processor
+            if( !setRemoveKeepIndices(transferIndices_, keepIndices))
+            {
+                fatalErrorInFunction<<
+                    "error in setting transfer and keep points in boundary "<< name()<<endl;
+                return false;
+            }
+        }
+        else
+        {
+            transferIndices_.clear();
+        }
+
+        auto req = RequestNull;
+        CheckMPI( Isend(
+            numToTransfer_,
+            neighborProcessorNo(),
+            thisBoundaryIndex(),
+            pFlowProcessors().localCommunicator(),
+            &req), true );
+
+        CheckMPI( recv(
+            numToRecieve_,
+            neighborProcessorNo(),
+            mirrorBoundaryIndex(),
+            pFlowProcessors().localCommunicator(),
+            StatusesIgnore), true );
+
+        MPI_Request_free(&req);
+        return true;
+    }
+    else if(step == 2)
+    {
+        pointFieldAccessType transferPoints(
+            transferIndices_.size(),
+            transferIndices_.deviceViewAll(),
+            internal().pointPositionDevice());
+
+        sender_.sendData(pFlowProcessors(), transferPoints);
+        return true;
+    }
+    else if(step == 3)
+    {
+        reciever_.recieveData(pFlowProcessors(), numToRecieve_);
+        return true;
+    }
+    else if(step == 4)
+    {
+        reciever_.waitBufferForUse();
+        return false;
+    }
+
+    return false;
+}
 bool
 pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
 {
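transferData is meant to be driven in four passes: step 1 flags the points that have crossed to the negative side of the boundary plane, removes them from this boundary, and exchanges the transfer counts with the neighbor rank; step 2 sends the positions of the flagged points; step 3 posts the receive for the incoming points; step 4 waits on the receive buffer. A minimal driver sketch with a hypothetical call site (the real caller is not part of this commit):

// hypothetical call site, for illustration only: run each step on every
// processor boundary before advancing, so the sends of one rank can be
// matched by the receives of its neighbor
for(int step = 1; step <= 4; ++step)
{
    for(auto& bndry : processorBoundaries)   // processorBoundaries: placeholder list
    {
        bndry.transferData(step);
    }
}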
@@ -139,5 +240,54 @@ pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
 bool
 pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
 {
+    uint32 s = size();
+    pOutput<<"size of boundary is "<< s <<endl;
+
+    uint32Vector_D transferFlags("transferFlags", s+1, s+1, RESERVE());
+    transferFlags.fill(0u);
+
+    const auto& transferD = transferFlags.deviceViewAll();
+    auto points = thisPoints();
+    auto p = boundaryPlane().infPlane();
+
+    uint32 numTransfer = 0;
+
+    Kokkos::parallel_reduce
+    (
+        "boundaryProcessor::afterIteration",
+        deviceRPolicyStatic(0, s),
+        LAMBDA_HD(uint32 i, uint32& transferToUpdate)
+        {
+            if(p.pointInNegativeSide(points(i)))
+            {
+                transferD(i) = 1;
+                transferToUpdate++;
+            }
+        },
+        numTransfer
+    );
+
+    pOutput<<"Numebr to be transfered "<< numTransfer<<endl;
+
+    uint32Vector_D transferIndices("transferIndices");
+    uint32Vector_D keepIndices("keepIndices");
+
+    pFlow::boundaryBaseKernels::createRemoveKeepIndices
+    (
+        indexList(),
+        numTransfer,
+        transferFlags,
+        transferIndices,
+        keepIndices
+    );
+
+    // delete transfer point from this processor
+    if( !setRemoveKeepIndices(transferIndices, keepIndices))
+    {
+        fatalErrorInFunction<<
+            "error in setting transfer and keep points in boundary "<< name()<<endl;
+        return false;
+    }
+
     return true;
 }
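Both transferData and the afterIteration body above rely on the same device-side pattern: a single Kokkos::parallel_reduce that marks every point lying on the negative side of the boundary plane and counts the marked points in one pass. A self-contained sketch of that flag-and-count pattern with generic Kokkos types; the view names and the plane test are stand-ins for transferFlags, thisPoints() and infPlane().pointInNegativeSide():

#include <cstdint>
#include <Kokkos_Core.hpp>

// flag-and-count sketch (assumes Kokkos::initialize has been called);
// x(i) < cutPlane stands in for p.pointInNegativeSide(points(i))
uint32_t flagAndCount(Kokkos::View<double*> x, double cutPlane, Kokkos::View<uint32_t*> flags)
{
    uint32_t numFlagged = 0;

    Kokkos::parallel_reduce(
        "flagAndCount",
        Kokkos::RangePolicy<>(0, x.extent(0)),
        KOKKOS_LAMBDA(const int i, uint32_t& localCount)
        {
            if(x(i) < cutPlane)
            {
                flags(i) = 1;    // mark this point for transfer
                localCount++;    // contribute to the reduced total
            }
        },
        numFlagged);

    return numFlagged;           // the flags view would then feed createRemoveKeepIndices
}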


@@ -1,18 +1,18 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -21,7 +21,6 @@ Licence:
 #ifndef __boundaryProcessor_hpp__
 #define __boundaryProcessor_hpp__

 #include "boundaryBase.hpp"
 #include "mpiTypes.hpp"
 #include "dataSender.hpp"
@@ -30,78 +29,82 @@ Licence:
namespace pFlow::MPI
{

class boundaryProcessor
    : public boundaryBase
{
public:

+   using pointFieldAccessType = typename boundaryBase::pointFieldAccessType;

private:

    uint32 neighborProcNumPoints_ = 0;

    uint32 thisNumPoints_ = 0;

    realx3Vector_D neighborProcPoints_;

    dataSender<realx3> sender_;

    dataReciever<realx3> reciever_;

    mutable bool dataRecieved_ = true;

+   uint32 numToTransfer_ = 0;

+   uint32 numToRecieve_ = 0;

+   uint32Vector_D transferIndices_{"transferIndices"};

    void checkSize() const;

    void checkDataRecieved() const;

    /// @brief Update processor boundary data for this processor
    /// @param step It is either 1 or 2 in the input to indicate
    /// the update step
    /// @return true if successful
    /// @details This method is called by boundaryList two times to
    /// allow processor boundaries to exchange data in two steps.
    /// The first step is a buffered non-blocking send and the second
    /// step is non-blocking recieve to get data.
    bool updataBoundary(int step) override;

+   bool transferData(int step) override;

public:

    TypeInfo("boundary<processor>");

    boundaryProcessor(
        const dictionary &dict,
        const plane &bplane,
        internalPoints &internal,
        boundaryList &bndrs,
        uint32 thisIndex);

    ~boundaryProcessor() override = default;

    add_vCtor(
        boundaryBase,
        boundaryProcessor,
        dictionary);

    bool beforeIteration(uint32 iterNum, real t, real dt) override;

    bool iterate(uint32 iterNum, real t, real dt) override;

    bool afterIteration(uint32 iterNum, real t, real dt) override;

    /// @brief Return number of points in the neighbor processor boundary.
    /// This is overriden from boundaryBase.
    uint32 neighborProcSize() const override;

    /// @brief Return a reference to point positions in the neighbor
    /// processor boundary.
    realx3Vector_D &neighborProcPoints() override;

    /// @brief Return a const reference to point positions in the
    /// neighbor processor boundary.
    const realx3Vector_D &neighborProcPoints() const override;
};

} // namespace pFlow::MPI