boundaryProcessor -> transferData

- point data is now transferred between processor boundaries (no notification to the boundary fields is issued yet).
- field data still needs to be transferred.
HRN 2024-05-05 22:54:12 +03:30
parent 525e972c20
commit 68b7d141fa
5 changed files with 244 additions and 65 deletions


@@ -32,9 +32,7 @@ pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySphereInteraction
geomMotion
),
masterInteraction_(boundary.isBoundaryMaster())
-{
-	pOutput<<"Processor boundayrCondition for "<< boundary.name()<<endl;
-}
+{}
template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
@@ -43,6 +41,7 @@ bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
const ContactForceModel &cfModel
)
{
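// NOTE: the unconditional return below bypasses the contact-force computation
// across this processor boundary for now; only point-data transfer is exercised
// in this commit (see the commit message)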
return true;
if(!masterInteraction_) return true;
const auto & sphPar = this->sphParticles();


@@ -238,6 +238,18 @@ inline auto send(span<T> data, int dest, int tag, Comm comm)
comm);
}
template<typename T>
inline auto send(const T& data, int dest, int tag, Comm comm)
{
return MPI_Send(
&data,
sFactor<T>(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
{
@@ -277,6 +289,19 @@ inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
status);
}
template<typename T>
inline auto recv(T& data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
&data,
sFactor<T>(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
{

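The scalar overloads added above complement the existing span-based ones. A brief usage sketch follows (the neighbor rank and tags are made-up placeholders, not values from this commit); it mirrors the count exchange in boundaryProcessor::transferData further down, where a non-blocking Isend is paired with a blocking recv so both sides can exchange a counter without deadlocking:

// Sketch only: exchange a single counter with a neighbor processor using the
// scalar send/recv wrappers above (ranks and tags are placeholders).
uint32 numToSend   = 12; // count announced to the neighbor
uint32 numToAccept = 0;  // count announced by the neighbor

int neighborRank = 1;          // placeholder neighbor rank
int sendTag = 0, recvTag = 1;  // placeholder message tags

auto req  = RequestNull;
auto comm = pFlowProcessors().localCommunicator();

CheckMPI(Isend(numToSend, neighborRank, sendTag, comm, &req), true);
CheckMPI(recv(numToAccept, neighborRank, recvTag, comm, StatusesIgnore), true);
MPI_Request_free(&req);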

@@ -90,6 +90,8 @@ pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
boundary.mirrorBoundaryIndex()
)
{
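// subscribe to the two point-transfer events so this field is notified when
// the parent boundary transfers points between processors (handling of the
// field data itself is not implemented yet; see the commit message)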
this->addEvent(message::BNDR_PROCTRANS1).
addEvent(message::BNDR_PROCTRANS2);
}
template<class T, class MemorySpace>


@@ -21,6 +21,8 @@ Licence:
#include "boundaryProcessor.hpp"
#include "dictionary.hpp"
#include "mpiCommunication.hpp"
#include "boundaryBaseKernels.hpp"
#include "internalPoints.hpp"
void
pFlow::MPI::boundaryProcessor::checkSize() const
@@ -130,6 +132,105 @@ pFlow::MPI::boundaryProcessor::updataBoundary(int step)
return true;
}
bool pFlow::MPI::boundaryProcessor::transferData(int step)
{
if(step==1)
{
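// step 1: flag the points that lie on the negative side of the boundary plane,
// remove them from this boundary, and exchange the transfer counts with the
// neighbor processor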
uint32 s = size();
uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
transferFlags.fill(0u);
const auto& transferD = transferFlags.deviceViewAll();
auto points = thisPoints();
auto p = boundaryPlane().infPlane();
numToTransfer_ = 0;
Kokkos::parallel_reduce
(
"boundaryProcessor::afterIteration",
deviceRPolicyStatic(0,s),
LAMBDA_HD(uint32 i, uint32& transferToUpdate)
{
if(p.pointInNegativeSide(points(i)))
{
transferD(i)=1;
transferToUpdate++;
}
},
numToTransfer_
);
uint32Vector_D keepIndices("keepIndices");
if(numToTransfer_ != 0u)
{
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numToTransfer_,
transferFlags,
transferIndices_,
keepIndices,
false
);
// delete transfer point from this processor
if( !setRemoveKeepIndices(transferIndices_, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
}
else
{
transferIndices_.clear();
}
auto req = RequestNull;
CheckMPI( Isend(
numToTransfer_,
neighborProcessorNo(),
thisBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&req), true );
CheckMPI(recv(
numToRecieve_,
neighborProcessorNo(),
mirrorBoundaryIndex(),
pFlowProcessors().localCommunicator(),
StatusesIgnore), true);
MPI_Request_free(&req);
return true;
}
else if(step ==2 )
{
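// step 2: send the positions of the flagged points to the neighbor processor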
pointFieldAccessType transferPoints(
transferIndices_.size(),
transferIndices_.deviceViewAll(),
internal().pointPositionDevice());
sender_.sendData(pFlowProcessors(), transferPoints);
return true;
}
else if(step == 3)
{
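// step 3: receive the points announced by the mirror boundary in step 1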
reciever_.recieveData(pFlowProcessors(), numToRecieve_);
return true;
}
else if(step == 4)
{
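// step 4: wait until the receive buffer is ready for use; returning false
// signals the end of the transfer sequence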
reciever_.waitBufferForUse();
//
return false;
}
return false;
}
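The four steps above are meant to be driven in order by the caller. A minimal, hypothetical driver (not part of this commit; the real caller would normally loop over all boundaries for each step):

// Hypothetical sketch: `boundary` stands for a boundaryProcessor instance.
// Step 1 exchanges transfer counts, step 2 sends the outgoing point positions,
// step 3 posts the receive for incoming points, and step 4 waits on the
// receive buffer and returns false to end the sequence.
for(int step = 1; step <= 4; ++step)
{
    if(!boundary.transferData(step))
        break;
}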
bool
pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
{
@@ -139,5 +240,54 @@ pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
bool
pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
{
uint32 s = size();
pOutput<<"size of boundary is "<< s <<endl;
uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
transferFlags.fill(0u);
const auto& transferD = transferFlags.deviceViewAll();
auto points = thisPoints();
auto p = boundaryPlane().infPlane();
uint32 numTransfer = 0;
Kokkos::parallel_reduce
(
"boundaryProcessor::afterIteration",
deviceRPolicyStatic(0,s),
LAMBDA_HD(uint32 i, uint32& transferToUpdate)
{
if(p.pointInNegativeSide(points(i)))
{
transferD(i)=1;
transferToUpdate++;
}
},
numTransfer
);
pOutput<<"Number to be transferred "<< numTransfer<<endl;
uint32Vector_D transferIndices("transferIndices");
uint32Vector_D keepIndices("keepIndices");
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numTransfer,
transferFlags,
transferIndices,
keepIndices
);
// delete transfer point from this processor
if( !setRemoveKeepIndices(transferIndices, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
return true;
}
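For reference, a self-contained sketch of the flag-and-count pattern used in transferData (step 1) and in afterIteration above, written with plain Kokkos types instead of the pFlow views and execution-policy wrappers; the view names and the x < 0 "negative side" test are illustrative assumptions only:

#include <Kokkos_Core.hpp>
#include <cstdio>

int main(int argc, char* argv[])
{
    Kokkos::initialize(argc, argv);
    {
        const int n = 1000;

        // stand-ins for the point positions and the per-point transfer flags
        Kokkos::View<double*>   x("x", n);
        Kokkos::View<unsigned*> flags("flags", n);

        // fill x with a simple pattern so that some points end up "outside"
        Kokkos::parallel_for(
            "fill", n, KOKKOS_LAMBDA(const int i) { x(i) = 0.5 - (i % 4); });

        // flag every point on the "negative side" (here simply x < 0) and
        // count them in the same pass, as transferData/afterIteration do
        unsigned numToTransfer = 0;
        Kokkos::parallel_reduce(
            "markAndCount",
            n,
            KOKKOS_LAMBDA(const int i, unsigned& sum)
            {
                if (x(i) < 0.0)
                {
                    flags(i) = 1u;
                    sum++;
                }
            },
            numToTransfer);

        std::printf("points to transfer: %u\n", numToTransfer);
    }
    Kokkos::finalize();
    return 0;
}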


@@ -1,7 +1,7 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
@@ -21,7 +21,6 @@ Licence:
#ifndef __boundaryProcessor_hpp__
#define __boundaryProcessor_hpp__
#include "boundaryBase.hpp"
#include "mpiTypes.hpp"
#include "dataSender.hpp"
@@ -30,78 +29,82 @@ Licence:
namespace pFlow::MPI
{
class boundaryProcessor
: public boundaryBase
{
public:

    using pointFieldAccessType = typename boundaryBase::pointFieldAccessType;

private:

    uint32 neighborProcNumPoints_ = 0;

    uint32 thisNumPoints_ = 0;

    realx3Vector_D neighborProcPoints_;

    dataSender<realx3> sender_;

    dataReciever<realx3> reciever_;

    mutable bool dataRecieved_ = true;

    uint32 numToTransfer_ = 0;

    uint32 numToRecieve_ = 0;

    uint32Vector_D transferIndices_{"transferIndices"};

    void checkSize() const;

    void checkDataRecieved() const;

    /// @brief Update processor boundary data for this processor
    /// @param step It is either 1 or 2 in the input to indicate
    /// the update step
    /// @return true if successful
    /// @details This method is called by boundaryList two times to
    /// allow processor boundaries to exchange data in two steps.
    /// The first step is a buffered non-blocking send and the second
    /// step is non-blocking recieve to get data.
    bool updataBoundary(int step) override;

    bool transferData(int step) override;

public:

    TypeInfo("boundary<processor>");

    boundaryProcessor(
        const dictionary &dict,
        const plane &bplane,
        internalPoints &internal,
        boundaryList &bndrs,
        uint32 thisIndex);

    ~boundaryProcessor() override = default;

    add_vCtor(
        boundaryBase,
        boundaryProcessor,
        dictionary);

    bool beforeIteration(uint32 iterNum, real t, real dt) override;

    bool iterate(uint32 iterNum, real t, real dt) override;

    bool afterIteration(uint32 iterNum, real t, real dt) override;

    /// @brief Return number of points in the neighbor processor boundary.
    /// This is overriden from boundaryBase.
    uint32 neighborProcSize() const override;

    /// @brief Return a reference to point positions in the neighbor
    /// processor boundary.
    realx3Vector_D &neighborProcPoints() override;

    /// @brief Return a const reference to point positions in the
    /// neighbor processor boundary.
    const realx3Vector_D &neighborProcPoints() const override;

};
} // namespace pFlow::MPI