This commit is contained in:
Hamidreza Norouzi 2024-01-20 11:30:49 -08:00
parent 6bc180275e
commit 6969b71cc5
24 changed files with 3172 additions and 0 deletions

View File

@ -0,0 +1,36 @@
#add Zoltan
set(Zoltan_Install_DIR)
if(DEFINED ENV{Zoltan_DIR})
set(Zoltan_Install_DIR $ENV{Zoltan_DIR})
else()
set(Zoltan_Install_DIR $ENV{HOME}/PhasicFlow/Zoltan)
endif()
message(STATUS "Zoltan install directory is ${Zoltan_Install_DIR}")
set(ZOLTAN_PREFIX "${Zoltan_Install_DIR}" CACHE STRING "Zoltan install directory")
find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include")
message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}")
find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib")
message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")
set(SourceFiles
partitioning.cpp
rcb1DPartitioning.cpp
domain/MPISimulationDomain.cpp)
set(link_libs Kokkos::kokkos phasicFlow PRIVATE MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm )
pFlow_add_library_install(MPIParallelization SourceFiles link_libs)
target_include_directories(MPIParallelization PUBLIC ${ZOLTAN_INCLUDE_DIR})

View File

@ -0,0 +1,61 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "boundaryProcessor.hpp"
#include "dictionary.hpp"
pFlow::boundaryProcessor::boundaryProcessor
(
const dictionary& dict,
const plane& bplane,
internalPoints& internal
)
:
boundaryBase(dict, bplane, internal)
{
}
bool pFlow::boundaryProcessor::beforeIteratoin
(
uint32 iterNum,
real t
)
{
return true;
}
bool pFlow::boundaryProcessor::iterate
(
uint32 iterNum,
real t
)
{
return true;
}
bool pFlow::boundaryProcessor::afterIteration
(
uint32 iterNum,
real t
)
{
return true;
}

View File

@ -0,0 +1,67 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __boundaryProcessor_hpp__
#define __boundaryProcessor_hpp__
#include "boundaryBase.hpp"
namespace pFlow
{
class boundaryProcessor
:
public boundaryBase
{
protected:
public:
TypeInfo("boundary<processor>");
boundaryProcessor(
const dictionary& dict,
const plane& bplane,
internalPoints& internal);
virtual
~boundaryProcessor() = default;
add_vCtor
(
boundaryBase,
boundaryProcessor,
dictionary
);
bool beforeIteratoin(uint32 iterNum, real t) override;
bool iterate(uint32 iterNum, real t) override;
bool afterIteration(uint32 iterNum, real t) override;
};
}
#endif //__boundaryProcessor_hpp__

View File

@ -0,0 +1,5 @@
#include "gatherMaster.hpp"

View File

@ -0,0 +1,97 @@
#ifndef __dataIOMPI_hpp__
#define __dataIOMPI_hpp__
#include "dataIO.hpp"
#include "pFlowProcessors.hpp"
#ifdef pFlow_Build_MPI
#include "gatherMaster.hpp"
#endif
namespace pFlow
{
template<typename T>
class dataIOMPI
:
public dataIO<T>
{
protected:
bool gatherData(span<T> data ) override
{
if(this->ioPattern_.isAllProcessorsDifferent())
{
this->bufferSpan_ = data;
return true;
}
if( this->ioPattern_.isMasterProcessorDistribute())
{
#ifdef pFlow_Build_MPI
auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());
if(!gatherT.gatherData(data))
{
fatalErrorInFunction<<"Error in gathering data to master"<<endl;
return false;
}
this->buffer_ = gatherT.moveData();
this->bufferSpan_ = makeSpan(this->buffer_);
return true;
#else
notImplementedFunction;
fatalExit;
return false;
#endif //pFlow_Build_MPI
}
if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
{
if( this->ioPattern_.isMaster() )
{
this->bufferSpan_ = data;
return true;
}
else
{
this->bufferSpan_ = span<T>(nullptr, 0);
return true;
}
}
return false;
}
public:
TypeInfo("dataIO<MPI>");
dataIOMPI(const IOPattern& iop)
:
dataIO<T>(iop)
{}
dataIOMPI(const dataIOMPI&) = default;
dataIOMPI(dataIOMPI&&) = default;
dataIOMPI& operator=(const dataIOMPI&) = default;
dataIOMPI& operator=(dataIOMPI&&) = default;
~dataIOMPI() = default;
};
}
#endif

View File

@ -0,0 +1,400 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "MPISimulationDomain.hpp"
#include "systemControl.hpp"
#include "rcb1DPartitioning.hpp"
#include "scatteredMasterDistribute.hpp"
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPISimulationDomain::MPISimulationDomain(systemControl& control)
:
simulationDomain(control),
communication_(pFlowProcessors()),
subDomains_(pFlowProcessors()),
domainPartition_( makeUnique<rcb1DPartitioning>(subDict("decomposition"), globalBox_))
{}
bool pFlow::MPISimulationDomain::createBoundaryDicts()
{
auto& boundaries = this->subDict("boundaries");
this->addDict("MPIBoundaries", boundaries);
auto& mpiBoundaries = this->subDict("MPIBoundaries");
real neighborLength = boundaries.getVal<real>("neighborLength");
auto neighbors = findPlaneNeighbors();
for(uint32 i=0; i<sizeOfBoundaries(); i++)
{
word bName = bundaryName(i);
if( !boundaries.containsDictionay(bName) )
{
fatalErrorInFunction<<"dictionary "<< bName<<
"does not exist in "<< boundaries.globalName()<<endl;
return false;
}
auto& bDict = mpiBoundaries.subDict(bName);
if(!bDict.addOrKeep("neighborLength", neighborLength))
{
fatalErrorInFunction<<"error in adding neighborLength to "<< bName <<
"in dictionary "<< boundaries.globalName()<<endl;
return false;
}
if( neighbors[i] == -1 )
{
bDict.add("mirrorProcessorNo", processors::globalRank());
}
else
{
bDict.add("mirrorProcessorNo", neighbors[i]);
bDict.addOrReplace("type", "processor");
}
}
return true;
}
bool pFlow::MPISimulationDomain::setThisDomain()
{
thisDomain_ = domain(domainPartition_->localBox());
if(!communication_.collectAllToAll(thisDomain_, subDomains_))
{
fatalErrorInFunction<< "Failed to distributed domains"<<endl;
return false;
}
return true;
}
std::vector<int> pFlow::MPISimulationDomain::findPlaneNeighbors() const
{
std::vector<int> neighbors(sizeOfBoundaries(), -2);
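// neighbor codes: -2 means not determined, -1 means the face lies on the
// global domain boundary (and is mirrored to this processor), otherwise it
// holds the rank of the neighboring sub-domain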
domain gDomain(globalBox_);
// left
if( thisDomain_.left().parallelTouch( gDomain.left() ) )
{
neighbors[0] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.left().parallelTouch(
subDomains_[i].right()) )
{
neighbors[0] = i;
break;
}
}
// right
if( thisDomain_.right().parallelTouch( gDomain.right() ) )
{
neighbors[1] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.right().parallelTouch(
subDomains_[i].left()) )
{
neighbors[1] = i;
break;
}
}
// bottom
if( thisDomain_.bottom().parallelTouch( gDomain.bottom() ) )
{
neighbors[2] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.bottom().parallelTouch(
subDomains_[i].top()) )
{
neighbors[2] = i;
break;
}
}
// top
if( thisDomain_.top().parallelTouch( gDomain.top() ) )
{
neighbors[3] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.top().parallelTouch(
subDomains_[i].bottom()) )
{
neighbors[3] = i;
break;
}
}
// rear
if( thisDomain_.rear().parallelTouch( gDomain.rear() ) )
{
neighbors[4] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.rear().parallelTouch(
subDomains_[i].front()) )
{
neighbors[4] = i;
break;
}
}
// front
if( thisDomain_.front().parallelTouch( gDomain.front() ) )
{
neighbors[5] = -1;
}
for(int i=0; i<subDomains_.size(); i++)
{
if(i == subDomains_.rank())continue;
if( thisDomain_.front().parallelTouch(
subDomains_[i].rear()) )
{
neighbors[5] = i;
break;
}
}
return neighbors;
}
const pFlow::dictionary &
pFlow::MPISimulationDomain::thisBoundaryDict() const
{
return this->subDict("MPIBoundaries");
}
bool pFlow::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{
pFlagTypeHost flags(pointPos.size(), 0 , pointPos.size());
initialNumPoints_ = pointPos.size();
if( !domainPartition_->partition(pointPos, flags) )
{
return false;
}
if(!setThisDomain()) return false;
if(!createBoundaryDicts()) return false;
return true;
}
pFlow::uint32 pFlow::MPISimulationDomain::initialNumberInThis() const
{
uint32 numImport = domainPartition_->numberImportThisProc();
uint32 numExport = domainPartition_->numberExportThisProc();
return initialNumPoints_ + numImport - numExport;
}
bool pFlow::MPISimulationDomain::initialTransferBlockData
(
span<char> src,
span<char> dst,
size_t sizeOfElement
)const
{
MPI::scatteredMasterDistribute<char> dataDist(sizeOfElement, pFlowProcessors());
auto lists = domainPartition_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPISimulationDomain::initialTransferBlockData
(
span<realx3> src,
span<realx3> dst
)const
{
MPI::scatteredMasterDistribute<realx3>
dataDist(pFlowProcessors());
auto lists = domainPartition_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPISimulationDomain::initialTransferBlockData
(
span<real> src,
span<real> dst
)const
{
MPI::scatteredMasterDistribute<real>
dataDist(pFlowProcessors());
auto lists = domainPartition_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPISimulationDomain::initialTransferBlockData
(
span<uint32> src,
span<uint32> dst
)const
{
MPI::scatteredMasterDistribute<uint32>
dataDist(pFlowProcessors());
auto lists = domainPartition_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPISimulationDomain::initialTransferBlockData
(
span<int32> src,
span<int32> dst
)const
{
MPI::scatteredMasterDistribute<int32>
dataDist(pFlowProcessors());
auto lists = domainPartition_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
/*bool pFlow::MPISimulationDomain::updateDomains(
span<realx3> pointPos,
pFlagTypeHost flags)
{
if( !domainPartition_->partition(pointPos, flags) )
{
return false;
}
if(!setThisDomain()) return false;
if(!createBoundaryDicts()) return false;
return true;
}*/
pFlow::uint32 pFlow::MPISimulationDomain::numberToBeImported() const
{
return domainPartition_->numberImportThisProc();
}
pFlow::uint32 pFlow::MPISimulationDomain::numberToBeExported() const
{
return domainPartition_->numberExportThisProc();
}
bool pFlow::MPISimulationDomain::requiresDataTransfer() const
{
notImplementedFunction;
return false;
}

View File

@ -0,0 +1,116 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPISimulationDomain_hpp__
#define __MPISimulationDomain_hpp__
#include "simulationDomain.hpp"
#include "partitioning.hpp"
#include "procVector.hpp"
#include "procCommunication.hpp"
namespace pFlow
{
class MPISimulationDomain
:
public simulationDomain
{
protected:
MPI::procCommunication communication_;
MPI::procVector<domain> subDomains_;
uniquePtr<partitioning> domainPartition_ = nullptr;
uint32 initialNumPoints_ = 0;
bool createBoundaryDicts() override;
bool setThisDomain() override;
std::vector<int>
findPlaneNeighbors()const;
public:
TypeInfo("simulationDomain<MPI>");
MPISimulationDomain(systemControl& control);
virtual
~MPISimulationDomain()=default;
add_vCtor
(
simulationDomain,
MPISimulationDomain,
systemControl
);
const dictionary& thisBoundaryDict()const override;
/// @brief
/// @param pointPos
/// @return
bool initialUpdateDomains(span<realx3> pointPos)override;
/// @brief
/// @return
uint32 initialNumberInThis()const override;
bool initialTransferBlockData(
span<char> src,
span<char> dst,
size_t sizeOfElement)const override;
bool initialTransferBlockData(
span<realx3> src,
span<realx3> dst) const override;
bool initialTransferBlockData(
span<real> src,
span<real> dst) const override;
bool initialTransferBlockData(
span<uint32> src,
span<uint32> dst) const override;
bool initialTransferBlockData(
span<int32> src,
span<int32> dst) const override;
/*bool updateDomains(
span<realx3> pointPos,
pFlagTypeHost flags) override;*/
uint32 numberToBeImported()const override;
uint32 numberToBeExported()const override;
bool requiresDataTransfer() const override;
};
}
#endif

View File

@ -0,0 +1,105 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __gatherMaster_hpp__
#define __gatherMaster_hpp__
#include <numeric>
#include "procCommunication.hpp"
namespace pFlow::MPI
{
template<typename T>
class gatherMaster
:
public procCommunication
{
protected:
std::vector<T> buffer_;
public:
gatherMaster(const localProcessors& procs)
:
procCommunication(procs)
{}
span<T> getData()
{
if(this->localMaster())
return span<T>( buffer_.data(), buffer_.size());
else
return span<T>(nullptr, 0);
}
std::vector<T> moveData()
{
return std::move(buffer_);
}
bool gatherData(span<T> data)
{
int thisN = data.size();
procVector<int> numElems(this->processors(), true);
procVector<int> displ(this->processors(), true);
if( !this->collectAllToMaster(thisN, numElems) )
{
fatalErrorInFunction<<
"error in collecting number of elements from processors"<<endl;
return false;
}
auto totalN = std::accumulate(
numElems.begin(),
numElems.end(),
static_cast<int>(0));
buffer_.resize(totalN);
std::exclusive_scan(
numElems.begin(),
numElems.end(),
displ.begin(),
0);
auto bufferSpan = makeSpan(buffer_);
return CheckMPI(
Gatherv(
data,
bufferSpan,
makeSpan(numElems),
makeSpan(displ),
this->localMasterNo(),
this->localCommunicator()),
false);
}
};
}
#endif
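
A minimal usage sketch of gatherMaster<T> (not part of the changed files), assuming the pFlowProcessors() helper and makeSpan used elsewhere in this commit; the payload values are hypothetical.

#include "gatherMaster.hpp"
#include "pFlowProcessors.hpp"

bool gatherExample()
{
	using namespace pFlow;

	MPI::gatherMaster<real> gather(pFlowProcessors());

	// hypothetical payload: ten copies of this processor's global rank
	std::vector<real> local(10, static_cast<real>(processors::globalRank()));

	if( !gather.gatherData( makeSpan(local) ) )
	{
		fatalErrorInFunction<<"gathering to master failed"<<endl;
		return false;
	}

	// filled only on the local master; empty on the other processors
	std::vector<real> all = gather.moveData();
	return true;
}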

View File

@ -0,0 +1,383 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__
#include "mpiTypes.hpp"
#include "types.hpp"
#include "span.hpp"
#ifdef pFlow_Build_MPI
namespace pFlow::MPI
{
extern DataType realx3Type__;
extern DataType realx4Type__;
extern DataType int32x3Type__;
template<typename T>
auto constexpr Type()
{
return MPI_BYTE;
}
template<typename T>
auto constexpr sFactor()
{
return sizeof(T);
}
template<>
auto constexpr Type<char>()
{
return MPI_CHAR;
}
template<>
auto constexpr sFactor<char>()
{
return 1;
}
template<>
auto constexpr Type<short>()
{
return MPI_SHORT;
}
template<>
auto constexpr sFactor<short>()
{
return 1;
}
template<>
auto constexpr Type<unsigned short>()
{
return MPI_UNSIGNED_SHORT;
}
template<>
auto constexpr sFactor<unsigned short>()
{
return 1;
}
template<>
auto constexpr Type<int>()
{
return MPI_INT;
}
template<>
auto constexpr sFactor<int>()
{
return 1;
}
template<>
auto constexpr Type<unsigned int>()
{
return MPI_UNSIGNED;
}
template<>
auto constexpr sFactor<unsigned int>()
{
return 1;
}
template<>
auto constexpr Type<long>()
{
return MPI_LONG;
}
template<>
auto constexpr sFactor<long>()
{
return 1;
}
template<>
auto constexpr Type<unsigned long>()
{
return MPI_UNSIGNED_LONG;
}
template<>
auto constexpr sFactor<unsigned long>()
{
return 1;
}
template<>
auto constexpr Type<float>()
{
return MPI_FLOAT;
}
template<>
auto constexpr sFactor<float>()
{
return 1;
}
template<>
auto constexpr Type<double>()
{
return MPI_DOUBLE;
}
template<>
auto constexpr sFactor<double>()
{
return 1;
}
template<>
inline
auto Type<realx3>()
{
return realx3Type__;
}
template<>
auto constexpr sFactor<realx3>()
{
return 1;
}
template<>
inline
auto Type<realx4>()
{
return realx4Type__;
}
template<>
auto constexpr sFactor<realx4>()
{
return 1;
}
template<>
inline
auto Type<int32x3>()
{
return int32x3Type__;
}
template<>
auto constexpr sFactor<int32x3>()
{
return 1;
}
/*inline
auto createByteSequence(int sizeOfElement)
{
DataType newType;
MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType);
MPI_Type_commit(&newType);
return newType;
}*/
inline
auto TypeCommit(DataType* type)
{
return MPI_Type_commit(type);
}
inline
auto TypeFree(DataType* type)
{
return MPI_Type_free(type);
}
template<typename T>
inline auto getCount(Status* status, int& count)
{
int lCount;
auto res = MPI_Get_count(status, Type<T>(), &lCount);
count = lCount/sFactor<T>();
return res;
}
template<typename T>
inline int convertIndex(const int& ind)
{
return ind*sFactor<T>();
}
template<typename T>
inline auto send(span<T> data, int dest, int tag, Comm comm)
{
return MPI_Send(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp)
{
return MPI_Scan(&sData, &rData, sFactor<T>()*1, Type<T>(), op , comm );
}
// gathering one scalar data to root processor
template<typename T>
inline auto gather(T sendData, span<T>& recvData, int root, Comm comm)
{
return MPI_Gather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto allGather(T sendData, span<T>& recvData, Comm comm)
{
return MPI_Allgather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
comm);
}
template<typename T>
inline auto scatter(span<T> sendData, T& recvData, int root, Comm comm)
{
return MPI_Scatter(
sendData.data(),
sFactor<T>()*1,
Type<T>(),
&recvData,
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto Bcast(T& sendData, int root, Comm comm)
{
return MPI_Bcast(
&sendData, sFactor<T>()*1, Type<T>(), root, comm);
}
template<typename T>
bool typeCreateIndexedBlock(
span<int32> index,
DataType &newType)
{
auto res = MPI_Type_create_indexed_block(
index.size(),
sFactor<T>(),
index.data(),
Type<T>(),
&newType);
if(res == Success)
{
TypeCommit(&newType);
}
else
{
return false;
}
return true;
}
template<typename T>
inline auto Gatherv
(
span<T> sendData,
span<T>& recvData,
span<int> recvCounts,
span<int> displs,
int root,
Comm comm)
{
return MPI_Gatherv(
sendData.data(),
sendData.size()*sFactor<T>(),
Type<T>(),
recvData.data(),
recvCounts.data(),
displs.data(),
Type<T>(),
root,
comm
);
}
inline auto Wait(Request* request, Status* status)
{
return MPI_Wait(request, status);
}
inline auto typeFree(DataType& type)
{
return MPI_Type_free(&type);
}
}
#endif //pFlow_Build_MPI
#endif //__mpiCommunication_H__
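
A short sketch of how the Type<T>()/sFactor<T>() traits and the send/recv wrappers above fit together in an MPI build; the ranks, tag, and buffer size are hypothetical, and pFlowProcessors() is assumed available as in the rest of this commit.

bool exchangeExample()
{
	using namespace pFlow;
	using namespace pFlow::MPI;

	std::vector<realx3> pts(100);                      // hypothetical buffer
	auto comm = pFlowProcessors().localCommunicator();
	auto rank = pFlowProcessors().localRank();

	if( rank == 0 )
	{
		// realx3 maps to realx3Type__ and has sFactor 1
		return send(makeSpan(pts), 1, 0, comm) == Success;
	}
	if( rank == 1 )
	{
		Status st;
		if( recv(makeSpan(pts), 0, 0, comm, &st) != Success ) return false;
		int n = 0;
		getCount<realx3>(&st, n);                      // element count, corrected by sFactor
		return n == 100;
	}
	return true;
}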

View File

@ -0,0 +1,75 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiTypes_H__
#define __mpiTypes_H__
#ifdef pFlow_Build_MPI
#include <mpi.h>
namespace pFlow::MPI
{
// types
using Comm = MPI_Comm;
using Group = MPI_Group;
using Status = MPI_Status;
using Offset = MPI_Offset;
using Request = MPI_Request;
using Operation = MPI_Op;
using Information = MPI_Info;
using DataType = MPI_Datatype;
inline Comm CommWorld = MPI_COMM_WORLD;
// all nulls
inline auto ProcNull = MPI_PROC_NULL;
inline auto InfoNull = MPI_INFO_NULL;
inline auto RequestNull = MPI_REQUEST_NULL;
inline auto StatusIgnore = MPI_STATUS_IGNORE;
inline auto StatusesIgnore = MPI_STATUSES_IGNORE;
inline auto FileNull = MPI_FILE_NULL;
inline Comm CommNull = MPI_COMM_NULL;
inline auto TypeNull = MPI_DATATYPE_NULL;
// errors
inline const auto Success = MPI_SUCCESS;
inline const auto ErrOp = MPI_ERR_OP;
inline const auto SumOp = MPI_SUM;
inline const size_t MaxNoProcessors = 2048;
}
#else
namespace pFlow::MPI
{
}
#endif // pFlow_Build_MPI
#endif //__mpiTypes_H__

View File

@ -0,0 +1,113 @@
#include "partitioning.hpp"
#include "error.hpp"
#include "streams.hpp"
void pFlow::partitioning::freeZoltan()
{
if(validPointers_)
{
Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_,
&importProcs_, &importToPart_);
Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_,
&exportProcs_, &exportToPart_);
validPointers_ = false;
}
zoltan_.release();
}
pFlow::partitioning::partitioning
(
const dictionary& dict,
const box& globalBox
)
:
globalBox_(globalBox)
{
if(!zoltanInitialized__)
{
auto rc = Zoltan_Initialize
(
processors::argc(),
processors::argv(),
&version_
);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<<"Cannot initialize zoltan"<<endl;
fatalExit;
}
zoltanInitialized__ = true;
}
// Creates Zoltan object
zoltan_ = std::make_unique<Zoltan>(pFlowProcessors().localCommunicator());
zoltan_->Set_Param("DEBUG_LEVEL", "0");
zoltan_->Set_Param("LB_METHOD", "RCB");
zoltan_->Set_Param("NUM_GID_ENTRIES", "1");
zoltan_->Set_Param("NUM_LID_ENTRIES", "1");
zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0");
zoltan_->Set_Param("RETURN_LISTS", "ALL");
}
bool pFlow::partitioning::partition(span<realx3> points, pFlagTypeHost flags)
{
pointCollection pointCollctn{points, flags};
return partition(pointCollctn);
}
int GetObjectSize
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int *ierr
)
{
*ierr = ZOLTAN_OK;
pFlow::uint32 s = *(static_cast<pFlow::uint32*>(data));
return static_cast<int>(s);
}
void PackObject
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int dest,
int size,
char *buf,
int *ierr
)
{
}
bool pFlow::partitioning::migrateData(span<char> src, span<char> dst, uint32 elementSize)
{
dataCollection data{src, dst, elementSize};
zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize);
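// note: data migration is not complete yet; only the object-size callback is
// registered here and the function returns false for now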
return false;
}
pFlow::partitioning::~partitioning()
{
freeZoltan();
}
void pFlow::partitioning::printBox()const
{
pOutput<< "localBox:" << localBox_<<endl;
}

View File

@ -0,0 +1,168 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __partitioning_hpp__
#define __partitioning_hpp__
#include "zoltan_cpp.h"
#include "pFlowProcessors.hpp"
#include "virtualConstructor.hpp"
#include "box.hpp"
#include "span.hpp"
#include "pointFlag.hpp"
#include "procVector.hpp"
namespace pFlow
{
struct pointCollection
{
span<realx3> points_;
pFlagTypeHost pFlag_;
uint32 numActivePoints()const
{
return pFlag_.numActive();
}
};
struct dataCollection
{
span<char> srcData_;
span<char> dstData_;
uint32 elementSize_;
};
class partitioning
{
protected:
float version_ = 0.0;
std::unique_ptr<Zoltan> zoltan_ = nullptr;
bool validPointers_ = false;
box globalBox_;
box localBox_;
int32 changes_, numImport_, numExport_;
id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_;
int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_;
uint32 numBeforePartition_ = 0 ;
static inline bool zoltanInitialized__ = false;
void freeZoltan();
virtual
bool partition(pointCollection& points) = 0;
public:
partitioning(
const dictionary& dict,
const box& globalBox);
virtual
~partitioning();
create_vCtor(
partitioning,
dictionary,
(
const dictionary& dict,
const box& globalBox
),
(dict, globalBox));
bool partition(
span<realx3> points,
pFlagTypeHost flags);
bool migrateData(span<char> src, span<char> dst, uint32 elementSize);
inline
auto localBox()const
{
return localBox_;
}
inline
const auto& globalBox()const
{
return globalBox_;
}
inline
bool partitionsChanged()const
{
return changes_ == 1;
}
uint32 numberImportThisProc()const
{
return numImport_;
}
uint32 numberExportThisProc()const
{
return numExport_;
}
virtual
span<int32> exportList(int procNo)const = 0;
virtual
pFlow::MPI::procVector<span<int32>> allExportLists()const=0;
void printBox()const;
};
}
#endif //__partitioning_hpp__
/*static
int getNumberOfPoints(void *data, int32 *ierr);
static
void getPointList(
void *data,
int32 sizeGID,
int32 sizeLID,
id_t* globalID,
id_t* localID,
int32 wgt_dim,
float *obj_wgts,
int32 *ierr);*/

View File

@ -0,0 +1,30 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "procCommunication.hpp"
pFlow::MPI::procCommunication::procCommunication
(
const localProcessors& proc
)
:
processors_(proc)
{}

View File

@ -0,0 +1,178 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __procCommunication_hpp__
#define __procCommunication_hpp__
#include "procVector.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
class procCommunication
{
protected:
const localProcessors& processors_;
public:
procCommunication(const localProcessors& proc);
~procCommunication()=default;
/// @brief Return the local processors object used by this communicator
inline
const auto& processors()const
{
return processors_;
}
/// @brief Tell if this processor is the master processor in the local
/// communicator
/// @return true if this processor is master
inline
bool localMaster()const
{
return processors_.localMaster();
}
inline
auto localSize()const
{
return processors_.localSize();
}
inline
auto localRank()const
{
return processors_.localRank();
}
inline
auto localCommunicator()const
{
return processors_.localCommunicator();
}
/// @brief return the master number in the local communicator
auto localMasterNo()const
{
return processors_.localMasterNo();
}
/// Broadcast a single value from the local master to all processors, including the master itself (local communicator)
template<typename T>
std::pair<T,bool> distributeMasterToAll(const T& val)
{
T retVal = val;
auto res = CheckMPI(
Bcast(retVal, localMasterNo(),localCommunicator() ),
false);
return {retVal, res};
}
/// @brief Broadcast a single value from the local master to all processors, including the master (in local communicator)
/// @param val value to be sent
/// @param recvVal received value
/// @return true if successful and false if it fails
template<typename T>
bool distributeMasterToAll(const T& val, T& recvVal)
{
recvVal = val;
return CheckMPI(
Bcast(recvVal, localMasterNo(), localCommunicator()),
false);
}
/// @brief Scatter the values in the vector (whose size equals the number of
/// processors in the local communicator), one value to each processor
template<typename T>
std::pair<T,bool> distributeMasterToAll(const procVector<T>& vals)
{
T val;
auto vec = vals.getSpan();
auto res = CheckMPI(
scatter(vec, val, localMasterNo(), localCommunicator()),
false);
return {val, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and all values are collected on every processor
template<typename T>
std::pair<procVector<T>, bool> collectAllToAll(const T& val)
{
procVector<T> allVec;
auto vec = allVec.getSpan();
auto res = CheckMPI(
allGather(val, vec, localCommunicator()),
false);
return {allVec, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and all values are collected on every processor
template<typename T>
bool collectAllToAll(const T& val, procVector<T>& allVec)
{
auto vec = allVec.getSpan();
return CheckMPI(
allGather(val, vec, localCommunicator()),
false);
}
/// @brief Each processor in the local communicator calls this function with a value
/// and all values are collected in the master processor
template<typename T>
std::pair<procVector<T>,bool> collectAllToMaster(const T& val)
{
// only on master processor
procVector<T> masterVec(processors_, true);
auto masterSpan = masterVec.getSpan();
auto res = CheckMPI(
gather(val,masterSpan, localMasterNo(), localCommunicator()),
false);
return {masterVec, res};
}
template<typename T>
bool collectAllToMaster(const T& val, procVector<T>& masterVec)
{
// only on master processor
auto [vec, res] = collectAllToMaster(val);
masterVec = vec;
return res;
}
}; //procCommunication
} // pFlow::MPI
#endif //__procCommunication_hpp__
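
A brief sketch of the collective helpers declared above; the values are hypothetical and the calls mirror how gatherMaster and MPISimulationDomain use procCommunication elsewhere in this commit.

bool collectiveExample()
{
	using namespace pFlow;
	using namespace pFlow::MPI;

	procCommunication comm(pFlowProcessors());

	// hypothetical per-processor value, e.g. a local point count
	uint32 nLocal = 42;

	// gathered only on the local master (procVector sized to the communicator)
	auto [counts, ok1] = comm.collectAllToMaster(nLocal);

	// every processor receives the master's value
	auto [globalSetting, ok2] = comm.distributeMasterToAll(uint32(100));

	return ok1 && ok2;
}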

View File

@ -0,0 +1,199 @@
#ifndef __procVector_hpp__
#define __procVector_hpp__
// from PhasicFlow
#include "localProcessors.hpp"
#include "span.hpp"
#include "streams.hpp"
#include "IOPattern.hpp"
#include "mpiTypes.hpp"
namespace pFlow::MPI
{
template<typename T>
class procVector
:
public std::vector<T>
{
public:
using ProcVectorType = procVector<T>;
using VectorType = std::vector<T>;
protected:
int rank_ = 0;
bool isMaster_ = false;
using VectorType::reserve;
using VectorType::resize;
using VectorType::assign;
using VectorType::clear;
using VectorType::erase;
public:
procVector(
const localProcessors& procs,
bool onlyMaster = false)
:
rank_(procs.localRank()),
isMaster_(procs.localMaster())
{
if( onlyMaster && !isMaster_ ) return;
this->reserve(procs.localSize());
this->resize(procs.localSize());
}
procVector(
const T& val,
const localProcessors& procs,
bool onlyMaster = false)
:
procVector(procs, onlyMaster)
{
std::fill(this->begin(), this->end(), val);
}
procVector(const T& val, const procVector& src)
:
rank_(src.rank_),
isMaster_(src.isMaster_)
{
this->reserve(src.size());
this->resize(src.size());
std::fill(this->begin(), this->end(), val);
}
procVector(const localProcessors& procs, const VectorType& src)
:
procVector(procs)
{
if(src.size()!= this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in construction"<<endl;
fatalExit;
}
this->assign(src.begin(), src.end());
}
procVector(const procVector&) = default;
procVector(procVector&&) = default;
procVector& operator=(const procVector&) = default;
procVector& operator=(procVector&&) = default;
procVector& operator=(const VectorType& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in copy assignment"<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(src);
return *this;
}
procVector& operator=(VectorType&& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move assignment"
<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(std::move(src));
return *this;
}
procVector(const localProcessors& procs, VectorType&& src)
:
VectorType(std::move(src))
{
if(this->size()!= static_cast<size_t>(procs.localSize()))
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move"<<endl;
fatalExit;
}
isMaster_ = procs.localMaster();
rank_ = procs.localRank();
}
~procVector()=default;
inline
auto& thisValue()
{
return VectorType::operator[](rank_);
}
inline
const auto& thisValue()const
{
return VectorType::operator[](rank_);
}
inline
auto size()const
{
return VectorType::size();
}
inline
auto rank()const
{
return rank_;
}
inline
auto getSpan()
{
return span<T>(this->data(), this->size());
}
inline
auto getSpan()const
{
return span<T>(const_cast<T*>(this->data()), this->size());
}
bool write(
iOstream& os,
const IOPattern& iop ) const
{
return writeStdVector(os, *this, iop);
}
};
template<typename T>
inline iOstream& operator << (iOstream& os, const procVector<T>& ovec )
{
if( !ovec.write(os, IOPattern::AllProcessorsDifferent) )
{
ioErrorInFile(os.name(), os.lineNumber());
fatalExit;
}
return os;
}
}
#endif
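
A small sketch of how procVector is meant to be used: one slot per processor of the local communicator, with the slot of the calling rank reached through thisValue(); the stored value is hypothetical.

void procVectorExample()
{
	using namespace pFlow;

	// one slot per processor in the local communicator
	MPI::procVector<int> counts(pFlowProcessors());

	counts.thisValue() = 7;        // the entry owned by this rank (hypothetical value)

	auto s = counts.getSpan();     // contiguous view handed to the MPI wrappers
	(void) s;
}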

View File

@ -0,0 +1,29 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
template<class T, class MemorySpace>
pFlow::processorBoundaryField<T, MemorySpace>::processorBoundaryField
(
const boundaryBase& boundary,
InternalFieldType& internal
)
:
BoundaryFieldType(boundary, internal)
{}

View File

@ -0,0 +1,80 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryField_hpp__
#define __processorBoundaryField_hpp__
#include "boundaryField.hpp"
namespace pFlow
{
template< class T, class MemorySpace = void>
class processorBoundaryField
:
public boundaryField<T, MemorySpace>
{
public:
using processorBoundaryFieldType = processorBoundaryField<T, MemorySpace>;
using BoundaryFieldType = boundaryField<T, MemorySpace>;
using InternalFieldType = typename BoundaryFieldType::InternalFieldType;
using memory_space = typename BoundaryFieldType::memory_space;
using execution_space = typename BoundaryFieldType::execution_space;
public:
TypeInfo("boundaryField<processor>");
processorBoundaryField(
const boundaryBase& boundary,
InternalFieldType& internal);
add_vCtor
(
BoundaryFieldType,
processorBoundaryFieldType,
boundaryBase
);
bool hearChanges
(
const message& msg,
const anyList& varList
) override
{
notImplementedFunction;
return false;
}
};
}
#include "processorBoundaryField.cpp"
#endif //__processorBoundaryField_hpp__

View File

@ -0,0 +1,10 @@
//#include "Field.hpp"
#include "createBoundaryFields.hpp"
#include "processorBoundaryField.hpp"
createBoundary(pFlow::int8, pFlow::HostSpace, processor);
createBoundary(pFlow::real, pFlow::HostSpace, processor);

View File

@ -0,0 +1,319 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "zoltan_cpp.h"
#include "error.hpp"
#include "processors.hpp"
#include "rcb1DPartitioning.hpp"
bool pFlow::rcb1DPartitioning::partition(pointCollection &points)
{
zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0");
zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1");
zoltan_->Set_Param("KEEP_CUTS", "1");
zoltan_->Set_Param("REDUCE_DIMENSIONS", "1");
zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1");
zoltan_->Set_Param("AVERAGE_CUTS", "0");
zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0");
zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points);
zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points);
zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points);
switch (direction_)
{
case Direction::X:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points);
break;
case Direction::Y:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points);
break;
case Direction::Z:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points);
break;
}
int numGidEntries_, numLidEntries_;
int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_,
numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_,
numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<< "Zoltan faild to perform partitioning."<<endl;
return false;
}
for(auto& ids:exportIds_)
{
ids.clear();
}
std::vector<int32> thisProc(points.numActivePoints(),0);
for(auto i =0; i<numExport_; i++)
{
exportIds_[exportProcs_[i]].push_back(exportGlobalGids_[i]);
thisProc[exportGlobalGids_[i]] = exportGlobalGids_[i];
}
for(int i=0; i<thisProc.size(); i++)
{
if(thisProc[i]==0)
exportIds_[0].push_back(i);
}
validPointers_ = true;
int nDim;
double x0, y0, z0, x1, y1,z1;
zoltan_->RCB_Box
(
processors::globalRank(),
nDim,
x0, y0, z0,
x1, y1, z1
);
localBox_ = globalBox_;
switch (direction_)
{
case Direction::X :
localBox_.minPoint().x_ = x0;
localBox_.maxPoint().x_ = x1;
break;
case Direction::Y :
localBox_.minPoint().y_ = x0;
localBox_.maxPoint().y_ = x1;
break;
case Direction::Z :
localBox_.minPoint().z_ = x0;
localBox_.maxPoint().z_ = x1;
break;
}
localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint());
localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint());
return true;
}
pFlow::rcb1DPartitioning::rcb1DPartitioning
(
const dictionary &dict,
const box &globalBox
)
:
partitioning(dict, globalBox),
exportIds_(pFlowProcessors())
{
word directionName = dict.getVal<word>("direction");
if(toUpper(directionName)== "X")
{
direction_ = Direction::X;
dirVector_ ={1.0, 0.0, 0.0};
}
else if( toUpper(directionName) == "Y")
{
direction_ = Direction::Y;
dirVector_ ={0.0, 1.0, 0.0};
}
else if( toUpper(directionName) == "Z")
{
direction_ = Direction::Z;
dirVector_ ={0.0, 0.0, 1.0};
}
else
{
fatalErrorInFunction<< "wrong direction in dictionary "<<
dict.globalName()<<". Directions should be one of x, y, or z."<<endl;
fatalExit;
}
}
int pFlow::rcb1DPartitioning::getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
int pFlow::rcb1DPartitioning::getNumberOfPoints(void *data, int *ierr)
{
auto *obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
return obj->numActivePoints();
}
void pFlow::rcb1DPartitioning::getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
globalID[n] = i;
localID[n] = n;
n++;
}
}
}
void pFlow::rcb1DPartitioning::getGeometryList_x
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].x_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_y
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].y_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_z
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].z_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
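
For reference, a sketch of the decomposition sub-dictionary read by the constructor above; only the direction keyword (x, y, or z) is taken from the code, while the surrounding layout assumes phasicFlow's usual dictionary syntax.

decomposition
{
    direction      x;    // one of x, y, or z
}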

View File

@ -0,0 +1,241 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __rcb1DPartitioning_hpp__
#define __rcb1DPartitioning_hpp__
#include "partitioning.hpp"
#include "procVector.hpp"
namespace pFlow
{
class rcb1DPartitioning
:
public partitioning
{
public:
enum Direction
{
X = 0,
Y = 1,
Z = 2
};
protected:
/// Direction of partitioning
Direction direction_ = Direction::X;
realx3 dirVector_ = {1.0, 0.0, 0.0};
word directionName_ = "x";
MPI::procVector<std::vector<int>> exportIds_;
bool partition(pointCollection& points) override;
public:
rcb1DPartitioning(
const dictionary& dict,
const box& globalBox);
virtual
~rcb1DPartitioning()=default;
span<int32> exportList(int procNo)const override
{
return span<int32>(
const_cast<int32*>(exportIds_[procNo].data()),
exportIds_[procNo].size());
}
pFlow::MPI::procVector<span<int32>> allExportLists()const override
{
pFlow::MPI::procVector<span<int32>> allList(pFlowProcessors());
for(int i=0; i<allList.size(); i++)
allList[i]= exportList(i);
return allList;
}
static
int getNumGeometry(void *data, int *ierr);
static
int getNumberOfPoints(void *data, int *ierr);
static
void getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
);
static
void getGeometryList_x(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_y(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_z(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
};
/*class RCB_y_partitioning
:
public partitioning
{
public:
RCB_y_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_y_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
*ierr = ZOLTAN_OK;
for (int i=0; i < num_obj ; i++)
{
geom_vec[i] = obj->pointList()[i].y_;
}
return;
}
static
int getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
};
class RCB_x_partitioning
:
public partitioning
{
public:
RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_x_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
int getNumGeometry(void *data, int *ierr);
};*/
} // pFlow
#endif //__rcb1DPartitioning_hpp__

View File

@ -0,0 +1,158 @@
template<typename T>
pFlow::MPI::scatteredMasterDistribute<T>::scatteredMasterDistribute
(
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true)
{
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<int32> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i];
}
DataType dt;
if(! typeCreateIndexedBlock<T>( makeSpan(index), dt))
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
DataType dt;
if( !typeCreateIndexedBlock<T>(maps[proc], dt) )
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
void pFlow::MPI::scatteredMasterDistribute<T>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::distribute
(
span<T>& sendBuff,
span<T>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size()*sFactor<T>(),
Type<T>(),
0,
0,
localCommunicator(),
&stat),
false);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}

View File

@ -0,0 +1,69 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistribute_hpp__
#define __scatteredMasterDistribute_hpp__
#include "procCommunication.hpp"
#include "mpiCommunication.hpp"
#include "procVector.hpp"
#include "streams.hpp"
namespace pFlow::MPI
{
template<typename T>
class scatteredMasterDistribute
:
public procCommunication
{
protected:
procVector<DataType> indexedMap_;
void freeIndexedMap();
public:
scatteredMasterDistribute(const localProcessors& procs);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&)=delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<T>& sendBuff, span<T>& recvb);
};
} //pFlow::MPI
#include "scatteredMasterDistribute.cpp"
#endif //__scatteredMasterDistribute_hpp__

View File

@ -0,0 +1,166 @@
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPI::scatteredMasterDistribute<char>::scatteredMasterDistribute
(
size_t sizeOfElement,
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true),
sizeOfElement_(sizeOfElement)
{}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
std::vector<MPI_Aint> index;
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
m.size(),
sizeOfElement_,
index.data(),
MPI_BYTE,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<MPI_Aint> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
index.size(),
sizeOfElement_,
index.data(),
MPI_CHAR,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
void pFlow::MPI::scatteredMasterDistribute<char>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
bool pFlow::MPI::scatteredMasterDistribute<char>::distribute
(
span<char>& sendBuff,
span<char>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size(),
MPI_CHAR,
0,
0,
localCommunicator(),
&stat),
true);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}

View File

@ -0,0 +1,67 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistributeChar_hpp__
#define __scatteredMasterDistributeChar_hpp__
#include "scatteredMasterDistribute.hpp"
namespace pFlow::MPI
{
template<>
class scatteredMasterDistribute<char>
:
public procCommunication
{
protected:
procVector<DataType> indexedMap_;
size_t sizeOfElement_;
void freeIndexedMap();
public:
scatteredMasterDistribute(
size_t sizeOfElement,
const localProcessors& procs);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&)=delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<char>& sendBuff, span<char>& recvb);
};
} // pFlow::MPI
#endif //__scatteredMasterDistributeChar_hpp__