mirror of https://github.com/PhasicFlow/phasicFlow.git
synced 2025-07-28 03:27:05 +00:00
Merge branch 'MPI' of github.com:hamidrezanorouzi/phasicFlowMPI into MPIdev
@ -7,6 +7,8 @@ contactSearch/methods/cellBased/NBS/NBS.cpp
 contactSearch/methods/cellBased/NBS/cellsWallLevel0.cpp

 contactSearch/boundaries/boundaryContactSearch/boundaryContactSearch.cpp
+contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp
+contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp
 contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearchKernels.cpp
 contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.cpp
 contactSearch/boundaries/periodicBoundaryContactSearch/wallBoundaryContactSearch.cpp
@ -22,6 +24,13 @@ sphereInteraction/sphereInteractionsNonLinearModels.cpp
 sphereInteraction/sphereInteractionsNonLinearModModels.cpp
 )

+if(pFlow_Build_MPI)
+	list(APPEND SourceFiles
+		contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp
+		sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp
+	)
+endif()
+
 set(link_libs Kokkos::kokkos phasicFlow Property Particles Geometry)

 pFlow_add_library_install(Interaction SourceFiles link_libs)
@ -130,9 +130,10 @@ public:
 		csPairContainerType& pwPairs,
 		bool force = false) override
 	{
 		ppTimer().start();

+		Particles().boundingSphere().updateBoundaries(DataDirection::SlaveToMaster);

 		const auto& position = Particles().pointPosition().deviceViewAll();
 		const auto& flags = Particles().dynPointStruct().activePointsMaskDevice();
 		const auto& diam = Particles().boundingSphere().deviceViewAll();
@ -36,15 +36,16 @@ public:
 	using NextType = deviceViewType1D<uint32>;

 private:
+
 	cells searchCells_;

-	HeadType head_{"periodic::head", 1, 1, 1};
+	HeadType head_{ "periodic::head", 1, 1, 1 };

-	NextType next_{"periodic::next", 1};
+	NextType next_{ "periodic::next", 1 };

 	real sizeRatio_ = 1.0;

 	uint32 nextCapacity_ = 0;

 	void checkAllocateNext(uint32 n);
@ -0,0 +1,108 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#include "processorBoundaryContactSearch.hpp"
#include "contactSearch.hpp"
#include "particles.hpp"
//#include "pointStructure.hpp"
//#include "geometry.hpp"


void pFlow::processorBoundaryContactSearch::setSearchBox()
{
	auto l = boundary().neighborLength();
	auto n = boundary().boundaryPlane().normal();
	auto pp1 = boundary().boundaryPlane().parallelPlane(l);
	auto pp2 = boundary().boundaryPlane().parallelPlane(-l);

	realx3 minP1 = min(min(min(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());
	realx3 maxP1 = max(max(max(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());

	realx3 minP2 = min(min(min(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());
	realx3 maxP2 = max(max(max(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());

	auto minP = min(minP1, minP2) - l*(realx3(1.0)-abs(n));
	auto maxP = max(maxP1, maxP2) + l*(realx3(1.0)-abs(n));

	searchBox_ = {minP, maxP};
}

pFlow::processorBoundaryContactSearch::processorBoundaryContactSearch(
	const dictionary &dict,
	const boundaryBase &boundary,
	const contactSearch &cSearch)
:
	boundaryContactSearch(dict, boundary, cSearch),
	diameter_(cSearch.Particles().boundingSphere()),
	masterSearch_(this->isBoundaryMaster())
{
	if(masterSearch_)
	{
		setSearchBox();

		real minD;
		real maxD;
		cSearch.Particles().boundingSphereMinMax(minD, maxD);

		ppContactSearch_ = makeUnique<twoPartContactSearch>(
			searchBox_,
			maxD);
	}
	else
	{
		searchBox_ = {{0,0,0},{0,0,0}};
	}
}

bool pFlow::processorBoundaryContactSearch::broadSearch
(
	uint32 iter,
	real t,
	real dt,
	csPairContainerType &ppPairs,
	csPairContainerType &pwPairs,
	bool force
)
{
	if(masterSearch_)
	{
		/*const auto thisPoints = boundary().thisPoints();
		const auto& neighborProcPoints = boundary().neighborProcPoints();
		const auto& bDiams = diameter_.BoundaryField(thisBoundaryIndex());
		const auto thisDiams = bDiams.thisField();
		const auto& neighborProcDiams = bDiams.neighborProcField();

		ppContactSearch_().broadSearchPP(
			ppPairs,
			thisPoints,
			thisDiams,
			neighborProcPoints,
			neighborProcDiams);

		pOutput<<"ppPairs size in boundary"<< ppPairs.size()<<endl; */
		return true;
	}
	else
	{
		return true;
	}
}
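A note on setSearchBox above: the search region is the axis-aligned hull of two planes offset by ±neighborLength from the processor boundary plane, and the term l*(realx3(1.0)-abs(n)) additionally pads the box by l in the directions tangential to the boundary normal n, so that particles near the edges of the boundary face still fall inside the search region.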
@ -0,0 +1,74 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryContactSearch_hpp__
#define __processorBoundaryContactSearch_hpp__

#include "boundaryContactSearch.hpp"
#include "pointFields.hpp"
#include "twoPartContactSearch.hpp"

namespace pFlow
{

class processorBoundaryContactSearch : public boundaryContactSearch
{
private:

	box searchBox_;

	uniquePtr<twoPartContactSearch> ppContactSearch_ = nullptr;

	const realPointField_D& diameter_;

	bool masterSearch_;

	void setSearchBox();

public:

	TypeInfo("boundaryContactSearch<MPI,processor>")

	processorBoundaryContactSearch(
		const dictionary& dict,
		const boundaryBase& boundary,
		const contactSearch& cSearch
	);

	~processorBoundaryContactSearch() override = default;

	add_vCtor(
		boundaryContactSearch,
		processorBoundaryContactSearch,
		boundaryBase
	);

	bool broadSearch(
		uint32 iter,
		real t,
		real dt,
		csPairContainerType& ppPairs,
		csPairContainerType& pwPairs,
		bool force = false
	) override;
};

}

#endif //__processorBoundaryContactSearch_hpp__
@ -0,0 +1,160 @@

#include "twoPartContactSearch.hpp"
#include "twoPartContactSearchKernels.hpp"
#include "phasicFlowKokkos.hpp"
#include "streams.hpp"

void pFlow::twoPartContactSearch::checkAllocateNext(uint32 n)
{
	if( nextCapacity_ < n)
	{
		nextCapacity_ = n;
		reallocNoInit(next_, n);
	}
}

void pFlow::twoPartContactSearch::nullifyHead()
{
	fill(head_, static_cast<uint32>(-1));
}

void pFlow::twoPartContactSearch::nullifyNext(uint32 n)
{
	fill(next_, 0u, n, static_cast<uint32>(-1));
}

void pFlow::twoPartContactSearch::buildList(
	const deviceScatteredFieldAccess<realx3> &points)
{
	if(points.empty()) return;
	uint32 n = points.size();
	checkAllocateNext(n);
	nullifyNext(n);
	nullifyHead();

	pFlow::twoPartContactSearchKernels::buildNextHead(
		points,
		searchCells_,
		head_,
		next_
	);
}

pFlow::twoPartContactSearch::twoPartContactSearch
(
	const box &domain,
	real cellSize,
	real sizeRatio
)
:
	searchCells_(domain, cellSize),
	head_("periodic:head", searchCells_.nx(), searchCells_.ny(), searchCells_.nz()),
	sizeRatio_(sizeRatio)
{}

bool pFlow::twoPartContactSearch::broadSearchPP
(
	csPairContainerType &ppPairs,
	const deviceScatteredFieldAccess<realx3> &points1,
	const deviceScatteredFieldAccess<real>& diams1,
	const deviceScatteredFieldAccess<realx3> &points2,
	const deviceScatteredFieldAccess<real>& diams2,
	const realx3& transferVec
)
{
	buildList(points1);

	uint32 nNotInserted = 1;

	// loop until the container size fits the number of contact pairs
	while (nNotInserted > 0)
	{
		nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
		(
			ppPairs,
			points1,
			diams1,
			points2,
			diams2,
			transferVec,
			head_,
			next_,
			searchCells_,
			sizeRatio_
		);

		if(nNotInserted)
		{
			// - resize the container
			//   note that getFull now shows the number of failed insertions.
			uint32 len = max(nNotInserted, 100u);

			auto oldCap = ppPairs.capacity();

			ppPairs.increaseCapacityBy(len);

			INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
				oldCap << " to "<< ppPairs.capacity() <<" in periodicBoundaryContactSearch."<<END_INFO;
		}
	}

	return true;
}

bool pFlow::twoPartContactSearch::broadSearchPP
(
	csPairContainerType &ppPairs,
	const deviceScatteredFieldAccess<realx3> &points1,
	const deviceScatteredFieldAccess<real> &diams1,
	const realx3Vector_D& points2,
	const realVector_D& diams2
)
{
	buildList(points1);

	uint32 nNotInserted = 1;

	// loop until the container size fits the number of contact pairs
	while (nNotInserted > 0)
	{
		nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
		(
			ppPairs,
			points1,
			diams1,
			points2,
			diams2,
			head_,
			next_,
			searchCells_,
			sizeRatio_
		);

		if(nNotInserted)
		{
			// - resize the container
			//   note that getFull now shows the number of failed insertions.
			uint32 len = max(nNotInserted, 100u);

			auto oldCap = ppPairs.capacity();

			ppPairs.increaseCapacityBy(len);

			INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
				oldCap << " to "<< ppPairs.capacity() <<" in periodicBoundaryContactSearch."<<END_INFO;
		}
	}

	return true;
}
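A short illustration of the head/next structure that buildList fills (a self-contained sketch with illustrative names, not part of the commit): head(i,j,k) holds the index of the last point registered in cell (i,j,k), next[n] chains to the point registered before n, and static_cast<uint32>(-1) marks the end of the chain.

#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint32_t NIL = static_cast<std::uint32_t>(-1);

int main()
{
	// A single cell with four points (0..3) registered in order.
	std::uint32_t head = NIL;                 // head(i,j,k) for one cell
	std::vector<std::uint32_t> next(4, NIL);  // next[n]: previously registered point

	for (std::uint32_t n = 0; n < 4; ++n)
	{
		next[n] = head;  // done with Kokkos::atomic_exchange in the device kernel
		head = n;
	}

	// Walking the chain visits points in reverse registration order: 3 2 1 0
	for (std::uint32_t i = head; i != NIL; i = next[i])
		std::cout << i << ' ';
	std::cout << '\n';
}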
@ -0,0 +1,103 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#ifndef __twoPartContactSearch_hpp__
#define __twoPartContactSearch_hpp__

#include "contactSearchGlobals.hpp"
#include "scatteredFieldAccess.hpp"
#include "cells.hpp"
#include "VectorSingles.hpp"

namespace pFlow
{

class twoPartContactSearch
{
public:
	using HeadType = deviceViewType3D<uint32>;

	using NextType = deviceViewType1D<uint32>;

private:

	cells searchCells_;

	HeadType head_{ "periodic::head", 1, 1, 1 };

	NextType next_{ "periodic::next", 1 };

	real sizeRatio_ = 1.0;

	uint32 nextCapacity_ = 0;

	void checkAllocateNext(uint32 n);

	void nullifyHead();

	void nullifyNext(uint32 n);

	void buildList(
		const deviceScatteredFieldAccess<realx3> &points);

public:
	twoPartContactSearch(
		const box &domain,
		real cellSize,
		real sizeRatio = 1.0);

	/// @brief Perform a broad-search for spheres in two adjacent regions.
	/// Region 1 is considered the master (primary) region and region 2 the slave.
	/// @param ppPairs pairs container which holds i and j
	/// @param points1 point positions in region 1
	/// @param diams1 diameter of spheres in region 1
	/// @param points2 point positions in region 2
	/// @param diams2 diameter of spheres in region 2
	/// @param transferVec a vector to transfer points from region 2 to region 1
	/// @return true if it is successful
	bool broadSearchPP(
		csPairContainerType &ppPairs,
		const deviceScatteredFieldAccess<realx3> &points1,
		const deviceScatteredFieldAccess<real> &diams1,
		const deviceScatteredFieldAccess<realx3> &points2,
		const deviceScatteredFieldAccess<real> &diams2,
		const realx3 &transferVec);

	bool broadSearchPP(
		csPairContainerType &ppPairs,
		const deviceScatteredFieldAccess<realx3> &points1,
		const deviceScatteredFieldAccess<real> &diams1,
		const realx3Vector_D& points2,
		const realVector_D& diams2);

	const auto& searchCells()const
	{
		return searchCells_;
	}

	real sizeRatio()const
	{
		return sizeRatio_;
	}
};

}

#endif //__twoPartContactSearch_hpp__
@ -0,0 +1,188 @@

#include "twoPartContactSearchKernels.hpp"

INLINE_FUNCTION_HD
bool
sphereSphereCheckB(
	const pFlow::realx3& p1,
	const pFlow::realx3 p2,
	pFlow::real d1,
	pFlow::real d2
)
{
	return pFlow::length(p2 - p1) < 0.5 * (d2 + d1);
}

void
pFlow::twoPartContactSearchKernels::buildNextHead(
	const deviceScatteredFieldAccess<realx3>& points,
	const cells& searchCells,
	deviceViewType3D<uint32>& head,
	deviceViewType1D<uint32>& next
)
{
	if (points.empty())
		return;

	uint32 n = points.size();

	Kokkos::parallel_for(
		"pFlow::ppwBndryContactSearch::buildList",
		deviceRPolicyStatic(0, n),
		LAMBDA_HD(uint32 i) {
			int32x3 ind;
			if (searchCells.pointIndexInDomain(points[i], ind))
			{
				// discards points out of searchCell
				uint32 old =
					Kokkos::atomic_exchange(&head(ind.x(), ind.y(), ind.z()), i);
				next[i] = old;
			}
		}
	);
	Kokkos::fence();
}

pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
	csPairContainerType& ppPairs,
	const deviceScatteredFieldAccess<realx3>& points,
	const deviceScatteredFieldAccess<real>& diams,
	const deviceScatteredFieldAccess<realx3>& mirrorPoints,
	const deviceScatteredFieldAccess<real>& mirrorDiams,
	const realx3& transferVec,
	const deviceViewType3D<uint32>& head,
	const deviceViewType1D<uint32>& next,
	const cells& searchCells,
	const real sizeRatio
)
{
	if (points.empty())
		return 0;
	if (mirrorPoints.empty())
		return 0;

	auto nMirror = mirrorPoints.size();

	uint32 getFull = 0;

	Kokkos::parallel_reduce(
		"pFlow::twoPartContactSearchKernels::broadSearchPP",
		deviceRPolicyStatic(0, nMirror),
		LAMBDA_HD(const uint32 mrrI, uint32& getFullUpdate) {
			realx3 p_m = mirrorPoints(mrrI) + transferVec;

			int32x3 ind_m;
			if (!searchCells.pointIndexInDomain(p_m, ind_m))
				return;

			real d_m = sizeRatio * mirrorDiams[mrrI];

			for (int ii = -1; ii < 2; ii++)
			{
				for (int jj = -1; jj < 2; jj++)
				{
					for (int kk = -1; kk < 2; kk++)
					{
						auto ind = ind_m + int32x3{ ii, jj, kk };

						if (!searchCells.inCellRange(ind))
							continue;

						uint32 thisI = head(ind.x(), ind.y(), ind.z());
						while (thisI != -1)
						{
							auto d_n = sizeRatio * diams[thisI];

							// first item is for this boundary and second item,
							// for mirror
							if(sphereSphereCheckB(p_m, points[thisI], d_m, d_n)&&
								ppPairs.insert(thisI,mrrI) == -1)
							{
								getFullUpdate++;
							}

							thisI = next(thisI);
						}
					}
				}
			}
		},
		getFull
	);

	return getFull;
}

pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
	csPairContainerType& ppPairs,
	const deviceScatteredFieldAccess<realx3>& points1,
	const deviceScatteredFieldAccess<real>& diams1,
	const realx3Vector_D& points2,
	const realVector_D& diams2,
	const deviceViewType3D<uint32>& head,
	const deviceViewType1D<uint32>& next,
	const cells& searchCells,
	real sizeRatio
)
{
	if (points1.empty())
		return 0;
	if (points2.empty())
		return 0;

	auto nP2 = points2.size();
	auto points2View = points2.deviceView();
	auto diams2View = diams2.deviceView();

	uint32 getFull = 0;

	Kokkos::parallel_reduce(
		"pFlow::twoPartContactSearchKernels::broadSearchPP",
		deviceRPolicyStatic(0, nP2),
		LAMBDA_HD(const uint32 i2, uint32& getFullUpdate) {
			realx3 p_m = points2View(i2);

			int32x3 ind_m;
			if (!searchCells.pointIndexInDomain(p_m, ind_m))
				return;

			real d_m = sizeRatio * diams2View[i2];

			for (int ii = -1; ii < 2; ii++)
			{
				for (int jj = -1; jj < 2; jj++)
				{
					for (int kk = -1; kk < 2; kk++)
					{
						auto ind = ind_m + int32x3{ ii, jj, kk };

						if (!searchCells.inCellRange(ind))
						{
							continue;
						}

						uint32 i1 = head(ind.x(), ind.y(), ind.z());
						while (i1 != -1)
						{
							auto d_n = sizeRatio * diams1[i1];

							// first item is for this boundary and second item,
							// for mirror
							if(sphereSphereCheckB(p_m, points1[i1], d_m, d_n)&&
								ppPairs.insert(i1,i2) == -1)
							{
								getFullUpdate++;
							}

							i1 = next(i1);
						}
					}
				}
			}
		},
		getFull
	);

	return getFull;
}
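Two details of the kernels above are worth noting: each (mirrored) point is binned into the search grid and candidate partners are taken only from the 3x3x3 block of cells around it (the ii/jj/kk loops), which suffices when the cell size is no smaller than the largest scaled diameter; and insertions that fail because the pair container is full are counted through the reduction argument getFullUpdate, so the caller can grow the container and rerun the search.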
@ -0,0 +1,49 @@
#ifndef __twoPartContactSearchKernels_hpp__
#define __twoPartContactSearchKernels_hpp__

#include "contactSearchGlobals.hpp"
#include "cells.hpp"
#include "contactSearchFunctions.hpp"
#include "scatteredFieldAccess.hpp"
#include "VectorSingles.hpp"

namespace pFlow::twoPartContactSearchKernels
{

void buildNextHead(
	const deviceScatteredFieldAccess<realx3> &points,
	const cells &searchCells,
	deviceViewType3D<uint32> &head,
	deviceViewType1D<uint32> &next );


uint32 broadSearchPP
(
	csPairContainerType &ppPairs,
	const deviceScatteredFieldAccess<realx3> &points,
	const deviceScatteredFieldAccess<real> &diams,
	const deviceScatteredFieldAccess<realx3> &mirrorPoints,
	const deviceScatteredFieldAccess<real> &mirrorDiams,
	const realx3 &transferVec,
	const deviceViewType3D<uint32> &head,
	const deviceViewType1D<uint32> &next,
	const cells &searchCells,
	real sizeRatio
);

uint32
broadSearchPP(
	csPairContainerType& ppPairs,
	const deviceScatteredFieldAccess<realx3>& points1,
	const deviceScatteredFieldAccess<real>& diams1,
	const realx3Vector_D& points2,
	const realVector_D& diams2,
	const deviceViewType3D<uint32>& head,
	const deviceViewType1D<uint32>& next,
	const cells& searchCells,
	real sizeRatio
);
}

#endif //__twoPartContactSearchKernels_hpp__
@ -143,7 +143,9 @@ public:
 	) override
 	{
-		notImplementedFunction;
+		pOutput<<"Function (hearChanges in boundarySphereInteractions) is not implemented. Message: "<<
+			msg <<endl<<" name "<< this->name() <<" type "<< this->type()<<endl;
+		//notImplementedFunction;
 		return true;
 	}
@ -3,6 +3,7 @@ namespace pFlow::periodicBoundarySIKernels
 {

 template<typename ContactListType, typename ContactForceModel>
+inline
 void sphereSphereInteraction
 (
 	real dt,
@ -46,14 +47,6 @@ void sphereSphereInteraction

 	if( ovrlp >0.0 )
 	{
-
-		/*auto Vi = thisVel[i];
-		auto Vj = mirrorVel[j];
-		auto wi = thisRVel[i];
-		auto wj = mirrorRVel[j];
-		auto Nij = (xj-xi)/dist;
-		auto Vr = Vi - Vj + cross((Ri*wi+Rj*wj), Nij);*/
-
 		auto Nij = (xj-xi)/dist;
 		auto wi = rVel[ind_i];
 		auto wj = rVel[ind_j];
@ -0,0 +1,131 @@

#ifndef __processorBoundarySIKernels_hpp__
#define __processorBoundarySIKernels_hpp__

namespace pFlow::MPI::processorBoundarySIKernels
{

template<typename ContactListType, typename ContactForceModel>
inline
void sphereSphereInteraction
(
	real dt,
	const ContactListType& cntctList,
	const ContactForceModel& forceModel,
	const deviceScatteredFieldAccess<realx3>& thisPoints,
	const deviceViewType1D<real>& thisDiam,
	const deviceViewType1D<uint32>& thisPropId,
	const deviceViewType1D<realx3>& thisVel,
	const deviceViewType1D<realx3>& thisRVel,
	const deviceViewType1D<realx3>& thisCForce,
	const deviceViewType1D<realx3>& thisCTorque,
	const deviceViewType1D<realx3>& neighborPoints,
	const deviceViewType1D<real>& neighborDiam,
	const deviceViewType1D<uint32>& neighborPropId,
	const deviceViewType1D<realx3>& neighborVel,
	const deviceViewType1D<realx3>& neighborRVel,
	const deviceViewType1D<realx3>& neighborCForce,
	const deviceViewType1D<realx3>& neighborCTorque
)
{
	using ValueType = typename ContactListType::ValueType;
	uint32 ss = cntctList.size();
	if(ss == 0u) return;

	uint32 lastItem = cntctList.loopCount();

	Kokkos::parallel_for(
		"pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction",
		deviceRPolicyDynamic(0,lastItem),
		LAMBDA_HD(uint32 n)
		{
			if(!cntctList.isValid(n)) return;

			auto [i,j] = cntctList.getPair(n);
			uint32 ind_i = thisPoints.index(i);
			uint32 ind_j = j;

			real Ri = 0.5*thisDiam[ind_i];
			real Rj = 0.5*neighborDiam[ind_j];
			realx3 xi = thisPoints.field()[ind_i];
			realx3 xj = neighborPoints[ind_j];

			real dist = length(xj-xi);
			real ovrlp = (Ri+Rj) - dist;

			if( ovrlp > 0.0 )
			{
				auto Nij = (xj-xi)/max(dist,smallValue);
				auto wi = thisRVel[ind_i];
				auto wj = neighborRVel[ind_j];
				auto Vr = thisVel[ind_i] - neighborVel[ind_j] + cross((Ri*wi+Rj*wj), Nij);

				auto history = cntctList.getValue(n);

				int32 propId_i = thisPropId[ind_i];
				int32 propId_j = neighborPropId[ind_j];

				realx3 FCn, FCt, Mri, Mrj, Mij, Mji;

				// calculates contact force
				forceModel.contactForce(
					dt, i, j,
					propId_i, propId_j,
					Ri, Rj,
					ovrlp,
					Vr, Nij,
					history,
					FCn, FCt);

				forceModel.rollingFriction(
					dt, i, j,
					propId_i, propId_j,
					Ri, Rj,
					wi, wj,
					Nij,
					FCn,
					Mri, Mrj);

				auto M = cross(Nij,FCt);
				Mij = Ri*M+Mri;
				Mji = Rj*M+Mrj;

				auto FC = FCn + FCt;

				Kokkos::atomic_add(&thisCForce[ind_i].x_, FC.x_);
				Kokkos::atomic_add(&thisCForce[ind_i].y_, FC.y_);
				Kokkos::atomic_add(&thisCForce[ind_i].z_, FC.z_);

				Kokkos::atomic_add(&neighborCForce[ind_j].x_, -FC.x_);
				Kokkos::atomic_add(&neighborCForce[ind_j].y_, -FC.y_);
				Kokkos::atomic_add(&neighborCForce[ind_j].z_, -FC.z_);

				Kokkos::atomic_add(&thisCTorque[ind_i].x_, Mij.x_);
				Kokkos::atomic_add(&thisCTorque[ind_i].y_, Mij.y_);
				Kokkos::atomic_add(&thisCTorque[ind_i].z_, Mij.z_);

				Kokkos::atomic_add(&neighborCTorque[ind_j].x_, Mji.x_);
				Kokkos::atomic_add(&neighborCTorque[ind_j].y_, Mji.y_);
				Kokkos::atomic_add(&neighborCTorque[ind_j].z_, Mji.z_);

				cntctList.setValue(n,history);
			}
			else
			{
				cntctList.setValue(n, ValueType());
			}
		});
	Kokkos::fence();
}

} //pFlow::MPI::processorBoundarySIKernels

#endif //__processorBoundarySIKernels_hpp__
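In the kernel above each valid contact is applied to both sides at once: FC is atomically accumulated on the owner-side particle and -FC on the neighbor-processor copy (and likewise Mij and Mji for the torques), so the partial forces on ghost particles can later be reduced back to their owning processor. The atomic adds are required because several contacts processed in parallel may target the same particle.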
@ -0,0 +1,73 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#include "processorBoundarySIKernels.hpp"

template <typename cFM, typename gMM>
pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySphereInteraction(
	const boundaryBase &boundary,
	const sphereParticles &sphPrtcls,
	const GeometryMotionModel &geomMotion)
:
	boundarySphereInteraction<cFM,gMM>(
		boundary,
		sphPrtcls,
		geomMotion
	),
	masterInteraction_(boundary.isBoundaryMaster())
{
	pOutput<<"Processor boundaryCondition for "<< boundary.name()<<endl;
}

template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
	real dt,
	const ContactForceModel &cfModel
)
{
	if(!masterInteraction_) return true;

	const auto & sphPar = this->sphParticles();
	uint32 thisIndex = this->boundary().thisBoundaryIndex();
	const auto& a = sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll();

	/*pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
		dt,
		this->ppPairs(),
		cfModel,
		this->boundary().thisPoints(),
		sphPar.diameter().deviceViewAll(),
		sphPar.propertyId().deviceViewAll(),
		sphPar.velocity().deviceViewAll(),
		sphPar.rVelocity().deviceViewAll(),
		sphPar.contactForce().deviceViewAll(),
		sphPar.contactTorque().deviceViewAll(),
		this->boundary().neighborProcPoints().deviceViewAll(),
		sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
		sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
		sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
		sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
		sphPar.contactForce().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
		sphPar.contactTorque().BoundaryField(thisIndex).neighborProcField().deviceViewAll()
	);*/

	return true;
}
@ -0,0 +1,90 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundarySphereInteraction_hpp__
#define __processorBoundarySphereInteraction_hpp__

#include "boundarySphereInteraction.hpp"

namespace pFlow::MPI
{

template<typename contactForceModel,typename geometryMotionModel>
class processorBoundarySphereInteraction
:
	public boundarySphereInteraction<contactForceModel, geometryMotionModel>
{
public:

	using PBSInteractionType =
		processorBoundarySphereInteraction<contactForceModel,geometryMotionModel>;

	using BSInteractionType =
		boundarySphereInteraction<contactForceModel, geometryMotionModel>;

	using GeometryMotionModel = typename BSInteractionType::GeometryMotionModel;

	using ContactForceModel = typename BSInteractionType::ContactForceModel;

	using MotionModel = typename geometryMotionModel::MotionModel;

	using ModelStorage = typename ContactForceModel::contactForceStorage;

	using IdType = typename BSInteractionType::IdType;

	using IndexType = typename BSInteractionType::IndexType;

	using ContactListType = typename BSInteractionType::ContactListType;

private:

	bool masterInteraction_;

public:

	TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel);

	processorBoundarySphereInteraction(
		const boundaryBase& boundary,
		const sphereParticles& sphPrtcls,
		const GeometryMotionModel& geomMotion
	);

	add_vCtor
	(
		BSInteractionType,
		PBSInteractionType,
		boundaryBase
	);

	~processorBoundarySphereInteraction()override = default;

	bool sphereSphereInteraction(
		real dt,
		const ContactForceModel& cfModel)override;

};

}

#include "processorBoundarySphereInteraction.cpp"

#endif //__processorBoundarySphereInteraction_hpp__
@ -0,0 +1,17 @@

#include "processorBoundarySphereInteraction.hpp"
#include "geometryMotions.hpp"
#include "contactForceModels.hpp"


template class pFlow::MPI::processorBoundarySphereInteraction
<
	pFlow::cfModels::limitedNonLinearNormalRolling,
	pFlow::rotationAxisMotionGeometry
>;

template class pFlow::MPI::processorBoundarySphereInteraction
<
	pFlow::cfModels::nonLimitedNonLinearNormalRolling,
	pFlow::rotationAxisMotionGeometry
>;
@ -163,9 +163,16 @@ bool pFlow::sphereInteraction<cFM,gMM, cLT>::iterate()
 	auto t = this->currentTime();
 	auto dt = this->dt();

-	//output<<"iter, t, dt "<< iter<<" "<< t << " "<<dt<<endl;
 	bool broadSearch = contactSearch_().enterBroadSearch(iter, t, dt);

+	/*sphParticles_.diameter().updateBoundaries(DataDirection::SlaveToMaster);
+	sphParticles_.velocity().updateBoundaries(DataDirection::SlaveToMaster);
+	sphParticles_.rVelocity().updateBoundaries(DataDirection::SlaveToMaster);
+	sphParticles_.mass().updateBoundaries(DataDirection::SlaveToMaster);
+	sphParticles_.I().updateBoundaries(DataDirection::SlaveToMaster);
+	sphParticles_.propertyId().updateBoundaries(DataDirection::SlaveToMaster);*/

 	if(broadSearch)
 	{
@ -16,6 +16,11 @@ Insertion/shapeMixture/shapeMixture.cpp
 Insertion/Insertion/Insertions.cpp
 )

+if(pFlow_Build_MPI)
+	list(APPEND SourceFiles
+		particles/MPIParticleIdHandler/MPIParticleIdHandler.cpp)
+endif()
+
 set(link_libs Kokkos::kokkos phasicFlow Integration Property)

 pFlow_add_library_install(Particles SourceFiles link_libs)
@ -513,6 +513,14 @@ bool pFlow::sphereParticles::beforeIteration()
 	dynPointStruct().predict(dt, accelertion());
 	rVelIntegration_().predict(dt,rVelocity_, rAcceleration_);
 	intPredictTimer_.end();

+	propertyId_.updateBoundariesSlaveToMasterIfRequested();
+	diameter_.updateBoundariesSlaveToMasterIfRequested();
+	mass_.updateBoundariesSlaveToMasterIfRequested();
+	I_.updateBoundariesSlaveToMasterIfRequested();
+	rVelocity_.updateBoundariesSlaveToMasterIfRequested();
+	rAcceleration_.updateBoundariesSlaveToMasterIfRequested();

 	return true;
 }
@ -0,0 +1,70 @@
#include "MPIParticleIdHandler.hpp"
#include "procCommunication.hpp"

pFlow::MPI::MPIParticleIdHandler::MPIParticleIdHandler
(
	pointStructure& pStruct
)
:
	particleIdHandler(pStruct)
{
	initialIdCheck();
}

pFlow::Pair<pFlow::uint32, pFlow::uint32>
pFlow::MPI::MPIParticleIdHandler::getIdRange(uint32 nNewParticles)
{
	uint32 startId;
	if(maxId_ == -1)
	{
		startId = 0;
	}
	else
	{
		startId = maxId_ + 1;
	}
	uint32 endId = startId + nNewParticles - 1;
	maxId_ = endId;
	return {startId, endId};
}

bool pFlow::MPI::MPIParticleIdHandler::initialIdCheck()
{
	/// empty point structure / no particles in simulation
	uint32 maxId = -1;
	if( !pStruct().empty() )
	{
		maxId = max( *this );
	}

	auto maxIdAll = procVector<uint32>(pFlowProcessors());
	auto numAll = procVector<uint32>(pFlowProcessors());
	auto comm = procCommunication(pFlowProcessors());

	comm.collectAllToAll(maxId, maxIdAll);
	comm.collectAllToAll(size(), numAll);

	uint32 n = 0;
	for(uint32 i=0; i<maxIdAll.size(); i++)
	{
		if( maxIdAll[i] == -1 && numAll[i] != 0)
		{
			if(comm.localRank() == i)
			{
				fillSequence(*this, n);
				maxId_ = size() - 1 + n;
			}
		}
		else
		{
			if(comm.localRank() == i)
			{
				maxId_ = maxIdAll[i];
			}
		}
		n += numAll[i];
	}

	return true;
}
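A worked example of initialIdCheck above: with three processors holding 4, 3, and 5 particles and no ids assigned yet (all maxIdAll[i] == -1 with nonzero counts), the running offset n is 0, 4, and 7 in turn, so rank 0 fills ids 0-3 (maxId_ = 3), rank 1 fills 4-6 (maxId_ = 6), and rank 2 fills 7-11 (maxId_ = 11). A subsequent getIdRange(2) on rank 2 would then return {12, 13}.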
@ -0,0 +1,60 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPIParticleIdHandler_hpp__
#define __MPIParticleIdHandler_hpp__

#include "particleIdHandler.hpp"

namespace pFlow::MPI
{

class MPIParticleIdHandler : public particleIdHandler
{
private:

	uint32 maxId_ = -1;

	bool initialIdCheck() override;

public:

	ClassInfo("particleIdHandler<MPI>");

	explicit MPIParticleIdHandler(pointStructure& pStruct);

	~MPIParticleIdHandler() override = default;

	add_vCtor(
		particleIdHandler,
		MPIParticleIdHandler,
		pointStructure
	);

	Pair<uint32, uint32> getIdRange(uint32 nNewParticles) override;

	uint32 maxId() const override
	{
		return maxId_;
	}
};

}

#endif //__MPIParticleIdHandler_hpp__
@ -31,7 +31,12 @@ class particleIdHandler
 :
 	public uint32PointField_D
 {
+
+private:
+
+	virtual
+	bool initialIdCheck()=0;

 public:

 	/// class info
@ -53,7 +58,9 @@ public:
 	Pair<uint32, uint32> getIdRange(uint32 nNewParticles)=0;

 	virtual
-	bool initialIdCheck()=0;
+	uint32 maxId()const = 0;
+
+
 	// hear changes for possible insertion of particles
 	// override from internalField
@ -78,7 +78,7 @@ pFlow::particles::particles(systemControl& control)
 {
 	this->addToSubscriber(dynPointStruct_);

-	idHandler_().initialIdCheck();
+	//idHandler_().initialIdCheck();
 }

 bool
@ -87,7 +87,16 @@ pFlow::particles::beforeIteration()
 	zeroForce();
 	zeroTorque();

-	return dynPointStruct_.beforeIteration();
+	if( !dynPointStruct_.beforeIteration())
+	{
+		return false;
+	}
+
+	shapeIndex_.updateBoundariesSlaveToMasterIfRequested();
+	accelertion_.updateBoundariesSlaveToMasterIfRequested();
+	idHandler_().updateBoundariesSlaveToMasterIfRequested();
+
+	return true;
 }

 bool
@ -181,6 +181,12 @@ public:
 		return contactTorque_;
 	}

+	inline
+	uint maxId()const
+	{
+		return idHandler_().maxId();
+	}
+
 	bool beforeIteration() override;

 	bool iterate() override;
@ -9,6 +9,7 @@ pFlow::regularParticleIdHandler::regularParticleIdHandler
 :
 	particleIdHandler(pStruct)
 {
+	initialIdCheck();
 }

 pFlow::Pair<pFlow::uint32, pFlow::uint32>
@ -33,6 +33,7 @@ private:

 	uint32 maxId_ = -1;

+	bool initialIdCheck()override;
 public:

 	ClassInfo("particleIdHandler<regular>");
@ -50,7 +51,10 @@ public:

 	Pair<uint32, uint32> getIdRange(uint32 nNewParticles)override;

-	bool initialIdCheck()override;
+	uint32 maxId()const override
+	{
+		return maxId_;
+	}

 };
@ -25,6 +25,7 @@ streams/TStream/oTstream.cpp
 streams/Fstream/iFstream.cpp
 streams/Fstream/oFstream.cpp
 streams/Fstream/fileStream.cpp
+streams/dataIO/dataIORegulars.cpp
 streams/streams.cpp

 fileSystem/fileSystem.cpp
@ -132,14 +133,14 @@ if(pFlow_Build_MPI)
 	message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")

 	list(APPEND SourceFiles
-		MPIParallelization/partitioning.cpp
-		MPIParallelization/rcb1DPartitioning.cpp
-		#MPIParallelization/dataIOMPI.cpp
-		MPIParallelization/procCommunication.cpp
-		MPIParallelization/boundaryProcessor.cpp
-		MPIParallelization/scatteredMasterDistributeChar.cpp
-		MPIParallelization/processorBoundaryFields.cpp
+		MPIParallelization/domain/partitioning/partitioning.cpp
+		MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp
 		MPIParallelization/domain/MPISimulationDomain.cpp
+		MPIParallelization/dataIOMPI/dataIOMPIs.cpp
+		MPIParallelization/MPI/procCommunication.cpp
+		MPIParallelization/MPI/scatteredMasterDistributeChar.cpp
+		MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
+		MPIParallelization/pointField/processorBoundaryFields.cpp
 	)

 	list(APPEND link_libs MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm )
106 src/phasicFlow/MPIParallelization/MPI/gatherMaster.hpp (new file)
@ -0,0 +1,106 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __gatherMaster_hpp__
#define __gatherMaster_hpp__

#include <numeric>

#include "procCommunication.hpp"
#include "stdVectorHelper.hpp"

namespace pFlow::MPI
{

template<typename T>
class gatherMaster
:
	public procCommunication
{
protected:

	std::vector<T> buffer_;

public:

	gatherMaster(const localProcessors& procs)
	:
		procCommunication(procs)
	{}

	span<T> getData()
	{
		if(this->localMaster())
			return span<T>( buffer_.data(), buffer_.size());
		else
			return span<T>(nullptr, 0);
	}

	std::vector<T> moveData()
	{
		return std::move(buffer_);
	}

	bool gatherData(span<T> data)
	{
		int thisN = data.size();

		bool succss;

		procVector<int> numElems(this->processors(), true);
		procVector<int> displ(this->processors(), true);

		if( !this->collectAllToMaster(thisN, numElems) )
		{
			fatalErrorInFunction<<
				"error in collecting number of elements from processors"<<endl;
			return false;
		}
		auto totalN = std::accumulate(
			numElems.begin(),
			numElems.end(),
			static_cast<int>(0));

		buffer_.resize(totalN);

		std::exclusive_scan(
			numElems.begin(),
			numElems.end(),
			displ.begin(),
			0);

		auto bufferSpan = span<T>(this->buffer_.data(), this->buffer_.size());

		return CheckMPI(
			Gatherv(
				data,
				bufferSpan,
				numElems.getSpan(),
				displ.getSpan(),
				this->localMasterNo(),
				this->localCommunicator()),
			false);
	}

};
}

#endif
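A usage sketch for gatherMaster (assuming pFlowProcessors() and the span constructor shown above, and that localMaster() is inherited from procCommunication as used inside getData(); collectOnMaster is a hypothetical helper, not part of the commit):

#include <vector>
#include "gatherMaster.hpp"

void collectOnMaster(std::vector<pFlow::real>& localData)
{
	// every rank passes its local values; the local master receives them all
	pFlow::MPI::gatherMaster<pFlow::real> gather(pFlow::pFlowProcessors());

	gather.gatherData(
		pFlow::span<pFlow::real>(localData.data(), localData.size()));

	if(gather.localMaster())          // only the master holds the result
	{
		auto all = gather.moveData(); // concatenated values from all ranks
		// ... process the gathered vector ...
	}
}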
427 src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp (new file)
@ -0,0 +1,427 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__

#include "mpiTypes.hpp"
#include "types.hpp"
#include "span.hpp"

namespace pFlow::MPI
{

extern DataType realx3Type__;

extern DataType realx4Type__;

extern DataType int32x3Type__;

template<typename T>
auto constexpr Type()
{
	return MPI_BYTE;
}

template<typename T>
auto constexpr sFactor()
{
	return sizeof(T);
}

template<>
auto constexpr Type<char>()
{
	return MPI_CHAR;
}
template<>
auto constexpr sFactor<char>()
{
	return 1;
}

template<>
auto constexpr Type<short>()
{
	return MPI_SHORT;
}
template<>
auto constexpr sFactor<short>()
{
	return 1;
}

template<>
auto constexpr Type<unsigned short>()
{
	return MPI_UNSIGNED_SHORT;
}
template<>
auto constexpr sFactor<unsigned short>()
{
	return 1;
}

template<>
auto constexpr Type<int>()
{
	return MPI_INT;
}
template<>
auto constexpr sFactor<int>()
{
	return 1;
}

template<>
auto constexpr Type<unsigned int>()
{
	return MPI_UNSIGNED;
}
template<>
auto constexpr sFactor<unsigned int>()
{
	return 1;
}

template<>
auto constexpr Type<long>()
{
	return MPI_LONG;
}
template<>
auto constexpr sFactor<long>()
{
	return 1;
}

template<>
auto constexpr Type<unsigned long>()
{
	return MPI_UNSIGNED_LONG;
}
template<>
auto constexpr sFactor<unsigned long>()
{
	return 1;
}

template<>
auto constexpr Type<float>()
{
	return MPI_FLOAT;
}
template<>
auto constexpr sFactor<float>()
{
	return 1;
}

template<>
auto constexpr Type<double>()
{
	return MPI_DOUBLE;
}
template<>
auto constexpr sFactor<double>()
{
	return 1;
}

template<>
inline
auto Type<realx3>()
{
	return realx3Type__;
}
template<>
auto constexpr sFactor<realx3>()
{
	return 1;
}

template<>
inline
auto Type<realx4>()
{
	return realx4Type__;
}
template<>
auto constexpr sFactor<realx4>()
{
	return 1;
}

template<>
inline
auto Type<int32x3>()
{
	return int32x3Type__;
}
template<>
auto constexpr sFactor<int32x3>()
{
	return 1;
}

/*inline
auto createByteSequence(int sizeOfElement)
{
	DataType newType;
	MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType);
	MPI_Type_commit(&newType);
	return newType;
}*/

inline
auto TypeCommit(DataType* type)
{
	return MPI_Type_commit(type);
}

inline
auto TypeFree(DataType* type)
{
	return MPI_Type_free(type);
}

template<typename T>
inline auto getCount(Status* status, int& count)
{
	int lCount;
	auto res = MPI_Get_count(status, Type<T>(), &lCount);
	count = lCount/sFactor<T>();
	return res;
}

template<typename T>
inline int convertIndex(const int& ind)
{
	return ind*sFactor<T>();
}

template<typename T>
inline auto send(span<T> data, int dest, int tag, Comm comm)
{
	return MPI_Send(
		data.data(),
		sFactor<T>()*data.size(),
		Type<T>(),
		dest,
		tag,
		comm);
}

template<typename T>
inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
{
	return MPI_Isend(
		data.data(),
		sFactor<T>()*data.size(),
		Type<T>(),
		dest,
		tag,
		comm,
		req);
}

template<typename T>
inline auto Isend(const T& data, int dest, int tag, Comm comm, Request* req)
{
	return MPI_Isend(
		&data,
		sFactor<T>(),
		Type<T>(),
		dest,
		tag,
		comm,
		req);
}

template<typename T>
inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
{
	return MPI_Recv(
		data.data(),
		sFactor<T>()*data.size(),
		Type<T>(),
		source,
		tag,
		comm,
		status);
}

template<typename T>
inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
{
	return MPI_Irecv(
		&data,
		sFactor<T>(),
		Type<T>(),
		source,
		tag,
		comm,
		req);
}

template<typename T>
inline auto Irecv(span<T> data, int source, int tag, Comm comm, Request* req)
{
	return MPI_Irecv(
		data.data(),
		sFactor<T>()*data.size(),
		Type<T>(),
		source,
		tag,
		comm,
		req);
}

template<typename T>
inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp)
{
	return MPI_Scan(&sData, &rData, sFactor<T>()*1, Type<T>(), op, comm);
}

// gathering one scalar data to root processor
template<typename T>
inline auto gather(T sendData, span<T>& recvData, int root, Comm comm)
{
	return MPI_Gather(
		&sendData,
		sFactor<T>()*1,
		Type<T>(),
		recvData.data(),
		sFactor<T>()*1,
		Type<T>(),
		root,
		comm);
}

template<typename T>
inline auto allGather(T sendData, span<T>& recvData, Comm comm)
{
	return MPI_Allgather(
		&sendData,
		sFactor<T>()*1,
		Type<T>(),
		recvData.data(),
		sFactor<T>()*1,
		Type<T>(),
		comm);
}

template<typename T>
inline auto scatter(span<T> sendData, T& recvData, int root, Comm comm)
{
	return MPI_Scatter(
		sendData.data(),
		sFactor<T>()*1,
		Type<T>(),
		&recvData,
		sFactor<T>()*1,
		Type<T>(),
		root,
		comm);
}

template<typename T>
inline auto Bcast(T& sendData, int root, Comm comm)
{
	return MPI_Bcast(
		&sendData, sFactor<T>()*1, Type<T>(), root, comm);
}

template<typename T>
bool typeCreateIndexedBlock(
	span<int32> index,
	DataType&   newType)
{
	auto res = MPI_Type_create_indexed_block(
		index.size(),
		sFactor<T>(),
		index.data(),
		Type<T>(),
		&newType);

	if(res == Success)
	{
		TypeCommit(&newType);
	}
	else
	{
		return false;
	}

	return true;
}

template<typename T>
inline auto Gatherv
(
	span<T>   sendData,
	span<T>&  recvData,
	span<int> recvCounts,
	span<int> displs,
	int       root,
	Comm      comm)
{
	return MPI_Gatherv(
		sendData.data(),
		sendData.size()*sFactor<T>(),
		Type<T>(),
		recvData.data(),
		recvCounts.data(),
		displs.data(),
		Type<T>(),
		root,
		comm);
}

inline auto Wait(Request* request, Status* status)
{
	return MPI_Wait(request, status);
}

inline auto typeFree(DataType& type)
{
	return MPI_Type_free(&type);
}

}

#endif //__mpiCommunication_H__
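A note on the Type()/sFactor() pair above: for any T without a dedicated specialization, the wrappers fall back to MPI_BYTE and scale every count by sizeof(T), so arbitrary trivially-copyable types can still be transported. A minimal sketch (illustrative only; pairData is a hypothetical type, not part of this diff):

// illustrative sketch -- pairData is hypothetical, not part of this diff
#include "mpiCommunication.hpp"

struct pairData { double a; double b; };   // no Type<> specialization exists

void sendExamples(pFlow::span<double> d, pFlow::span<pairData> p,
                  pFlow::MPI::Comm comm)
{
    using namespace pFlow::MPI;
    // double is specialized: count = 1*d.size(), datatype = MPI_DOUBLE
    send(d, 1, 0, comm);
    // pairData falls back:  count = sizeof(pairData)*p.size(), MPI_BYTE
    send(p, 1, 0, comm);
}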
69  src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp  (new file)
@@ -0,0 +1,69 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __mpiTypes_H__
#define __mpiTypes_H__

#include <mpi.h>

namespace pFlow::MPI
{
	// types
	using Comm        = MPI_Comm;
	using Group       = MPI_Group;
	using Status      = MPI_Status;
	using Offset      = MPI_Offset;
	using Request     = MPI_Request;
	using Operation   = MPI_Op;
	using Information = MPI_Info;
	using DataType    = MPI_Datatype;

	inline Comm CommWorld = MPI_COMM_WORLD;

	// all nulls

	inline auto ProcNull       = MPI_PROC_NULL;
	inline auto InfoNull       = MPI_INFO_NULL;
	inline auto RequestNull    = MPI_REQUEST_NULL;
	inline auto StatusIgnore   = MPI_STATUS_IGNORE;
	inline auto StatusesIgnore = MPI_STATUSES_IGNORE;
	inline auto FileNull       = MPI_FILE_NULL;
	inline Comm CommNull       = MPI_COMM_NULL;
	inline auto TypeNull       = MPI_DATATYPE_NULL;

	// errors
	inline const auto Success = MPI_SUCCESS;
	inline const auto ErrOp   = MPI_ERR_OP;

	inline const auto SumOp = MPI_SUM;

	inline const size_t MaxNoProcessors = 2048;

}

#endif //__mpiTypes_H__
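The derived-type handles realx3Type__, realx4Type__ and int32x3Type__ declared in mpiCommunication.hpp still have to be created and committed once at startup; that registration code is not part of this section. A plausible sketch, assuming realx3 is three contiguous real components:

// plausible initialization sketch (assumption; the actual registration
// code is not shown in this diff)
#include "mpiCommunication.hpp"

namespace pFlow::MPI
{
    DataType realx3Type__ = TypeNull;

    void commitTypes()
    {
        // three contiguous real components make one realx3 element
        MPI_Type_contiguous(3, Type<real>(), &realx3Type__);
        TypeCommit(&realx3Type__);
    }
}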
30  src/phasicFlow/MPIParallelization/MPI/procCommunication.cpp  (new file)
@@ -0,0 +1,30 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#include "procCommunication.hpp"

pFlow::MPI::procCommunication::procCommunication
(
	const localProcessors& proc
)
:
	processors_(proc)
{}
178  src/phasicFlow/MPIParallelization/MPI/procCommunication.hpp  (new file)
@@ -0,0 +1,178 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/
#ifndef __procCommunication_hpp__
#define __procCommunication_hpp__

#include "procVector.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"

namespace pFlow::MPI
{

class procCommunication
{
protected:

	const localProcessors& processors_;

public:

	procCommunication(const localProcessors& proc);

	~procCommunication()=default;

	inline
	const auto& processors()const
	{
		return processors_;
	}

	/// @brief Tell if this processor is the master processor in the
	/// local communicator
	/// @return true if this processor is master
	inline
	bool localMaster()const
	{
		return processors_.localMaster();
	}

	inline
	auto localSize()const
	{
		return processors_.localSize();
	}

	inline
	auto localRank()const
	{
		return processors_.localRank();
	}

	inline
	auto localCommunicator()const
	{
		return processors_.localCommunicator();
	}

	/// @brief return the master number in the local communicator
	auto localMasterNo()const
	{
		return processors_.localMasterNo();
	}

	/// Send a single value from master to all processors, including
	/// itself (local communicator)
	template<typename T>
	std::pair<T,bool> distributeMasterToAll(const T& val)
	{
		T retVal = val;
		auto res = CheckMPI(
			Bcast(retVal, localMasterNo(), localCommunicator()),
			false);

		return {retVal, res};
	}

	/// @brief Send a single value to all processors including master
	/// (in local communicator)
	/// @param val value to be sent
	/// @param recvVal received value
	/// @return true if successful and false if it fails
	template<typename T>
	bool distributeMasterToAll(const T& val, T& recvVal)
	{
		recvVal = val;
		return CheckMPI(
			Bcast(recvVal, localMasterNo(), localCommunicator()),
			false);
	}

	/// @brief Distribute the values in the vector (whose size equals the
	/// number of processors in the local communicator), one value to
	/// each processor
	template<typename T>
	std::pair<T,bool> distributeMasterToAll(const procVector<T>& vals)
	{
		T val;
		auto vec = vals.getSpan();
		auto res = CheckMPI(
			scatter(vec, val, localMasterNo(), localCommunicator()),
			false);

		return {val, res};
	}

	/// @brief Each processor in the local communicator calls this function
	/// with a value and the values are distributed among all processors
	template<typename T>
	std::pair<procVector<T>, bool> collectAllToAll(const T& val)
	{
		procVector<T> allVec(processors_);
		auto vec = allVec.getSpan();
		auto res = CheckMPI(
			allGather(val, vec, localCommunicator()),
			false);
		return {allVec, res};
	}

	/// @brief Each processor in the local communicator calls this function
	/// with a value and the values are distributed among all processors
	template<typename T>
	bool collectAllToAll(const T& val, procVector<T>& allVec)
	{
		auto vec = allVec.getSpan();
		return CheckMPI(
			allGather(val, vec, localCommunicator()),
			false);
	}

	/// @brief Each processor in the local communicator calls this function
	/// with a value and all values are collected in the master processor
	template<typename T>
	std::pair<procVector<T>,bool> collectAllToMaster(const T& val)
	{
		// only allocated on the master processor
		procVector<T> masterVec(processors_, true);

		auto masterSpan = masterVec.getSpan();
		auto res = CheckMPI(
			gather(val, masterSpan, localMasterNo(), localCommunicator()),
			false);

		return {masterVec, res};
	}

	template<typename T>
	bool collectAllToMaster(const T& val, procVector<T>& masterVec)
	{
		// only meaningful on the master processor
		auto [vec, res] = collectAllToMaster(val);
		masterVec = vec;
		return res;
	}

}; //procCommunication

} // pFlow::MPI

#endif //__procCommunication_hpp__
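A minimal usage sketch of procCommunication (illustrative, not part of the diff; localCount and dtOnMaster are hypothetical values):

// usage sketch -- illustrative, not part of the diff
pFlow::MPI::procCommunication comm(pFlowProcessors());

pFlow::uint32 localCount = 0;    // hypothetical per-processor value

// every rank contributes one value; the vector is filled on the master
auto [counts, ok1] = comm.collectAllToMaster(localCount);

// the master broadcasts one value to every rank (itself included)
pFlow::real dtOnMaster = 0.0;    // hypothetical
auto [dt, ok2] = comm.distributeMasterToAll(dtOnMaster);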
199  src/phasicFlow/MPIParallelization/MPI/procVector.hpp  (new file)
@@ -0,0 +1,199 @@
#ifndef __procVector_hpp__
#define __procVector_hpp__

// from PhasicFlow
#include "localProcessors.hpp"
#include "span.hpp"
#include "streams.hpp"
#include "IOPattern.hpp"

#include "mpiTypes.hpp"

namespace pFlow::MPI
{

template<typename T>
class procVector
:
	public std::vector<T>
{
public:

	using ProcVectorType = procVector<T>;

	using VectorType = std::vector<T>;

protected:

	int rank_ = 0;

	bool isMaster_ = false;

	using VectorType::reserve;

	using VectorType::resize;

	using VectorType::assign;

	using VectorType::clear;

	using VectorType::erase;

public:

	procVector(
		const localProcessors& procs,
		bool onlyMaster = false)
	:
		rank_(procs.localRank()),
		isMaster_(procs.localMaster())
	{
		if( onlyMaster && !isMaster_ ) return;
		this->reserve(procs.localSize());
		this->resize(procs.localSize());
	}

	procVector(
		const T& val,
		const localProcessors& procs,
		bool onlyMaster = false)
	:
		procVector(procs, onlyMaster)
	{
		std::fill(this->begin(), this->end(), val);
	}

	procVector(const T& val, const procVector& src)
	{
		this->reserve(src.size());
		this->resize(src.size());
		std::fill(this->begin(), this->end(), val);
	}

	procVector(const localProcessors& procs, const VectorType& src)
	:
		procVector(procs)
	{
		if(src.size() != this->size())
		{
			fatalErrorInFunction<<
			"Size of std::vector and procVector does not match in construction"<<endl;
			fatalExit;
		}

		this->assign(src.begin(), src.end());
	}

	procVector(const procVector&) = default;

	procVector(procVector&&) = default;

	procVector& operator=(const procVector&) = default;

	procVector& operator=(procVector&&) = default;

	procVector& operator=(const VectorType& src)
	{
		if(src.size() != this->size())
		{
			fatalErrorInFunction<<
			"Size of std::vector and procVector does not match in copy assignment"<<endl;
			fatalExit;
		}

		static_cast<VectorType&>(*this).operator=(src);
		return *this;
	}

	procVector& operator=(VectorType&& src)
	{
		if(src.size() != this->size())
		{
			fatalErrorInFunction<<
			"Size of std::vector and procVector does not match in move assignment"<<endl;
			fatalExit;
		}

		static_cast<VectorType&>(*this).operator=(std::move(src));
		return *this;
	}

	procVector(const localProcessors& procs, VectorType&& src)
	:
		VectorType(std::move(src))
	{
		if(this->size() != static_cast<size_t>(procs.localSize()))
		{
			fatalErrorInFunction<<
			"Size of std::vector and procVector does not match in move"<<endl;
			fatalExit;
		}
		isMaster_ = procs.localMaster();
		rank_ = procs.localRank();
	}

	~procVector()=default;

	inline
	auto& thisValue()
	{
		return VectorType::operator[](rank_);
	}

	inline
	const auto& thisValue()const
	{
		return VectorType::operator[](rank_);
	}

	inline
	auto size()const
	{
		return VectorType::size();
	}

	inline
	auto rank()const
	{
		return rank_;
	}

	inline
	auto getSpan()
	{
		return span<T>(this->data(), this->size());
	}

	inline
	auto getSpan()const
	{
		return span<T>(const_cast<T*>(this->data()), this->size());
	}

	bool write(
		iOstream& os,
		const IOPattern& iop ) const
	{
		return writeStdVector(os, *this, iop);
	}

};

template<typename T>
inline iOstream& operator << (iOstream& os, const procVector<T>& ovec )
{
	if( !ovec.write(os, IOPattern::AllProcessorsDifferent) )
	{
		ioErrorInFile(os.name(), os.lineNumber());
		fatalExit;
	}
	return os;
}

}

#endif
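For reference, a short sketch of how procVector is typically used (illustrative, not part of the diff; nLocal is hypothetical):

// usage sketch -- illustrative, not part of the diff
pFlow::MPI::procVector<pFlow::uint32> nPoints(pFlowProcessors());

pFlow::uint32 nLocal = 0;          // hypothetical local count
nPoints.thisValue() = nLocal;      // slot belonging to this rank

auto s = nPoints.getSpan();        // raw view usable by the MPI wrappers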
@@ -0,0 +1,158 @@
template<typename T>
pFlow::MPI::scatteredMasterDistribute<T>::scatteredMasterDistribute
(
	const localProcessors& procs
)
:
	procCommunication(procs),
	indexedMap_(TypeNull, procs, true)
{}

template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
	procVector<span<uint32>>& maps
)
{
	if(this->localMaster())
	{
		if(maps.size() != this->localSize() )
		{
			fatalErrorInFunction<<"size mismatch";
			return false;
		}

		std::vector<int32> index;

		freeIndexedMap();

		for(auto proc = 0; proc< maps.size(); proc++)
		{
			auto m = maps[proc];
			index.resize(m.size());
			for(size_t i=0; i<index.size(); i++ )
			{
				index[i] = m[i];
			}

			DataType dt;

			if(! typeCreateIndexedBlock<T>( makeSpan(index), dt))
			{
				fatalErrorInFunction;
				return false;
			}
			else
			{
				indexedMap_[proc] = dt;
			}
		}
	}
	return true;
}

template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
	procVector<span<int32>>& maps
)
{
	if(this->localMaster())
	{
		if(maps.size() != this->localSize() )
		{
			fatalErrorInFunction<<"size mismatch";
			return false;
		}

		freeIndexedMap();

		for(auto proc = 0; proc< maps.size(); proc++)
		{
			DataType dt;
			if( !typeCreateIndexedBlock<T>(maps[proc], dt) )
			{
				fatalErrorInFunction;
				return false;
			}
			else
			{
				indexedMap_[proc] = dt;
			}
		}
	}
	return true;
}

template<typename T>
void pFlow::MPI::scatteredMasterDistribute<T>::freeIndexedMap()
{
	for(size_t i=0; i<indexedMap_.size(); i++)
	{
		if(indexedMap_[i]!= TypeNull)
		{
			TypeFree(&indexedMap_[i]);
			indexedMap_[i] = TypeNull;
		}
	}
}

template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::distribute
(
	span<T>& sendBuff,
	span<T>& recvb
)
{
	procVector<Request> requests(processors(), true);
	procVector<Status>  statuses(processors(), true);

	if(this->localMaster())
	{
		bool res = true;
		for(int32 i = indexedMap_.size()-1; i>=0; i--)
		{
			res = res&&CheckMPI(
				MPI_Issend(
					sendBuff.data(),
					1,
					indexedMap_[i],
					i,
					0,
					localCommunicator(),
					&requests[i]),
				false);
		}

		if(!res) return false;
	}

	Status stat;
	bool success = CheckMPI(
		MPI_Recv(
			recvb.data(),
			recvb.size()*sFactor<T>(),
			Type<T>(),
			0,
			0,
			localCommunicator(),
			&stat),
		false);

	if(this->localMaster())
	{
		CheckMPI(
			MPI_Waitall(requests.size(), requests.data(), statuses.data()),
			false);
	}

	return success;
}
@@ -0,0 +1,67 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __scatteredMasterDistribute_hpp__
#define __scatteredMasterDistribute_hpp__

#include "mpiCommunication.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "stdVectorHelper.hpp"
#include "streams.hpp"

namespace pFlow::MPI
{

template<typename T>
class scatteredMasterDistribute : public procCommunication
{
protected:

	procVector<DataType> indexedMap_;

	void freeIndexedMap();

public:

	scatteredMasterDistribute(const localProcessors& procs);

	~scatteredMasterDistribute()
	{
		freeIndexedMap();
	}

	scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;

	scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete;

	bool setDataMaps(procVector<span<uint32>>& maps);

	bool setDataMaps(procVector<span<int32>>& maps);

	bool distribute(span<T>& sendBuff, span<T>& recvb);
};

} // pFlow::MPI

#include "scatteredMasterDistribute.cpp"

#endif //__scatteredMasterDistribute_hpp__
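Putting the two sides together, a usage sketch of scatteredMasterDistribute (illustrative, not part of the diff; the maps and buffers are hypothetical, and only the master's map contents matter):

// usage sketch -- illustrative, not part of the diff
pFlow::MPI::scatteredMasterDistribute<pFlow::realx3> dist(pFlowProcessors());

// on the master, maps[p] lists the element indices destined for rank p
pFlow::MPI::procVector<pFlow::span<pFlow::uint32>> maps(pFlowProcessors(), true);
if( !dist.setDataMaps(maps) ) { /* handle error */ }

pFlow::span<pFlow::realx3> src(nullptr, 0); // master's full buffer (hypothetical)
pFlow::span<pFlow::realx3> dst(nullptr, 0); // this rank's pre-sized buffer (hypothetical)
dist.distribute(src, dst);                  // every rank receives its mapped slice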
@@ -0,0 +1,166 @@
#include "scatteredMasterDistributeChar.hpp"

pFlow::MPI::scatteredMasterDistribute<char>::scatteredMasterDistribute
(
	size_t sizeOfElement,
	const localProcessors& procs
)
:
	procCommunication(procs),
	indexedMap_(TypeNull, procs, true),
	sizeOfElement_(sizeOfElement)
{}

bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
	procVector<span<uint32>>& maps
)
{
	if(this->localMaster())
	{
		if(maps.size() != this->localSize() )
		{
			fatalErrorInFunction<<"size mismatch";
			return false;
		}

		freeIndexedMap();

		std::vector<MPI_Aint> index;

		for(auto proc = 0; proc< maps.size(); proc++)
		{
			auto m = maps[proc];
			index.resize(m.size());
			for(size_t i=0; i<index.size(); i++ )
			{
				index[i] = m[i]*sizeOfElement_;
			}

			DataType dt;
			MPI_Type_create_hindexed_block(
				m.size(),
				sizeOfElement_,
				index.data(),
				MPI_BYTE,
				&dt);
			MPI_Type_commit(&dt);

			indexedMap_[proc] = dt;
		}
	}

	return true;
}

bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
	procVector<span<int32>>& maps
)
{
	if(this->localMaster())
	{
		if(maps.size() != this->localSize() )
		{
			fatalErrorInFunction<<"size mismatch";
			return false;
		}

		std::vector<MPI_Aint> index;
		freeIndexedMap();

		for(auto proc = 0; proc< maps.size(); proc++)
		{
			auto m = maps[proc];
			index.resize(m.size());
			for(size_t i=0; i<index.size(); i++ )
			{
				index[i] = m[i]*sizeOfElement_;
			}

			DataType dt;
			MPI_Type_create_hindexed_block(
				index.size(),
				sizeOfElement_,
				index.data(),
				MPI_CHAR,
				&dt);
			MPI_Type_commit(&dt);

			indexedMap_[proc] = dt;
		}
	}

	return true;
}

void pFlow::MPI::scatteredMasterDistribute<char>::freeIndexedMap()
{
	for(size_t i=0; i<indexedMap_.size(); i++)
	{
		if(indexedMap_[i]!= TypeNull)
		{
			TypeFree(&indexedMap_[i]);
			indexedMap_[i] = TypeNull;
		}
	}
}

bool pFlow::MPI::scatteredMasterDistribute<char>::distribute
(
	span<char>& sendBuff,
	span<char>& recvb
)
{
	procVector<Request> requests(processors(), true);
	procVector<Status>  statuses(processors(), true);

	if(this->localMaster())
	{
		bool res = true;
		for(int32 i = indexedMap_.size()-1; i>=0; i--)
		{
			res = res&&CheckMPI(
				MPI_Issend(
					sendBuff.data(),
					1,
					indexedMap_[i],
					i,
					0,
					localCommunicator(),
					&requests[i]),
				false);
		}

		if(!res) return false;
	}

	Status stat;
	bool success = CheckMPI(
		MPI_Recv(
			recvb.data(),
			recvb.size(),
			MPI_CHAR,
			0,
			0,
			localCommunicator(),
			&stat),
		true);

	if(this->localMaster())
	{
		CheckMPI(
			MPI_Waitall(requests.size(), requests.data(), statuses.data()),
			false);
	}

	return success;
}
@@ -0,0 +1,66 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#ifndef __scatteredMasterDistributeChar_hpp__
#define __scatteredMasterDistributeChar_hpp__

#include "scatteredMasterDistribute.hpp"

namespace pFlow::MPI
{

template<>
class scatteredMasterDistribute<char> : public procCommunication
{
protected:

	procVector<DataType> indexedMap_;

	size_t sizeOfElement_;

	void freeIndexedMap();

public:

	scatteredMasterDistribute(
		size_t sizeOfElement,
		const localProcessors& procs
	);

	~scatteredMasterDistribute()
	{
		freeIndexedMap();
	}

	scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;

	scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete;

	bool setDataMaps(procVector<span<uint32>>& maps);

	bool setDataMaps(procVector<span<int32>>& maps);

	bool distribute(span<char>& sendBuff, span<char>& recvb);
};

} // pFlow::MPI

#endif //__scatteredMasterDistributeChar_hpp__
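The <char> specialization above is what lets one object move elements of any fixed-size type as raw bytes: the element indices in the maps are converted to byte offsets through sizeOfElement_. A usage sketch (illustrative, not part of the diff; record is a hypothetical element type and maps is as in the generic sketch above):

// usage sketch -- illustrative, not part of the diff
struct record { double x; int id; };   // hypothetical fixed-size element

pFlow::MPI::scatteredMasterDistribute<char>
    byteDist(sizeof(record), pFlowProcessors());

// maps index *elements*; the class multiplies by sizeof(record) internally
byteDist.setDataMaps(maps);

pFlow::span<char> src(nullptr, 0);     // master bytes (hypothetical)
pFlow::span<char> dst(nullptr, 0);     // local bytes (hypothetical)
byteDist.distribute(src, dst);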
52  src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.cpp  (new file)
@@ -0,0 +1,52 @@
template<typename T>
bool pFlow::MPI::dataIOMPI<T>::gatherData(span<T> data )
{
	if(this->ioPattern_.isAllProcessorsDifferent())
	{
		this->bufferSpan_ = data;
		return true;
	}

	if( this->ioPattern_.isMasterProcessorDistribute())
	{
		auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());

		if(!gatherT.gatherData(data))
		{
			fatalErrorInFunction<<"Error in gathering data to master"<<endl;
			return false;
		}

		this->buffer_ = gatherT.moveData();
		this->bufferSpan_ = span<T>(this->buffer_.data(), this->buffer_.size());

		return true;
	}

	if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
	{
		if( this->ioPattern_.isMaster() )
		{
			this->bufferSpan_ = data;
		}
		else
		{
			this->bufferSpan_ = span<T>(nullptr, 0);
		}
		return true;
	}

	return false;
}

template<typename T>
pFlow::MPI::dataIOMPI<T>::dataIOMPI(const IOPattern& iop)
:
	dataIO<T>(iop)
{}
58  src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.hpp  (new file)
@@ -0,0 +1,58 @@
#ifndef __datIOMPI_hpp__
#define __datIOMPI_hpp__

#include "dataIO.hpp"
#include "pFlowProcessors.hpp"
#include "gatherMaster.hpp"

namespace pFlow::MPI
{

template<typename T>
class dataIOMPI
:
	public dataIO<T>
{
public:

	using DataIOType    = dataIO<T>;

	using DataIOMPIType = dataIOMPI<T>;

protected:

	bool gatherData(span<T> data ) override;

public:

	TypeInfoTemplate111("dataIO",T,"MPI");

	explicit dataIOMPI(const IOPattern& iop);

	dataIOMPI(const dataIOMPI&) = default;

	dataIOMPI(dataIOMPI&&) = default;

	dataIOMPI& operator=(const dataIOMPI&) = default;

	dataIOMPI& operator=(dataIOMPI&&) = default;

	~dataIOMPI() = default;

	add_vCtor
	(
		DataIOType,
		DataIOMPIType,
		IOPattern
	);

}; //dataIOMPI

} //namespace pFlow::MPI

#include "dataIOMPI.cpp"

#endif //__datIOMPI_hpp__
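dataIOMPI registers itself in dataIO's virtual-constructor table under the "MPI" tag, so an MPI-enabled build can select it at run time from the IOPattern. The gather logic then has three outcomes, summarized below (the create() call is an assumption about the selection mechanism, not shown in this diff):

// summary sketch -- the create() call is an assumption, not shown here
// auto io = pFlow::dataIO<pFlow::real>::create(ioPattern);
//
// gatherData() then takes one of three paths:
//   - allProcessorsDifferent   : keep the local span as the buffer
//   - masterProcessorDistribute: gatherMaster concatenates to the master
//   - masterOnly / allSimilar  : only the master exposes data; other
//                                ranks get an empty span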
27  src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPIs.cpp  (new file)
@@ -0,0 +1,27 @@
#include "types.hpp"
#include "dataIOMPI.hpp"

template class pFlow::MPI::dataIOMPI<pFlow::uint8>;

template class pFlow::MPI::dataIOMPI<pFlow::int8>;

template class pFlow::MPI::dataIOMPI<pFlow::int32>;

template class pFlow::MPI::dataIOMPI<pFlow::int64>;

template class pFlow::MPI::dataIOMPI<pFlow::uint32>;
template class pFlow::MPI::dataIOMPI<pFlow::uint32x3>;

template class pFlow::MPI::dataIOMPI<pFlow::uint64>;

template class pFlow::MPI::dataIOMPI<pFlow::size_t>;

template class pFlow::MPI::dataIOMPI<pFlow::real>;

template class pFlow::MPI::dataIOMPI<pFlow::realx3>;

template class pFlow::MPI::dataIOMPI<pFlow::realx4>;

template class pFlow::MPI::dataIOMPI<pFlow::word>;
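Since dataIOMPI.hpp includes its own .cpp, this translation unit pins one explicit instantiation per transported type; supporting an additional type is a single line, as sketched (int32x3 here is a hypothetical addition, not part of the diff):

// hypothetical addition, not part of the diff:
// template class pFlow::MPI::dataIOMPI<pFlow::int32x3>;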
450  src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp  (new file)
@@ -0,0 +1,450 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

-----------------------------------------------------------------------------*/

#include "MPISimulationDomain.hpp"
#include "systemControl.hpp"
#include "rcb1DPartitioning.hpp"
#include "scatteredMasterDistribute.hpp"
#include "scatteredMasterDistributeChar.hpp"

pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control)
:
	simulationDomain(control),
	communication_(pFlowProcessors()),
	subDomainsAll_(pFlowProcessors()),
	numPointsAll_(pFlowProcessors()),
	domainPartitioning_( makeUnique<rcb1DPartitioning>(subDict("decomposition"), globalBox()))
{}

bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
{
	auto& boundaries = this->subDict("boundaries");

	this->addDict("MPIBoundaries", boundaries);
	auto& mpiBoundaries = this->subDict("MPIBoundaries");

	real neighborLength = boundaries.getVal<real>("neighborLength");

	auto neighbors = findPlaneNeighbors();

	for(uint32 i=0; i<sizeOfBoundaries(); i++)
	{
		word bName = bundaryName(i);
		if( !boundaries.containsDictionay(bName) )
		{
			fatalErrorInFunction<<"dictionary "<< bName<<
			" does not exist in "<< boundaries.globalName()<<endl;
			return false;
		}
		auto& bDict = mpiBoundaries.subDict(bName);

		if(!bDict.addOrKeep("neighborLength", neighborLength))
		{
			fatalErrorInFunction<<"error in adding neighborLength to "<< bName <<
			" in dictionary "<< boundaries.globalName()<<endl;
			return false;
		}
		if( thisDomainActive_ )
		{
			if( neighbors[i] == -1 )
			{
				bDict.add("neighborProcessorNo", processors::globalRank());
			}
			else
			{
				bDict.add("neighborProcessorNo", neighbors[i]);
				bDict.addOrReplace("type", "processor");
			}
		}
		else
		{
			bDict.add("neighborProcessorNo", processors::globalRank());
			bDict.addOrReplace("type", "none");
		}

		if( bDict.getVal<word>("type") == "periodic")
		{
			fatalErrorInFunction<<
			"periodic boundary is not implemented yet"<<endl;
			fatalExit;
		}
	}

	return true;
}

bool pFlow::MPI::MPISimulationDomain::setThisDomain()
{
	thisDomain_ = domain(domainPartitioning_->localBox());
	uint32 thisNumPoints = initialNumberInThis();

	if(!communication_.collectAllToAll(thisNumPoints, numPointsAll_))
	{
		fatalErrorInFunction<<
		"Failed to distribute number of points."<<endl;
		return false;
	}
	uint32 allNumPoints = std::accumulate(numPointsAll_.begin(), numPointsAll_.end(), 0u);

	if( thisNumPoints != 0u )
	{
		thisDomainActive_ = true;
	}
	else
	{
		if(communication_.localMaster() && allNumPoints == 0u)
			thisDomainActive_ = true;
		else
			thisDomainActive_ = false;
	}

	if( thisDomainActive_ )
	{
		bool allInactive = true;
		for(int32 i=0; i<communication_.localSize(); i++ )
		{
			if(i == communication_.localRank() ) continue;
			if(numPointsAll_[i] != 0)
			{
				allInactive = false;
				break;
			}
		}

		if(allInactive)
		{
			thisDomain_ = domain(globalBox());
		}
	}

	if(!communication_.collectAllToAll(thisDomain_, subDomainsAll_))
	{
		fatalErrorInFunction<< "Failed to distribute domains"<<endl;
		return false;
	}

	return true;
}

std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
{
	std::vector<int> neighbors(sizeOfBoundaries(), -2);
	domain gDomain(globalBox());

	// left
	if( thisDomain_.left().parallelTouch( gDomain.left() ) )
	{
		neighbors[0] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.left().parallelTouch(
			subDomainsAll_[i].right()) )
		{
			neighbors[0] = i;
			break;
		}
	}

	// right
	if( thisDomain_.right().parallelTouch( gDomain.right() ) )
	{
		neighbors[1] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.right().parallelTouch(
			subDomainsAll_[i].left()) )
		{
			neighbors[1] = i;
			break;
		}
	}

	// bottom
	if( thisDomain_.bottom().parallelTouch( gDomain.bottom() ) )
	{
		neighbors[2] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.bottom().parallelTouch(
			subDomainsAll_[i].top()) )
		{
			neighbors[2] = i;
			break;
		}
	}

	// top
	if( thisDomain_.top().parallelTouch( gDomain.top() ) )
	{
		neighbors[3] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.top().parallelTouch(
			subDomainsAll_[i].bottom()) )
		{
			neighbors[3] = i;
			break;
		}
	}

	// rear
	if( thisDomain_.rear().parallelTouch( gDomain.rear() ) )
	{
		neighbors[4] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.rear().parallelTouch(
			subDomainsAll_[i].front()) )
		{
			neighbors[4] = i;
			break;
		}
	}

	// front
	if( thisDomain_.front().parallelTouch( gDomain.front() ) )
	{
		neighbors[5] = -1;
	}

	for(int i=0; i<subDomainsAll_.size(); i++)
	{
		if(i == subDomainsAll_.rank()) continue;

		if( thisDomain_.front().parallelTouch(
			subDomainsAll_[i].rear()) )
		{
			neighbors[5] = i;
			break;
		}
	}
	return neighbors;
}

const pFlow::dictionary&
pFlow::MPI::MPISimulationDomain::thisBoundaryDict() const
{
	return this->subDict("MPIBoundaries");
}

bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{
	pFlagTypeHost flags(pointPos.size(), 0 , pointPos.size());
	initialNumPoints_ = pointPos.size();
	if( !domainPartitioning_->partition(pointPos, flags) )
	{
		fatalErrorInFunction<<
		"Point partitioning failed."<<endl;
		return false;
	}

	if(!setThisDomain()) return false;

	if(!createBoundaryDicts()) return false;

	return true;
}

pFlow::uint32 pFlow::MPI::MPISimulationDomain::initialNumberInThis() const
{
	uint32 numImport = domainPartitioning_->numberImportThisProc();
	uint32 numExport = domainPartitioning_->numberExportThisProc();
	return max(initialNumPoints_ + numImport - numExport, 0u);
}

bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
	span<char> src,
	span<char> dst,
	size_t sizeOfElement
)const
{
	MPI::scatteredMasterDistribute<char> dataDist(sizeOfElement, pFlowProcessors());

	auto lists = domainPartitioning_->allExportLists();

	if(!dataDist.setDataMaps( lists ))
	{
		fatalErrorInFunction;
		return false;
	}

	if(!dataDist.distribute(src, dst))
	{
		fatalErrorInFunction<<
		"Error in distribute"<<endl;
		return false;
	}
	return true;
}

bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
	span<realx3> src,
	span<realx3> dst
)const
{
	MPI::scatteredMasterDistribute<realx3>
		dataDist(pFlowProcessors());
	auto lists = domainPartitioning_->allExportLists();

	if(!dataDist.setDataMaps( lists ))
	{
		fatalErrorInFunction;
		return false;
	}

	if(!dataDist.distribute(src, dst))
	{
		fatalErrorInFunction<<
		"Error in distribute"<<endl;
		return false;
	}

	return true;
}

bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
	span<real> src,
	span<real> dst
)const
{
	MPI::scatteredMasterDistribute<real>
		dataDist(pFlowProcessors());

	auto lists = domainPartitioning_->allExportLists();

	if(!dataDist.setDataMaps( lists ))
	{
		fatalErrorInFunction;
		return false;
	}

	if(!dataDist.distribute(src, dst))
	{
		fatalErrorInFunction<<
		"Error in distribute"<<endl;
		return false;
	}

	return true;
}

bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
	span<uint32> src,
	span<uint32> dst
)const
{
	MPI::scatteredMasterDistribute<uint32>
		dataDist(pFlowProcessors());

	auto lists = domainPartitioning_->allExportLists();

	if(!dataDist.setDataMaps( lists ))
	{
		fatalErrorInFunction;
		return false;
	}

	if(!dataDist.distribute(src, dst))
	{
		fatalErrorInFunction<<
		"Error in distribute"<<endl;
		return false;
	}

	return true;
}

bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
	span<int32> src,
	span<int32> dst
)const
{
	MPI::scatteredMasterDistribute<int32>
		dataDist(pFlowProcessors());

	auto lists = domainPartitioning_->allExportLists();

	if(!dataDist.setDataMaps( lists ))
	{
		fatalErrorInFunction;
		return false;
	}

	if(!dataDist.distribute(src, dst))
	{
		fatalErrorInFunction<<
		"Error in distribute"<<endl;
		return false;
	}

	return true;
}

pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeImported() const
{
	return domainPartitioning_->numberImportThisProc();
}

pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeExported() const
{
	return domainPartitioning_->numberExportThisProc();
}

bool
pFlow::MPI::MPISimulationDomain::domainActive() const
{
	return thisDomainActive_;
}

const pFlow::domain&
pFlow::MPI::MPISimulationDomain::thisDomain() const
{
	return thisDomain_;
}
|
src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp (new file, 118 lines)
@ -0,0 +1,118 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPISimulationDomain_hpp__
#define __MPISimulationDomain_hpp__

#include "partitioning.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "simulationDomain.hpp"

namespace pFlow::MPI
{

class MPISimulationDomain : public simulationDomain
{
private:

    /// a processor communicator for the simulation domain
    procCommunication       communication_;

    /// sub-domains (thisDomain_ for all processors)
    procVector<domain>      subDomainsAll_;

    /// number of points in all processors
    procVector<uint32>      numPointsAll_;

    /// partitioning object
    uniquePtr<partitioning> domainPartitioning_ = nullptr;

    /// the actual limits of the simulation domain in this processor
    domain                  thisDomain_;

    uint32                  initialNumPoints_ = 0;

    bool                    thisDomainActive_ = false;

    bool createBoundaryDicts() final;

    bool setThisDomain() final;

    std::vector<int> findPlaneNeighbors() const;

public:

    TypeInfo("simulationDomain<MPI>");

    explicit MPISimulationDomain(systemControl& control);

    ~MPISimulationDomain() final = default;

    add_vCtor
    (
        simulationDomain,
        MPISimulationDomain,
        systemControl
    );

    const dictionary& thisBoundaryDict() const final;

    /// @brief Perform the initial decomposition and update of sub-domains
    /// @param pointPos positions of the points to be distributed
    /// @return true if the initial update succeeds
    bool initialUpdateDomains(span<realx3> pointPos) final;

    /// @brief Number of points initially assigned to this processor
    /// @return the initial number of points in this sub-domain
    uint32 initialNumberInThis() const final;

    bool initialTransferBlockData(
        span<char> src,
        span<char> dst,
        size_t     sizeOfElement
    ) const final;

    bool initialTransferBlockData(span<realx3> src, span<realx3> dst)
        const final;

    bool initialTransferBlockData(span<real> src, span<real> dst)
        const final;

    bool initialTransferBlockData(span<uint32> src, span<uint32> dst)
        const final;

    bool initialTransferBlockData(span<int32> src, span<int32> dst)
        const final;

    uint32 numberToBeImported() const final;

    uint32 numberToBeExported() const final;

    /// @brief Is this domain active?
    /// Active means that it contains particles and that the
    /// boundaries and other entities of the simulation domain are valid
    bool domainActive() const final;

    const domain& thisDomain() const final;
};

} // namespace pFlow::MPI

#endif //__MPISimulationDomain_hpp__
@ -0,0 +1,113 @@

#include "partitioning.hpp"
#include "error.hpp"
#include "streams.hpp"

void pFlow::partitioning::freeZoltan()
{
    if(validPointers_)
    {
        Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_,
                             &importProcs_, &importToPart_);

        Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_,
                             &exportProcs_, &exportToPart_);
        validPointers_ = false;
    }

    zoltan_.release();
}

pFlow::partitioning::partitioning
(
    const dictionary& dict,
    const box& globalBox
)
:
    globalBox_(globalBox)
{
    if(!zoltanInitialized__)
    {
        auto rc = Zoltan_Initialize
        (
            processors::argc(),
            processors::argv(),
            &version_
        );

        if (rc != ZOLTAN_OK)
        {
            fatalErrorInFunction<<"Cannot initialize zoltan"<<endl;
            fatalExit;
        }
        zoltanInitialized__ = true;
    }

    // Creates Zoltan object
    zoltan_ = std::make_unique<Zoltan>(pFlowProcessors().localCommunicator());

    zoltan_->Set_Param("DEBUG_LEVEL", "0");
    zoltan_->Set_Param("LB_METHOD", "RCB");
    zoltan_->Set_Param("NUM_GID_ENTRIES", "1");
    zoltan_->Set_Param("NUM_LID_ENTRIES", "1");
    zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0");
    zoltan_->Set_Param("RETURN_LISTS", "ALL");
}

bool pFlow::partitioning::partition(span<realx3> points, pFlagTypeHost flags)
{
    pointCollection pointCollctn{points, flags};

    return partition(pointCollctn);
}

int GetObjectSize
(
    void *data,
    int num_gid_entries,
    int num_lid_entries,
    ZOLTAN_ID_PTR global_id,
    ZOLTAN_ID_PTR local_id,
    int *ierr
)
{
    *ierr = ZOLTAN_OK;
    pFlow::uint32 s = *(static_cast<pFlow::uint32*>(data));
    return static_cast<int>(s);
}

void PackObject
(
    void *data,
    int num_gid_entries,
    int num_lid_entries,
    ZOLTAN_ID_PTR global_id,
    ZOLTAN_ID_PTR local_id,
    int dest,
    int size,
    char *buf,
    int *ierr
)
{
}

bool pFlow::partitioning::migrateData(span<char> src, span<char> dst, uint32 elementSize)
{
    dataCollection data{src, dst, elementSize};

    zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize);
    return false;
}

pFlow::partitioning::~partitioning()
{
    freeZoltan();
}

void pFlow::partitioning::printBox()const
{
    pOutput<< "localBox:" << localBox_<<endl;
}
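For reference, a minimal driver of the partitioning workflow might look as follows. This is a sketch only: decompDict, points, and flags are assumed inputs, and rcb1DPartitioning (defined later in this merge) is used as the concrete partitioner.

// Sketch: decompDict is assumed to contain the entry: direction x;
rcb1DPartitioning rcb(decompDict, globalBox);

// points: span over point positions on this processor (host side)
// flags : the active-point mask matching those positions
if( rcb.partition(points, flags) )
{
    rcb.printBox();   // reports the local sub-box produced by the RCB cut

    pOutput << "to export: " << rcb.numberExportThisProc()
            << ", to import: " << rcb.numberImportThisProc() << endl;
}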
@ -0,0 +1,168 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#ifndef __partitioning_hpp__
#define __partitioning_hpp__

#include "zoltan_cpp.h"

#include "pFlowProcessors.hpp"
#include "virtualConstructor.hpp"
#include "box.hpp"
#include "span.hpp"
#include "pointFlag.hpp"
#include "procVector.hpp"

namespace pFlow
{

struct pointCollection
{
    span<realx3>  points_;
    pFlagTypeHost pFlag_;

    uint32 numActivePoints()const
    {
        return pFlag_.numActive();
    }
};

struct dataCollection
{
    span<char> srcData_;
    span<char> dstData_;
    uint32     elementSize_;
};

class partitioning
{
protected:

    float version_ = 0.0;

    std::unique_ptr<Zoltan> zoltan_ = nullptr;

    bool validPointers_ = false;

    box globalBox_;

    box localBox_;

    int32 changes_, numImport_, numExport_;

    id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_;

    int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_;

    uint32 numBeforePartition_ = 0;

    static inline bool zoltanInitialized__ = false;

    void freeZoltan();

    virtual
    bool partition(pointCollection& points) = 0;

public:

    partitioning(
        const dictionary& dict,
        const box& globalBox);

    virtual
    ~partitioning();

    create_vCtor(
        partitioning,
        dictionary,
        (
            const dictionary& dict,
            const box& globalBox
        ),
        (dict, globalBox));

    bool partition(
        span<realx3> points,
        pFlagTypeHost flags);

    bool migrateData(span<char> src, span<char> dst, uint32 elementSize);

    inline
    auto localBox()const
    {
        return localBox_;
    }

    inline
    const auto& globalBox()const
    {
        return globalBox_;
    }

    inline
    bool partitionsChanged()const
    {
        return changes_ == 1;
    }

    uint32 numberImportThisProc()const
    {
        return numImport_;
    }

    uint32 numberExportThisProc()const
    {
        return numExport_;
    }

    virtual
    span<int32> exportList(int procNo)const = 0;

    virtual
    pFlow::MPI::procVector<span<int32>> allExportLists()const=0;

    void printBox()const;

};

}

#endif //__partitioning_hpp__

/*static
    int getNumberOfPoints(void *data, int32 *ierr);

    static
    void getPointList(
        void *data,
        int32 sizeGID,
        int32 sizeLID,
        id_t* globalID,
        id_t* localID,
        int32 wgt_dim,
        float *obj_wgts,
        int32 *ierr);*/
@ -0,0 +1,330 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#include "zoltan_cpp.h"

#include "error.hpp"
#include "processors.hpp"
#include "rcb1DPartitioning.hpp"

bool pFlow::rcb1DPartitioning::partition(pointCollection &points)
{
    zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0");
    zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1");
    zoltan_->Set_Param("KEEP_CUTS", "1");
    zoltan_->Set_Param("REDUCE_DIMENSIONS", "1");
    zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1");
    zoltan_->Set_Param("AVERAGE_CUTS", "0");
    zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0");

    zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points);
    zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points);
    zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points);
    switch (direction_)
    {
    case Direction::X:
        zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points);
        break;
    case Direction::Y:
        zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points);
        break;
    case Direction::Z:
        zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points);
        break;
    }

    int numGidEntries_, numLidEntries_;
    int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_,
        numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_,
        numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_);

    if (rc != ZOLTAN_OK)
    {
        fatalErrorInFunction<< "Zoltan failed to perform partitioning."<<endl;
        return false;
    }

    for(auto& ids:exportIds_)
    {
        ids.clear();
    }

    std::vector<int32> thisProc(points.numActivePoints(),-1);

    for(auto i =0; i<numExport_; i++)
    {
        exportIds_[exportProcs_[i]].push_back(exportGlobalGids_[i]);
        thisProc[exportGlobalGids_[i]] = exportGlobalGids_[i];
    }

    for(int i=0; i<thisProc.size(); i++)
    {
        if(thisProc[i]==-1)
            exportIds_[0].push_back(i);
    }

    validPointers_ = true;

    int nDim;
    double x0;
    double y0;
    double z0;
    double x1;
    double y1;
    double z1;
    zoltan_->RCB_Box
    (
        processors::globalRank(),
        nDim,
        x0, y0, z0,
        x1, y1, z1
    );

    localBox_ = globalBox_;

    if(equal(x0, x1))
    {
        x0 = x0 - 0.00001;
        x1 = x1 + 0.00001;
    }

    switch (direction_)
    {
        case Direction::X :
            localBox_.minPoint().x_ = x0;
            localBox_.maxPoint().x_ = x1;
            break;

        case Direction::Y :
            localBox_.minPoint().y_ = x0;
            localBox_.maxPoint().y_ = x1;
            break;

        case Direction::Z :
            localBox_.minPoint().z_ = x0;
            localBox_.maxPoint().z_ = x1;
            break;
    }

    localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint());
    localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint());

    return true;
}

pFlow::rcb1DPartitioning::rcb1DPartitioning
(
    const dictionary &dict,
    const box &globalBox
)
:
    partitioning(dict, globalBox),
    exportIds_(pFlowProcessors())
{
    word directionName = dict.getVal<word>("direction");

    if(toUpper(directionName)== "X")
    {
        direction_ = Direction::X;
        dirVector_ ={1.0, 0.0, 0.0};
    }
    else if( toUpper(directionName) == "Y")
    {
        direction_ = Direction::Y;
        dirVector_ ={0.0, 1.0, 0.0};
    }
    else if( toUpper(directionName) == "Z")
    {
        direction_ = Direction::Z;
        dirVector_ ={0.0, 0.0, 1.0};
    }
    else
    {
        fatalErrorInFunction<< "wrong direction in dictionary "<<
            dict.globalName()<<". Directions should be one of x, y, or z."<<endl;
        fatalError;
    }
}

int pFlow::rcb1DPartitioning::getNumGeometry(void *data, int *ierr)
{
    *ierr = ZOLTAN_OK;
    return 1;
}

int pFlow::rcb1DPartitioning::getNumberOfPoints(void *data, int *ierr)
{
    auto *obj = static_cast<pointCollection *>(data);

    *ierr = ZOLTAN_OK;

    return obj->numActivePoints();
}

void pFlow::rcb1DPartitioning::getPointList
(
    void *data,
    int sizeGID,
    int sizeLID,
    ZOLTAN_ID_PTR globalID,
    ZOLTAN_ID_PTR localID,
    int wgt_dim,
    float *obj_wgts,
    int *ierr
)
{
    auto* obj = static_cast<pointCollection *>(data);
    *ierr = ZOLTAN_OK;

    auto activeRange = obj->pFlag_.activeRange();
    uint32 n = 0;
    for (auto i=activeRange.start(); i<activeRange.end(); i++)
    {
        if( obj->pFlag_.isActive(i) )
        {
            globalID[n] = i;
            localID[n] = n;
            n++;
        }
    }
}

void pFlow::rcb1DPartitioning::getGeometryList_x
(
    void *data,
    int sizeGID,
    int sizeLID,
    int num_obj,
    ZOLTAN_ID_PTR globalID,
    ZOLTAN_ID_PTR localID,
    int num_dim,
    double *geom_vec,
    int *ierr
)
{
    auto* obj = static_cast<pointCollection *>(data);

    if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
    {
        *ierr = ZOLTAN_FATAL;
        return;
    }

    auto activeRange = obj->pFlag_.activeRange();
    uint32 n = 0;
    for (auto i=activeRange.start(); i<activeRange.end(); i++)
    {
        if( obj->pFlag_.isActive(i) )
        {
            geom_vec[n] = obj->points_[i].x_;
            n++;
        }
    }

    *ierr = ZOLTAN_OK;

    return;
}

void pFlow::rcb1DPartitioning::getGeometryList_y
(
    void *data,
    int sizeGID,
    int sizeLID,
    int num_obj,
    ZOLTAN_ID_PTR globalID,
    ZOLTAN_ID_PTR localID,
    int num_dim,
    double *geom_vec,
    int *ierr
)
{
    auto* obj = static_cast<pointCollection *>(data);

    if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
    {
        *ierr = ZOLTAN_FATAL;
        return;
    }

    auto activeRange = obj->pFlag_.activeRange();
    uint32 n = 0;
    for (auto i=activeRange.start(); i<activeRange.end(); i++)
    {
        if( obj->pFlag_.isActive(i) )
        {
            geom_vec[n] = obj->points_[i].y_;
            n++;
        }
    }

    *ierr = ZOLTAN_OK;

    return;
}

void pFlow::rcb1DPartitioning::getGeometryList_z
(
    void *data,
    int sizeGID,
    int sizeLID,
    int num_obj,
    ZOLTAN_ID_PTR globalID,
    ZOLTAN_ID_PTR localID,
    int num_dim,
    double *geom_vec,
    int *ierr
)
{
    auto* obj = static_cast<pointCollection *>(data);

    if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
    {
        *ierr = ZOLTAN_FATAL;
        return;
    }

    auto activeRange = obj->pFlag_.activeRange();
    uint32 n = 0;
    for (auto i=activeRange.start(); i<activeRange.end(); i++)
    {
        if( obj->pFlag_.isActive(i) )
        {
            geom_vec[n] = obj->points_[i].z_;
            n++;
        }
    }

    *ierr = ZOLTAN_OK;

    return;
}
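getPointList and the three getGeometryList_* callbacks above share the same loop over active points and differ only in which coordinate component they write into geom_vec. One way to collapse them, shown here purely as an illustration (fillGeometryComponent is hypothetical and not part of this merge), is a component-indexed helper that each static callback forwards to:

// Hypothetical helper: copy one coordinate component of the active points
// into geom_vec; comp is 0, 1, or 2 for x_, y_, z_.
static void fillGeometryComponent
(
    pFlow::pointCollection& points,
    int                     comp,
    double*                 geom_vec
)
{
    auto activeRange = points.pFlag_.activeRange();
    pFlow::uint32 n = 0;
    for (auto i = activeRange.start(); i < activeRange.end(); i++)
    {
        if( points.pFlag_.isActive(i) )
        {
            const auto& p = points.points_[i];
            geom_vec[n] = (comp == 0 ? p.x_ : (comp == 1 ? p.y_ : p.z_));
            n++;
        }
    }
}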
@ -0,0 +1,240 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __rcb1DPartitioning_hpp__
#define __rcb1DPartitioning_hpp__

#include "partitioning.hpp"
#include "procVector.hpp"

namespace pFlow
{

class rcb1DPartitioning
:
    public partitioning
{
public:

    enum Direction
    {
        X = 0,
        Y = 1,
        Z = 2
    };

protected:

    /// Direction of partitioning
    Direction direction_ = Direction::X;

    realx3 dirVector_ = {1.0, 0.0, 0.0};

    word directionName_ = "x";

    MPI::procVector<std::vector<int>> exportIds_;

    bool partition(pointCollection& points) override;

public:

    rcb1DPartitioning(
        const dictionary& dict,
        const box& globalBox);

    ~rcb1DPartitioning() override=default;

    span<int32> exportList(int procNo)const override
    {
        return span<int32>(
            const_cast<int32*>(exportIds_[procNo].data()),
            exportIds_[procNo].size());
    }

    pFlow::MPI::procVector<span<int32>> allExportLists()const override
    {
        pFlow::MPI::procVector<span<int32>> allList(pFlowProcessors());

        for(int i=0; i<allList.size(); i++)
            allList[i]= exportList(i);

        return allList;
    }

    static
    int getNumGeometry(void *data, int *ierr);

    static
    int getNumberOfPoints(void *data, int *ierr);

    static
    void getPointList
    (
        void *data,
        int sizeGID,
        int sizeLID,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int wgt_dim,
        float *obj_wgts,
        int *ierr
    );

    static
    void getGeometryList_x(
        void *data,
        int sizeGID,
        int sizeLID,
        int num_obj,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int num_dim,
        double *geom_vec,
        int *ierr);

    static
    void getGeometryList_y(
        void *data,
        int sizeGID,
        int sizeLID,
        int num_obj,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int num_dim,
        double *geom_vec,
        int *ierr);

    static
    void getGeometryList_z(
        void *data,
        int sizeGID,
        int sizeLID,
        int num_obj,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int num_dim,
        double *geom_vec,
        int *ierr);
};

/*class RCB_y_partitioning
:
    public partitioning
{
public:

    RCB_y_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
    :
        partitioning(argc, argv, collection, gBox)
    {}

    virtual
    ~RCB_y_partitioning()=default;

    bool partition() override;

    static
    void getGeometryList(
        void *data,
        int sizeGID,
        int sizeLID,
        int num_obj,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int num_dim,
        double *geom_vec,
        int *ierr)
    {
        auto* obj = static_cast<pointCollection *>(data);

        if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
        {
            *ierr = ZOLTAN_FATAL;
            return;
        }

        *ierr = ZOLTAN_OK;

        for (int i=0; i < num_obj ; i++)
        {
            geom_vec[i] = obj->pointList()[i].y_;
        }

        return;
    }

    static
    int getNumGeometry(void *data, int *ierr)
    {
        *ierr = ZOLTAN_OK;
        return 1;
    }

};

class RCB_x_partitioning
:
    public partitioning
{
public:

    RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
    :
        partitioning(argc, argv, collection, gBox)
    {}

    virtual
    ~RCB_x_partitioning()=default;

    bool partition() override;

    static
    void getGeometryList(
        void *data,
        int sizeGID,
        int sizeLID,
        int num_obj,
        ZOLTAN_ID_PTR globalID,
        ZOLTAN_ID_PTR localID,
        int num_dim,
        double *geom_vec,
        int *ierr);

    static
    int getNumGeometry(void *data, int *ierr);

};*/

} // pFlow
#endif //__rcb1DPartitioning_hpp__
@ -0,0 +1,110 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

template<class T, class MemorySpace>
void
pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
        //uint32 nRecv = reciever_.waitComplete();
        dataRecieved_ = true;
        /*if (nRecv != this->neighborProcSize())
        {
            fatalErrorInFunction;
            fatalExit;
        }*/
    }
}

template<class T, class MemorySpace>
bool
pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
    int step,
    DataDirection direction
)
{
    /*if (step == 1)
    {
        // Isend
        if (direction == DataDirection::TwoWay ||
            ( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
            (!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
            sender_.sendData(pFlowProcessors(), this->thisField());
            dataRecieved_ = false;
        }
    }
    else if (step == 2)
    {
        // Irecv
        if (direction == DataDirection::TwoWay ||
            (!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
            ( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
        {
            reciever_.recieveData(pFlowProcessors(), this->neighborProcSize());
            dataRecieved_ = false;
        }
    }
    else
    {
        fatalErrorInFunction << "Invalid step number " << step << endl;
        return false;
    }*/

    return true;
}

template<class T, class MemorySpace>
pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
    const boundaryBase& boundary,
    const pointStructure& pStruct,
    InternalFieldType& internal
)
  : BoundaryFieldType(boundary, pStruct, internal),
    sender_(
        groupNames("sendBufferField", boundary.name()),
        boundary.neighborProcessorNo(),
        boundary.thisBoundaryIndex()
    ),
    reciever_(
        groupNames("neighborProcField", boundary.name()),
        boundary.neighborProcessorNo(),
        boundary.mirrorBoundaryIndex()
    )
{
}

template<class T, class MemorySpace>
typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
{
    checkDataRecieved();
    return reciever_.buffer();
}

template<class T, class MemorySpace>
const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::
    ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
{
    checkDataRecieved();
    return reciever_.buffer();
}
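The commented-out lines above outline the intended non-blocking protocol: sends and receives are posted in the two steps of updateBoundary, and completion is deferred until the buffer is first accessed through checkDataRecieved. A sketch of the completed form, based only on those comments (it assumes dataReciever::waitComplete() returns the number of received elements):

// Sketch: lazy completion of the posted receive on first access.
template<class T, class MemorySpace>
void
pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
        uint32 nRecv = reciever_.waitComplete(); // block until the Irecv finishes
        dataRecieved_ = true;
        if (nRecv != this->neighborProcSize())   // sanity check on element count
        {
            fatalErrorInFunction;
            fatalExit;
        }
    }
}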
@ -0,0 +1,113 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryField_hpp__
#define __processorBoundaryField_hpp__

#include "boundaryField.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"

namespace pFlow::MPI
{

template< class T, class MemorySpace = void>
class processorBoundaryField
:
    public boundaryField<T, MemorySpace>
{
public:

    using processorBoundaryFieldType = processorBoundaryField<T, MemorySpace>;

    using BoundaryFieldType = boundaryField<T, MemorySpace>;

    using InternalFieldType = typename BoundaryFieldType::InternalFieldType;

    using memory_space = typename BoundaryFieldType::memory_space;

    using execution_space = typename BoundaryFieldType::execution_space;

    using FieldAccessType = typename BoundaryFieldType::FieldAccessType;

    using ProcVectorType = typename BoundaryFieldType::ProcVectorType;

private:

    dataSender<T, MemorySpace> sender_;

    mutable dataReciever<T, MemorySpace> reciever_;

    mutable bool dataRecieved_ = true;

    void checkDataRecieved()const;

    bool updateBoundary(int step, DataDirection direction);

public:

    TypeInfoTemplate211("boundaryField","processor", T, memory_space::name());

    processorBoundaryField(
        const boundaryBase& boundary,
        const pointStructure& pStruct,
        InternalFieldType& internal);

    ~processorBoundaryField()override = default;

    add_vCtor
    (
        BoundaryFieldType,
        processorBoundaryFieldType,
        boundaryBase
    );

    ProcVectorType& neighborProcField() override;

    const ProcVectorType& neighborProcField()const override;

    bool hearChanges
    (
        real t,
        real dt,
        uint32 iter,
        const message& msg,
        const anyList& varList
    ) override
    {
        BoundaryFieldType::hearChanges(t,dt,iter, msg,varList);

        if(msg.equivalentTo(message::BNDR_DELETE))
        {
            // do nothing;
        }

        return true;
    }

};

}

#include "processorBoundaryField.cpp"

#endif //__processorBoundaryField_hpp__
@ -0,0 +1,24 @@

//#include "Field.hpp"

#include "processorBoundaryField.hpp"

template class pFlow::MPI::processorBoundaryField<pFlow::uint8>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint8, pFlow::HostSpace>;

template class pFlow::MPI::processorBoundaryField<pFlow::uint32>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint32, pFlow::HostSpace>;

template class pFlow::MPI::processorBoundaryField<pFlow::uint64>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint64, pFlow::HostSpace>;

template class pFlow::MPI::processorBoundaryField<pFlow::real>;
template class pFlow::MPI::processorBoundaryField<pFlow::real, pFlow::HostSpace>;

template class pFlow::MPI::processorBoundaryField<pFlow::realx3>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx3, pFlow::HostSpace>;

template class pFlow::MPI::processorBoundaryField<pFlow::realx4>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx4, pFlow::HostSpace>;
@ -0,0 +1,148 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#include "boundaryProcessor.hpp"
#include "dictionary.hpp"
#include "mpiCommunication.hpp"

void
pFlow::MPI::boundaryProcessor::checkSize() const
{
    if (!sizeObtained_)
    {
        //MPI_Wait(&sizeRequest_, StatusIgnore);
        sizeObtained_ = true;
    }
}

void
pFlow::MPI::boundaryProcessor::checkDataRecieved() const
{
    if (!dataRecieved_)
    {
        //uint32 nRecv = reciever_.waitComplete();
        dataRecieved_ = true;
        /*if (nRecv != neighborProcSize())
        {
            fatalErrorInFunction;
            fatalExit;
        }*/
    }
}

pFlow::MPI::boundaryProcessor::boundaryProcessor(
    const dictionary& dict,
    const plane&      bplane,
    internalPoints&   internal,
    boundaryList&     bndrs,
    uint32            thisIndex
)
  : boundaryBase(dict, bplane, internal, bndrs, thisIndex),
    sender_(
        groupNames("sendBuffer", name()),
        neighborProcessorNo(),
        thisBoundaryIndex()
    ),
    reciever_(
        groupNames("neighborProcPoints", name()),
        neighborProcessorNo(),
        mirrorBoundaryIndex()
    )
{
}

bool
pFlow::MPI::boundaryProcessor::beforeIteration(uint32 iterNum, real t, real dt)
{
    thisNumPoints_ = size();

    auto req = MPI_REQUEST_NULL;
    MPI_Isend(
        &thisNumPoints_,
        1,
        MPI_UNSIGNED,
        neighborProcessorNo(),
        thisBoundaryIndex(),
        pFlowProcessors().localCommunicator(),
        &req);

    MPI_Recv(
        &neighborProcNumPoints_,
        1,
        MPI_UNSIGNED,
        neighborProcessorNo(),
        mirrorBoundaryIndex(),
        pFlowProcessors().localCommunicator(),
        MPI_STATUS_IGNORE
    );

    sizeObtained_ = false;

    return true;
}

pFlow::uint32
pFlow::MPI::boundaryProcessor::neighborProcSize() const
{
    checkSize();
    return neighborProcNumPoints_;
}

pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints()
{
    checkDataRecieved();
    return reciever_.buffer();
}

const pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints() const
{
    checkDataRecieved();
    return reciever_.buffer();
}

bool
pFlow::MPI::boundaryProcessor::updataBoundary(int step)
{
    if (step == 1)
    {
        sender_.sendData(pFlowProcessors(), thisPoints());
        dataRecieved_ = false;
    }
    else if (step == 2)
    {
        reciever_.recieveData(pFlowProcessors(), neighborProcSize());
        dataRecieved_ = false;
    }
    return true;
}

bool
pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt)
{
    return true;
}

bool
pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt)
{
    return true;
}
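The two-step design of updataBoundary lets every processor boundary post its send (step 1) before any boundary blocks on a receive (step 2), which avoids a send/receive deadlock between mirrored boundaries on neighboring ranks. The caller side, boundaryList in this design, would drive it roughly as follows (a sketch; the container name and loop style are assumptions):

// Sketch: two-phase point exchange over all boundaries of this processor.
for(auto& bndry : boundaries)     // phase 1: post/buffer all sends
{
    bndry->updataBoundary(1);
}
for(auto& bndry : boundaries)     // phase 2: receive from the mirror boundaries
{
    bndry->updataBoundary(2);
}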
@ -0,0 +1,116 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#ifndef __boundaryProcessor_hpp__
#define __boundaryProcessor_hpp__

#include "boundaryBase.hpp"
#include "mpiTypes.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"

namespace pFlow::MPI
{

class boundaryProcessor
:
    public boundaryBase
{
private:

    uint32 neighborProcNumPoints_ = 0;

    uint32 thisNumPoints_;

    realx3Vector_D neighborProcPoints_;

    mutable Request sizeRequest_;

    mutable Request sSizeRequest_;

    int req_ = 0;

    mutable bool sizeObtained_ = true;

    mutable dataSender<realx3> sender_;

    mutable dataReciever<realx3> reciever_;

    mutable bool dataRecieved_ = true;

    void checkSize()const;

    void checkDataRecieved()const;

    /// @brief Update processor boundary data for this processor
    /// @param step It is either 1 or 2 in the input to indicate
    /// the update step
    /// @return true if successful
    /// @details This method is called by boundaryList two times to
    /// allow processor boundaries to exchange data in two steps.
    /// The first step is a buffered non-blocking send and the second
    /// step is a non-blocking receive to get data.
    bool updataBoundary(int step)override;

public:

    TypeInfo("boundary<processor>");

    boundaryProcessor(
        const dictionary& dict,
        const plane&      bplane,
        internalPoints&   internal,
        boundaryList&     bndrs,
        uint32            thisIndex
    );

    ~boundaryProcessor() override = default;

    add_vCtor
    (
        boundaryBase,
        boundaryProcessor,
        dictionary
    );

    bool beforeIteration(uint32 iterNum, real t, real dt) override;

    bool iterate(uint32 iterNum, real t, real dt) override;

    bool afterIteration(uint32 iterNum, real t, real dt) override;

    /// @brief Return the number of points in the neighbor processor boundary.
    /// This is overridden from boundaryBase.
    uint32 neighborProcSize() const override;

    /// @brief Return a reference to point positions in the neighbor
    /// processor boundary.
    realx3Vector_D& neighborProcPoints() override;

    /// @brief Return a const reference to point positions in the
    /// neighbor processor boundary.
    const realx3Vector_D& neighborProcPoints() const override;

};

} // namespace pFlow::MPI

#endif //__boundaryProcessor_hpp__
@ -0,0 +1,108 @@

#ifndef __dataReciever_hpp__
#define __dataReciever_hpp__

#include "span.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"

namespace pFlow::MPI
{

template<typename T, typename MemorySpace=void>
class dataReciever
{
public:

    using BufferVectorType = VectorSingle<T, MemorySpace>;

    using BufferVectorTypeHost = VectorSingle<T, HostSpace>;

    using memory_space = typename BufferVectorType::memory_space;

    using execution_space = typename BufferVectorType::execution_space;

private:

    BufferVectorType buffer_;

    std::vector<T> buffer0_;

    int fromProc_;

    int tag_;

    Request recvRequest_;

public:

    dataReciever(const word& name, int from, int tag)
    :
        buffer_(name),
        fromProc_(from),
        tag_(tag)
    {}

    ~dataReciever()=default;

    void recieveData(
        const localProcessors& processors,
        uint32 numToRecv
    )
    {
        buffer0_.clear();
        buffer0_.resize(numToRecv);
        MPI_Status status;

        /*CheckMPI(recv(
            buffer_.getSpan(),
            fromProc_,
            tag_,
            processors.localCommunicator(),
            &status), true);*/
        MPI_Recv(
            buffer0_.data(),
            buffer0_.size(),
            realx3Type__,
            fromProc_,
            tag_,
            processors.localCommunicator(),
            &status
        );
        int c;
        getCount<realx3>(&status, c);
        pOutput<<"Number of data received "<<c<<endl;
    }

    auto& buffer()
    {
        return buffer_;
    }

    const auto& buffer()const
    {
        return buffer_;
    }

    uint32 waitComplete()
    {
        /*Status status;

        CheckMPI(MPI_Wait(&recvRequest_, &status), true);

        int count;
        CheckMPI(getCount<T>(&status, count), true);

        return static_cast<uint32>(count);*/
        return buffer_.size();
    }

};

}

#endif //__dataReciever_hpp__
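recieveData currently falls back to a blocking MPI_Recv into the host-side buffer0_, and waitComplete just returns the buffer size. The commented fragments suggest the intended non-blocking form; a sketch of that form is given below as an assumption only (it reuses the class's own members and the realx3Type__ handle used above, and assumes Request aliases MPI_Request):

// Sketch: post a non-blocking receive, complete it later in waitComplete().
void recieveData
(
    const localProcessors& processors,
    uint32 numToRecv
)
{
    buffer0_.clear();
    buffer0_.resize(numToRecv);

    MPI_Irecv(
        buffer0_.data(),
        buffer0_.size(),
        realx3Type__,
        fromProc_,
        tag_,
        processors.localCommunicator(),
        &recvRequest_);
}

uint32 waitComplete()
{
    MPI_Status status;
    MPI_Wait(&recvRequest_, &status);              // block until the Irecv finishes

    int count = 0;
    MPI_Get_count(&status, realx3Type__, &count);  // number of received elements
    return static_cast<uint32>(count);
}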
@ -0,0 +1,120 @@
#ifndef __dataSender_hpp__
#define __dataSender_hpp__

#include "VectorSingles.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"

namespace pFlow::MPI
{

template<typename T, typename MemorySpace=void>
class dataSender
{
public:

    using BufferVectorType = VectorSingle<T, MemorySpace>;

    using BufferVectorTypeHost = VectorSingle<T, HostSpace>;

    using memory_space = typename BufferVectorType::memory_space;

    using execution_space = typename BufferVectorType::execution_space;

    // This is device vector

private:

    //BufferVectorType buffer_;

    std::vector<T> buffer_;

    int toProc_;

    int tag_;

    Request sendRequest_ = RequestNull;

public:

    dataSender(const word& name, int toProc, int tag)
    :
        toProc_(toProc),
        tag_(tag)
    {}

    ~dataSender()=default;

    void sendData(
        const localProcessors& processors,
        const scatteredFieldAccess<T, memory_space>& scatterField
    )
    {
        using RPolicy = Kokkos::RangePolicy<
            DefaultExecutionSpace,
            Kokkos::Schedule<Kokkos::Static>,
            Kokkos::IndexType<pFlow::uint32>>;

        uint32 n = scatterField.size();

        // clear the buffer to prevent data copy if capacity increases
        buffer_.clear();
        buffer_.resize(n);

        auto* buffView = buffer_.data();

        Kokkos::parallel_for(
            "dataSender::sendData",
            RPolicy(0,n),
            LAMBDA_HD(uint32 i)
            {
                buffView[i] = scatterField[i];
            }
        );
        Kokkos::fence();
        auto req = MPI_REQUEST_NULL;

        MPI_Isend(
            buffer_.data(),
            buffer_.size(),
            realx3Type__,
            toProc_,
            tag_,
            processors.localCommunicator(),
            &req);

        /*CheckMPI(send(
            buffer_.getSpan(),
            toProc_,
            tag_,
            processors.localCommunicator(),
            MPI_STATUS_IGNORE), true);*/
    }

    /*auto& buffer()
    {
        return buffer_;
    }

    const auto& buffer()const
    {
        return buffer_;
    }*/

    bool sendComplete()
    {
        return true;
        /*int test;
        MPI_Test(&sendRequest_, &test, StatusIgnore);
        if(test)
            return true;
        else
            return false;*/
    }

};

}

#endif //__dataSender_hpp__
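A dataSender on one rank pairs with a dataReciever on the neighboring rank through matching (processor, tag) pairs; as wired in boundaryProcessor's constructor, the tag is the boundary index on the sending side and the mirror boundary index on the receiving side. In outline (a sketch with illustrative ranks and indices; scatterField and expectedCount are assumed inputs):

// Rank 0 owns boundary index 3; its mirror on rank 1 reports mirror index 3.
// On rank 0:
dataSender<realx3>   sender("sendBuffer", /*toProc=*/1, /*tag=*/3);
sender.sendData(pFlowProcessors(), scatterField);        // buffered Isend

// On rank 1:
dataReciever<realx3> reciever("neighborProcPoints", /*fromProc=*/0, /*tag=*/3);
reciever.recieveData(pFlowProcessors(), expectedCount);  // matches the tag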
@ -36,6 +36,25 @@ pFlow::boundaryField<T, MemorySpace>::boundaryField
    internal_(internal)
{}

template<class T, class MemorySpace>
typename pFlow::boundaryField<T, MemorySpace>::ProcVectorType&
pFlow::boundaryField<T, MemorySpace>::neighborProcField()
{
    static ProcVectorType dummyVector{"dummyVector"};
    notImplementedFunction;
    fatalExit;
    return dummyVector;
}

template<class T, class MemorySpace>
const typename pFlow::boundaryField<T, MemorySpace>::ProcVectorType&
pFlow::boundaryField<T, MemorySpace>::neighborProcField() const
{
    static ProcVectorType dummyVector{"dummyVector"};
    notImplementedFunction;
    fatalExit;
    return dummyVector;
}

template<class T, class MemorySpace>
pFlow::uniquePtr<pFlow::boundaryField<T, MemorySpace>>
@ -26,6 +26,18 @@ Licence:
namespace pFlow
{

// forward
template< class T, class MemorySpace>
class boundaryFieldList;

enum class DataDirection
{
    MasterToSlave,
    SlaveToMaster,
    TwoWay
};

template< class T, class MemorySpace = void>
class boundaryField
:
@ -43,12 +55,22 @@ public:

    using FieldAccessType = scatteredFieldAccess<T, memory_space>;

    using ProcVectorType = VectorSingle<T,MemorySpace>;

private:

    /// friend et al.
    friend class boundaryFieldList<T,MemorySpace>;

    /// @brief a ref to the internal field
    InternalFieldType& internal_;

    virtual
    bool updateBoundary(int step, DataDirection direction)
    {
        return true;
    }

public:

    TypeInfoTemplate211("boundaryField","none" ,T, memory_space::name());
@ -78,7 +100,6 @@ public:
        boundaryBase
    );

    bool hearChanges
    (
        real t,
@ -116,6 +137,12 @@ public:
            this->mirrorBoundary().indexList().deviceViewAll(),
            internal_.deviceViewAll());
    }

    virtual
    ProcVectorType& neighborProcField();

    virtual
    const ProcVectorType& neighborProcField()const;

    void fill(const std::any& val)override
    {
@@ -43,6 +43,8 @@ protected:
 
 	const boundaryList& boundaries_;
 
+	uint32 slaveToMasterUpdateIter_ = -1;
+
 public:
 
 	boundaryFieldList(const boundaryList& boundaries, InternalFieldType& internal)
@@ -60,6 +62,29 @@ public:
 		}
 	}
 
+	void updateBoundaries(uint32 iter, DataDirection direction)
+	{
+		if( direction == DataDirection::SlaveToMaster
+			&& slaveToMasterUpdateIter_ == iter) return;
+
+		// first step
+		for(auto b:*this)
+		{
+			b->updateBoundary(1, direction);
+		}
+
+		// second step
+		for(auto b:*this)
+		{
+			b->updateBoundary(2, direction);
+		}
+
+		if(direction == DataDirection::SlaveToMaster)
+		{
+			slaveToMasterUpdateIter_ = iter;
+		}
+	}
+
 	void fill(const T& val)
 	{
 		for(auto& bf: *this)
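The added updateBoundaries() is a memoized two-phase sweep: every boundary runs step 1, then every boundary runs step 2, and the iteration stamp suppresses a second SlaveToMaster pass within the same iteration. A minimal standalone sketch of the same pattern, with hypothetical names that are not phasicFlow API:

    // Two-phase, iteration-stamped boundary update (hypothetical names).
    #include <cstdint>
    #include <vector>

    struct Boundary
    {
        // step is 1 or 2; a no-op by default, overridden by MPI-aware boundaries
        virtual bool updateBoundary(int step) { return true; }
        virtual ~Boundary() = default;
    };

    struct BoundaryList
    {
        std::vector<Boundary*> boundaries_;
        uint32_t lastUpdateIter_ = static_cast<uint32_t>(-1);

        void updateAll(uint32_t iter)
        {
            if (lastUpdateIter_ == iter) return;  // already updated this iteration

            for (auto* b : boundaries_) b->updateBoundary(1); // phase 1: start work
            for (auto* b : boundaries_) b->updateBoundary(2); // phase 2: finish work

            lastUpdateIter_ = iter;               // stamp to suppress repeats
        }
    };

Running phase 1 over all boundaries before any phase 2 begins is what lets communication started by one boundary overlap with work on the others.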
@@ -68,6 +93,12 @@ public:
 		}
 	}
 
+	bool slaveToMasterUpdateRequested()const
+	{
+		return slaveToMasterUpdateIter_ != -1;
+	}
+
 };
 
 }
@@ -31,7 +31,8 @@ pFlow::generalBoundary::generalBoundary
 :
 	observer(&boundary, defaultMessage_),
 	boundary_(boundary),
-	pStruct_(pStruct)
+	pStruct_(pStruct),
+	isBoundaryMaster_(boundary.isBoundaryMaster())
 {}
 
@@ -45,16 +45,10 @@ private:
 
 	const pointStructure& pStruct_;
 
+	const bool isBoundaryMaster_;
+
 	static inline
 	const message defaultMessage_{message::BNDR_RESET};
 
-	template<typename BoundaryFieldType>
-	inline
-	bool checkForType()const
-	{
-		return typeName() == BoundaryFieldType::TYPENAME();
-	}
-
 public:
 
|
|||||||
|
|
||||||
~generalBoundary()override = default;
|
~generalBoundary()override = default;
|
||||||
|
|
||||||
|
|
||||||
inline
|
inline
|
||||||
uint32 thisBoundaryIndex()const
|
uint32 thisBoundaryIndex()const
|
||||||
{
|
{
|
||||||
@@ -93,6 +86,24 @@ public:
 		return boundary_.capacity();
 	}
 
+	inline
+	auto neighborProcSize()const
+	{
+		return boundary_.neighborProcSize();
+	}
+
+	inline
+	int neighborProcessorNo()const
+	{
+		return boundary_.neighborProcessorNo();
+	}
+
+	inline
+	int thisProcessorNo()const
+	{
+		return boundary_.thisProcessorNo();
+	}
+
 	inline
 	const auto& boundary()const
 	{
@@ -129,22 +140,17 @@ public:
 		return pStruct_;
 	}
 
+	inline
+	bool isBoundaryMaster()const
+	{
+		return isBoundaryMaster_;
+	}
+
 	const Time& time()const;
 
 	virtual
 	void fill(const std::any& val)=0;
 
-	/*template<typename BoundaryFieldType>
-	BoundaryFieldType& thisField()
-	{
-		return static_cast<BoundaryFieldType&>(*this);
-	}
-
-	template<typename BoundaryFieldType>
-	const BoundaryFieldType& thisField()const
-	{
-		return static_cast<const BoundaryFieldType&>(*this);
-	}*/
-
 };
 
@@ -181,6 +181,11 @@ public:
 		return field_.insertSetElement(indices, val);
 	}
 
+	inline const Time& time()const
+	{
+		return internalPoints_.time();
+	}
+
 	bool hearChanges
 	(
 		real t,
@@ -24,7 +24,7 @@ Licence:
 #include "pointStructure.hpp"
 #include "internalField.hpp"
 #include "boundaryFieldList.hpp"
-
+#include "Time.hpp"
 
 namespace pFlow
 {
@@ -103,6 +103,23 @@ public:
 		return static_cast<const InternalFieldType&>(*this);
 	}
 
+	void updateBoundaries(DataDirection direction)const
+	{
+		uint32 iter = this->time().currentIter();
+		auto& bList = const_cast<boundaryFieldListType&>(boundaryFieldList_);
+		bList.updateBoundaries(iter, direction);
+	}
+
+	/// @brief update boundaries if it was requested previously (slave to master).
+	/// It only checks for the SlaveToMaster direction.
+	void updateBoundariesSlaveToMasterIfRequested()
+	{
+		if(boundaryFieldList_.slaveToMasterUpdateRequested())
+		{
+			updateBoundaries(DataDirection::SlaveToMaster);
+		}
+	}
+
 	const auto& boundaryFields()const
 	{
 		return boundaryFieldList_;
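Together with slaveToMasterUpdateRequested(), this makes the slave-to-master refresh lazy: earlier steps only mark the exchange as pending, and a consumer triggers it right before the gathered values are actually read. A hedged usage sketch; the function name below is hypothetical:

    // Hypothetical consumer of any field type exposing the method above.
    // If no earlier step requested a SlaveToMaster exchange, this is a no-op.
    template<typename FieldType>
    void prepareForOutput(FieldType& field)
    {
        field.updateBoundariesSlaveToMasterIfRequested();
    }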
@@ -1,3 +1,22 @@
+/*------------------------------- phasicFlow ---------------------------------
+      O        C enter of
+     O O       E ngineering and
+    O   O      M ultiscale modeling of
+   OOOOOOO     F luid flow
+------------------------------------------------------------------------------
+  Copyright (C): www.cemf.ir
+  email: hamid.r.norouzi AT gmail.com
+------------------------------------------------------------------------------
+Licence:
+  This file is part of phasicFlow code. It is a free software for simulating
+  granular and multiphase flows. You can redistribute it and/or modify it under
+  the terms of GNU General Public License v3 or any other later versions.
+
+  phasicFlow is distributed to help others in their research in the field of
+  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
+  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+-----------------------------------------------------------------------------*/
 #include "localProcessors.hpp"
 
 pFlow::localProcessors::localProcessors(const word &name)
@@ -8,8 +27,8 @@ pFlow::localProcessors::localProcessors(const word &name)
 	name_(name)
 {
 #ifdef pFlow_Build_MPI
-	parrentCommunicator_ = pFlow::MPI::CommWorld;
-	localCommunicator_ = pFlow::MPI::CommWorld;
+	parrentCommunicator_ = MPI_COMM_WORLD;
+	localCommunicator_ = MPI_COMM_WORLD;
 #endif
 
 }
@@ -24,7 +43,7 @@ pFlow::localProcessors::localProcessors
 	name_(name)
 {
 #ifdef pFlow_Build_MPI
-	parrentCommunicator_ = pFlow::MPI::CommWorld;
+	parrentCommunicator_ = MPI_COMM_WORLD;
 
 	if(ranks.size()> this->globalSize() )
 	{
@@ -33,17 +52,17 @@ pFlow::localProcessors::localProcessors
 		fatalExit;
 	}
 
-	pFlow::MPI::Group globalGroup;
+	MPI_Group globalGroup;
 	MPI_Comm_group(parrentCommunicator_, &globalGroup);
 
-	pFlow::MPI::Group localGroup;
+	MPI_Group localGroup;
 	MPI_Group_incl(globalGroup, ranks.size(), ranks.data(), &localGroup);
 
 	// Create the new communicator from that group of processes.
 
 	CheckMPI( MPI_Comm_create(parrentCommunicator_, localGroup, &localCommunicator_), true);
 
-	isPartOfLocal_ = localCommunicator_ != MPI::CommNull;
+	isPartOfLocal_ = localCommunicator_ != MPI_COMM_NULL;
 
 	if(isPartOfLocal_)
 	{
|
|||||||
|
|
||||||
pFlow::localProcessors::localProcessors
|
pFlow::localProcessors::localProcessors
|
||||||
(
|
(
|
||||||
pFlow::MPI::Comm parrentComm,
|
MPI_Comm parrentComm,
|
||||||
const word &name,
|
const word &name,
|
||||||
const std::vector<int> &ranks
|
const std::vector<int> &ranks
|
||||||
)
|
)
|
||||||
@@ -92,17 +111,17 @@ pFlow::localProcessors::localProcessors
 		fatalExit;
 	}
 
-	pFlow::MPI::Group parGroup;
+	MPI_Group parGroup;
 	MPI_Comm_group(parrentCommunicator_, &parGroup);
 
-	pFlow::MPI::Group localGroup;
+	MPI_Group localGroup;
 	MPI_Group_incl(parGroup, ranks.size(), ranks.data(), &localGroup);
 
 	// Create the new communicator from that group of processes.
 
 	CheckMPI( MPI_Comm_create(parrentCommunicator_, localGroup, &localCommunicator_), true);
 
-	isPartOfLocal_ = localCommunicator_ != MPI::CommNull;
+	isPartOfLocal_ = localCommunicator_ != MPI_COMM_NULL;
 
 	if(isPartOfLocal_)
 	{
@@ -25,7 +25,7 @@ Licence:
 #include "types.hpp"
 
 #ifdef pFlow_Build_MPI
-#include "mpiTypes.hpp"
+#include "mpi.h"
 #endif
 
 
@@ -39,9 +39,9 @@ class localProcessors
 private:
 
 #ifdef pFlow_Build_MPI
-	pFlow::MPI::Comm parrentCommunicator_;
+	MPI_Comm parrentCommunicator_;
 
-	pFlow::MPI::Comm localCommunicator_;
+	MPI_Comm localCommunicator_;
 #endif
 
 	int localSize_ = 1 ;
@@ -70,7 +70,7 @@ public:
 
 #ifdef pFlow_Build_MPI
 	localProcessors(
-		pFlow::MPI::Comm parrentComm,
+		MPI_Comm parrentComm,
 		const word& name,
 		const std::vector<int>& ranks);
 #endif
@@ -58,11 +58,13 @@ void pFlow::processors::initProcessors(int argc, char *argv[])
 	if(processors::globalParallel())
 	{
 		pFlow::pOutput.activatePrefix();
 		pFlow::pOutput.setPrefixNum(processors::globalRank_);
+		pFlow::errReport.activatePrefix();
+		pFlow::errReport.setPrefixNum(processors::globalRank_);
 	}
 
 	pFlow::mOutput.setMasterSlave(processors::globalMaster());
-	pFlow::errReport.setMasterSlave(processors::globalMaster());
 
 	MPI_Type_contiguous(3, pFlow::MPI::Type<real>(), &pFlow::MPI::realx3Type__);
 	MPI_Type_commit(&pFlow::MPI::realx3Type__);
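The realx3 registration follows the usual pattern for a fixed-layout struct of identical components. A minimal standalone sketch, assuming real maps to double (in a phasicFlow build it may equally be float):

    #include <mpi.h>

    struct realx3 { double x, y, z; };   // assumed layout: three contiguous doubles

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        MPI_Datatype realx3Type;
        MPI_Type_contiguous(3, MPI_DOUBLE, &realx3Type);
        MPI_Type_commit(&realx3Type);     // must be committed before use

        // ... use as MPI_Send(buf, n, realx3Type, dest, tag, comm) etc. ...

        MPI_Type_free(&realx3Type);
        MPI_Finalize();
        return 0;
    }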
@@ -41,7 +41,7 @@ public:
 	 *
 	 *  MasterProcessorDistribute: Read is done on master processor, but
 	 *  the data should be distributed between processors based on an externally
-	 *  specified pattern. Write is done on master processors and the data is
+	 *  specified pattern. Write is done on master processor and the data is
 	 *  collected from all processors (collection is done based on the externally
 	 *  specified pattern).
 	 *
@@ -146,7 +146,6 @@ public:
 	bool thisProcWriteData()const
 	{
 		if(isAllProcessorsDifferent()) return true;
-		if(isMasterProcessorDistribute())return true;
 		return isMaster();
 	}
 
@@ -118,8 +118,9 @@ bool pFlow::IOobject::readObject(bool rdHdr)
 
 bool pFlow::IOobject::writeObject() const
 {
-	if(implyWrite()&& ioPattern().thisCallWrite())
+	if(!implyWrite()) return true;
+
+	if(ioPattern().thisCallWrite())
 	{
 
 		if(auto ptrOS = outStream(); ptrOS )
@@ -160,7 +161,9 @@ bool pFlow::IOobject::readObject(iIstream& is, bool rdHdr)
 bool pFlow::IOobject::writeObject(iOstream& os) const
 {
 	if(this->writeHeader() && ioPattern().thisProcWriteHeader())
-		writeHeader(os, typeName());
+	{
+		writeHeader(os, typeName());
+	}
 
 	if(ioPattern().thisCallWrite())
 	{
@@ -199,7 +199,7 @@ bool pFlow::systemControl::operator++(int)
 
 		if (time().timersReportTime() && timersReport())
 		{
-			timers_.write(output, true);
+			timers_.write(mOutput, true);
 		}
 	}
 	else if (time().finalTime())
@@ -212,7 +212,7 @@ bool pFlow::systemControl::operator++(int)
 		}
 		writeToFileTimer_.end();
 
-		timers_.write(output, true);
+		timers_.write(mOutput, true);
 	}
 
 	return toContinue;
@@ -202,7 +202,7 @@ bool pFlow::dataIO<T>::writeData(iOstream& os, span<T> data)
 
 	if( ioPattern_.thisProcWriteData())
 	{
-		return writeDataAsciiBinary(os, data);
+		return writeDataAsciiBinary(os, bufferSpan_);
 	}
 	else
 	{
@@ -215,7 +215,7 @@ inline
 bool pFlow::dataIO<pFlow::word>::writeData(iOstream& os, span<word> data)
 {
 
-	if( ioPattern_.isParallel() )
+	if( ioPattern_.isParallel() && ioPattern_.isMasterProcessorDistribute())
 	{
 		notImplementedFunction<<
 			"data transfer for type word is not supported in parallel mode!"<<endl;
@@ -232,7 +232,7 @@ bool pFlow::dataIO<pFlow::word>::writeData(iOstream& os, span<word> data)
 
 	if( ioPattern_.thisProcWriteData())
 	{
-		return writeDataASCII(os, data);
+		return writeDataASCII(os, bufferSpan_);
 	}
 	else
 	{
@@ -31,8 +31,6 @@ Licence:
 #include "virtualConstructor.hpp"
 #include "pFlowProcessors.hpp"
 
-
-
 namespace pFlow
 {
 
@@ -1,97 +0,0 @@
-#ifndef __datIOMPI_hpp__
-#define __datIOMPI_hpp__
-
-#include "dataIO.hpp"
-#include "pFlowProcessors.hpp"
-
-#ifdef pFlow_Build_MPI
-#include "gatherMaster.hpp"
-#endif
-
-namespace pFlow
-{
-
-template<typename T>
-class dataIOMPI
-:
-	public dataIO<T>
-{
-protected:
-
-	bool gatherData(span<T> data ) override
-	{
-
-		if(this->ioPattern_.isAllProcessorsDifferent())
-		{
-			this->bufferSpan_ = data;
-			return true;
-		}
-
-		if( this->ioPattern_.isMasterProcessorDistribute())
-		{
-
-#ifdef pFlow_Build_MPI
-
-			auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());
-
-			if(!gatherT.gatherData(data))
-			{
-				fatalErrorInFunction<<"Error in gathering data to master"<<endl;
-				return false;
-			}
-
-			this->buffer_ = gatherT.moveData();
-
-			this->bufferSpan_ = makeSpan(this->buffer_);
-
-			return true;
-#else
-			notImplementedFunction;
-			fatalExit;
-			return false;
-#endif //pFlow_Build_MPI
-
-		}
-
-		if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
-		{
-			if( this->ioPattern_.isMaster() )
-			{
-				this->bufferSpan_ = data;
-			}
-			else
-			{
-				this->bufferSpan_ = span<T>(nullptr, 0);
-				return true;
-			}
-		}
-
-		return false;
-	}
-public:
-
-	TypeInfo("dataIO<MPI>");
-
-	dataIOMPI(const IOPattern& iop)
-	:
-		dataIO<T>(iop)
-	{}
-
-	dataIOMPI(const dataIOMPI&) = default;
-
-	dataIOMPI(dataIOMPI&&) = default;
-
-	dataIOMPI& operator=(const dataIOMPI&) = default;
-
-	dataIOMPI& operator=(dataIOMPI&&) = default;
-
-	~dataIOMPI() = default;
-
-};
-
-}
-
-#endif
@@ -19,9 +19,15 @@ template class pFlow::dataIORegular<pFlow::int64>;
 template class pFlow::dataIO<pFlow::uint32>;
 template class pFlow::dataIORegular<pFlow::uint32>;
 
+template class pFlow::dataIO<pFlow::uint32x3>;
+template class pFlow::dataIORegular<pFlow::uint32x3>;
+
 template class pFlow::dataIO<pFlow::uint64>;
 template class pFlow::dataIORegular<pFlow::uint64>;
 
+template class pFlow::dataIO<size_t>;
+template class pFlow::dataIORegular<size_t>;
+
 template class pFlow::dataIO<pFlow::real>;
 template class pFlow::dataIORegular<pFlow::real>;
 
@@ -260,6 +260,8 @@ inline iIstream& operator>>( iIstream& is, float& val);
 
 inline iIstream& operator>>( iIstream& is, double& val);
 
+inline iIstream& operator>>( iIstream& is, size_t& val);
+
 
 } // pFlow
 
@@ -295,3 +295,35 @@ inline pFlow::iIstream& pFlow::operator>>( iIstream& is, double& val)
 	return is;
 }
 
+inline pFlow::iIstream& pFlow::operator>>( iIstream& is, size_t& val)
+{
+	token t(is);
+
+	if (!t.good())
+	{
+		ioErrorInFile(is.name(), is.lineNumber())
+			<< "Bad token - could not get size_t value";
+		fatalExit;
+		is.setBad();
+		return is;
+	}
+
+	if (t.isNumber())
+	{
+		val = static_cast<size_t>(t.number());
+	}
+	else
+	{
+		ioErrorInFile(is.name(), is.lineNumber())
+			<< "Wrong token type - expected size_t value, found "
+			<< t;
+		fatalExit;
+		is.setBad();
+		return is;
+	}
+
+	is.check(FUNCTION_NAME);
+	return is;
+}
+
@@ -9,5 +9,5 @@ pFlow::processorOstream pFlow::pOutput(cout, "pFlow processorOstream");
 
 pFlow::Istream pFlow::input( std::cin, "sFlow Istream");
 
-pFlow::masterOstream pFlow::errReport( std::cerr, "pFlow error report stream");
+pFlow::processorOstream pFlow::errReport( std::cerr, "pFlow error report stream");
 
@@ -28,12 +28,12 @@ namespace pFlow
 
 	extern Istream input;
 
-	extern masterOstream errReport;
+	extern processorOstream errReport;
 
 }
 
 
-#define INFORMATION pFlow::mOutput<<boldChar<<magentaColor<<"> INFO: "<<defaultColor<<magentaColor
+#define INFORMATION pFlow::pOutput<<boldChar<<magentaColor<<"> INFO: "<<defaultColor<<magentaColor
 #define END_INFO defaultColor<<pFlow::endl
 
 #define REPORT(n) pFlow::mOutput.space(2*n)
@@ -191,7 +191,8 @@ pFlow::boundaryBase::boundaryBase
 	internal_(internal),
 	boundaries_(bndrs),
 	thisBoundaryIndex_(thisIndex),
-	mirrorProcessoNo_(dict.getVal<uint32>("mirrorProcessorNo")),
+	neighborProcessorNo_(dict.getVal<int32>("neighborProcessorNo")),
+	isBoundaryMaster_(thisProcessorNo()>=neighborProcessorNo()),
 	name_(dict.name()),
 	type_(dict.getVal<word>("type"))
 {
@@ -231,13 +232,22 @@ pFlow::boundaryBase::thisPoints()const
 
 }
 
-typename pFlow::boundaryBase::pointFieldAccessType
-pFlow::boundaryBase::neighborPoints()const
+pFlow::realx3Vector_D& pFlow::boundaryBase::neighborProcPoints()
 {
-	notImplementedFunction;
-	return pointFieldAccessType();
+	static realx3Vector_D dummyVector{"dummyVector"};
+	notImplementedFunction;
+	fatalExit;
+	return dummyVector;
+}
+
+const pFlow::realx3Vector_D&
+pFlow::boundaryBase::neighborProcPoints()const
+{
+	static realx3Vector_D dummyVector{"dummyVector"};
+	notImplementedFunction;
+	fatalExit;
+	return dummyVector;
 }
 
 pFlow::realx3 pFlow::boundaryBase::displacementVectroToMirror() const
 {
@@ -51,42 +51,48 @@ public:
 
 private:
 
 	// friend et al.
 	friend boundaryList;
 
 	const plane& boundaryPlane_;
 
 	/// list of particles indices on device
 	uint32Vector_D indexList_;
 
 	/// list of particles indieces on host
 	mutable uint32Vector_H indexListHost_;
 
 	/// device and host list are sync
 	mutable bool listsSync_ = false;
 
 	/// The length defined for creating neighbor list
 	real neighborLength_;
 
 	/// a reference to internal points
 	internalPoints& internal_;
 
 	/// a reference to the list of boundaries
 	/// (never use this in the constructor).
 	boundaryList& boundaries_;
 
 	uint32 thisBoundaryIndex_;
 
-	uint32 mirrorProcessoNo_;
+	int neighborProcessorNo_;
 
-	word name_;
+	bool isBoundaryMaster_;
 
-	word type_;
+	word name_;
+
+	word type_;
 
 protected:
 
-	/// @brief set the size of indexList
-	void setSize(uint32 newSize);
+	/// @brief Set the size of indexList.
+	/// It is virtual to let derived classes be aware of
+	/// the fact that the size of boundary points has been changed.
+	/// So, any derived class that overrides this method should call
+	/// boundaryBase::setSize(newSize) too.
+	virtual void setSize(uint32 newSize);
 
 	void setNewIndices(const uint32Vector_D& newIndices);
 
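A standalone analogue of the override contract stated in the comment above; the class names are hypothetical:

    #include <cstdint>
    #include <vector>

    struct Base
    {
        std::vector<uint32_t> indexList_;
        virtual void setSize(uint32_t n) { indexList_.resize(n); }
        virtual ~Base() = default;
    };

    struct ProcBoundary : Base
    {
        std::vector<double> sendBuffer_;
        void setSize(uint32_t n) override
        {
            Base::setSize(n);       // required: keep the base index list in sync
            sendBuffer_.resize(n);  // then resize the communication buffers
        }
    };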
@@ -116,6 +122,17 @@ protected:
 		}
 	}
 
+	/// Update this boundary data in two steps (1 and 2).
+	/// This is called after calling beforeIteration for
+	/// all boundaries, so any particle addition, deletion,
+	/// and transfer has been finished up to this point.
+	/// This two-step update helps to have a flexible mechanism
+	/// for data transfer, mostly for MPI related jobs.
+	virtual
+	bool updataBoundary(int step)
+	{
+		return true;
+	}
+
 public:
 
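The two steps map naturally onto nonblocking MPI: step 1 posts the sends and receives for every boundary, step 2 completes them, so all exchanges are in flight at once. A self-contained sketch of that split; the buffer handling is hypothetical:

    #include <mpi.h>
    #include <vector>

    // One boundary's share of a two-step exchange with its neighbor rank.
    struct BoundaryExchange
    {
        int neighborRank = MPI_PROC_NULL;
        std::vector<double> sendBuf, recvBuf;
        MPI_Request reqs[2] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL };

        bool update(int step, MPI_Comm comm)
        {
            if (step == 1)       // post only; do not wait here
            {
                MPI_Irecv(recvBuf.data(), static_cast<int>(recvBuf.size()),
                          MPI_DOUBLE, neighborRank, 0, comm, &reqs[0]);
                MPI_Isend(sendBuf.data(), static_cast<int>(sendBuf.size()),
                          MPI_DOUBLE, neighborRank, 0, comm, &reqs[1]);
            }
            else if (step == 2)  // complete; recvBuf is usable afterwards
            {
                MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
            }
            return true;
        }
    };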
@@ -213,6 +230,24 @@ public:
 		return indexList_.capacity();
 	}
 
+	inline
+	int neighborProcessorNo()const
+	{
+		return neighborProcessorNo_;
+	}
+
+	inline
+	int thisProcessorNo()const
+	{
+		return pFlowProcessors().localRank();
+	}
+
+	inline
+	bool isBoundaryMaster()const
+	{
+		return isBoundaryMaster_;
+	}
+
 	inline
 	uint32 thisBoundaryIndex()const
 	{
@@ -283,8 +318,22 @@ public:
 
 	pointFieldAccessType thisPoints()const;
 
+	/// @brief Return number of points in the neighbor processor boundary
+	virtual
+	uint32 neighborProcSize()const
+	{
+		return 0;
+	}
+
+	/// @brief Return a reference to point positions in the neighbor
+	/// processor boundary.
 	virtual
-	pointFieldAccessType neighborPoints()const;
+	realx3Vector_D& neighborProcPoints();
+
+	/// @brief Return a const reference to point positions in the
+	/// neighbor processor boundary.
+	virtual
+	const realx3Vector_D& neighborProcPoints()const;
 
 	/// - static create
 	static
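Non-processor boundaries keep these do-nothing defaults; an MPI processor boundary is expected to override all three members so contact search can see the points owned by the neighbor rank. A standalone analogue of that shape, with hypothetical names:

    #include <cstdint>
    #include <vector>

    struct vec3 { double x, y, z; };

    struct BoundaryApi
    {
        virtual uint32_t neighborProcSize() const { return 0; }
        virtual std::vector<vec3>& neighborProcPoints() = 0;
        virtual const std::vector<vec3>& neighborProcPoints() const = 0;
        virtual ~BoundaryApi() = default;
    };

    // Processor boundary: owns positions received from the neighbor rank.
    struct ProcessorBoundary : BoundaryApi
    {
        std::vector<vec3> neighborPoints_;  // filled by the MPI exchange

        uint32_t neighborProcSize() const override
        {
            return static_cast<uint32_t>(neighborPoints_.size());
        }
        std::vector<vec3>& neighborProcPoints() override
        {
            return neighborPoints_;
        }
        const std::vector<vec3>& neighborProcPoints() const override
        {
            return neighborPoints_;
        }
    };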
@@ -2,17 +2,17 @@
       O        C enter of
      O O       E ngineering and
     O   O      M ultiscale modeling of
    OOOOOOO     F luid flow
 ------------------------------------------------------------------------------
   Copyright (C): www.cemf.ir
   email: hamid.r.norouzi AT gmail.com
 ------------------------------------------------------------------------------
 Licence:
   This file is part of phasicFlow code. It is a free software for simulating
   granular and multiphase flows. You can redistribute it and/or modify it under
   the terms of GNU General Public License v3 or any other later versions.
 
   phasicFlow is distributed to help others in their research in the field of
   granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
   implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 
@@ -21,49 +21,44 @@ Licence:
 #include "boundaryList.hpp"
 #include "pointStructure.hpp"
 
-void pFlow::boundaryList::setExtendedDomain()
+void
+pFlow::boundaryList::setExtendedDomain()
 {
-	if(!listSet_)
+	if (!listSet_)
 	{
-		fatalErrorInFunction<<
-			"boundary list is not set yet and you used the objects."<<endl;
+		fatalErrorInFunction
+		  << "boundary list is not set yet and you used the objects." << endl;
 		fatalExit;
 	}
 
-	realx3 lowerExt =
-		boundary(0).boundaryExtensionLength() +
-		boundary(2).boundaryExtensionLength() +
-		boundary(4).boundaryExtensionLength();
-
-	realx3 upperExt =
-		boundary(1).boundaryExtensionLength()+
-		boundary(3).boundaryExtensionLength()+
-		boundary(5).boundaryExtensionLength();
-
-	extendedDomain_ = pStruct_.simDomain().extendThisDomain
-	(
-		lowerExt,
-		upperExt
-	);
+	realx3 lowerExt = boundary(0).boundaryExtensionLength() +
+	                  boundary(2).boundaryExtensionLength() +
+	                  boundary(4).boundaryExtensionLength();
+
+	realx3 upperExt = boundary(1).boundaryExtensionLength() +
+	                  boundary(3).boundaryExtensionLength() +
+	                  boundary(5).boundaryExtensionLength();
+
+	extendedDomain_ = pStruct_.simDomain().extendThisDomain(lowerExt, upperExt);
 }
 
-bool pFlow::boundaryList::resetLists()
+bool
+pFlow::boundaryList::resetLists()
 {
 	clear();
 	listSet_ = false;
 	return true;
 }
 
-bool pFlow::boundaryList::updateLists()
+bool
+pFlow::boundaryList::updateNeighborLists()
 {
-	if(!listSet_)
+	if (!listSet_)
 	{
-		setLists();
+		createBoundaries();
 	}
 
-	std::array<real,6> dist;
+	std::array<real, 6> dist;
 	dist[0] = boundary(0).neighborLength();
 	dist[1] = boundary(1).neighborLength();
 	dist[2] = boundary(2).neighborLength();
@@ -71,71 +66,66 @@ bool pFlow::boundaryList::updateLists()
 	dist[4] = boundary(4).neighborLength();
 	dist[5] = boundary(5).neighborLength();
 
-	pStruct_.updateFlag(
-		extendedDomain_,
-		dist);
+	pStruct_.updateFlag(extendedDomain_, dist);
 
 	const auto& maskD = pStruct_.activePointsMaskDevice();
-	boundary(0).setSize( maskD.leftSize() );
-	boundary(1).setSize( maskD.rightSize() );
-	boundary(2).setSize( maskD.bottomSize() );
-	boundary(3).setSize( maskD.topSize() );
-	boundary(4).setSize( maskD.rearSize() );
-	boundary(5).setSize( maskD.frontSize() );
+	boundary(0).setSize(maskD.leftSize());
+	boundary(1).setSize(maskD.rightSize());
+	boundary(2).setSize(maskD.bottomSize());
+	boundary(3).setSize(maskD.topSize());
+	boundary(4).setSize(maskD.rearSize());
+	boundary(5).setSize(maskD.frontSize());
 
 	pStruct_.fillNeighborsLists(
 		boundary(0).indexList().deviceViewAll(),
 		boundary(1).indexList().deviceViewAll(),
 		boundary(2).indexList().deviceViewAll(),
 		boundary(3).indexList().deviceViewAll(),
 		boundary(4).indexList().deviceViewAll(),
-		boundary(5).indexList().deviceViewAll());
+		boundary(5).indexList().deviceViewAll()
+	);
 
 	return true;
 }
 
-pFlow::boundaryList::boundaryList
-(
-	pointStructure& pStruct
-)
-:
-	ListPtr<boundaryBase>(pStruct.simDomain().sizeOfBoundaries()),
-	pStruct_(pStruct),
-	timeControl_
-	(
-		pStruct.simDomain().subDict("boundaries"),
-		"update",
-		pStruct.currentTime()
-	)
-{}
-
-bool pFlow::boundaryList::updateLists(uint32 iter, real t, real dt)
-{
-	if( timeControl_.timeEvent(iter, t, dt))
+pFlow::boundaryList::boundaryList(pointStructure& pStruct)
+  : ListPtr<boundaryBase>(pStruct.simDomain().sizeOfBoundaries()),
+    pStruct_(pStruct),
+    timeControl_(
+      pStruct.simDomain().subDict("boundaries"),
+      "update",
+      pStruct.currentTime()
+    )
+{
+}
+
+bool
+pFlow::boundaryList::updateNeighborLists(uint32 iter, real t, real dt)
+{
+	if (timeControl_.timeEvent(iter, t, dt))
 	{
-		return updateLists();
+		return updateNeighborLists();
 	}
 	return true;
 }
 
-bool pFlow::boundaryList::setLists()
+bool
+pFlow::boundaryList::createBoundaries()
 {
-	if(listSet_)return true;
+	if (listSet_)
+		return true;
 
-	for(auto i=0; i<pStruct_.simDomain().sizeOfBoundaries();i++)
+	for (auto i = 0; i < pStruct_.simDomain().sizeOfBoundaries(); i++)
 	{
-		this->set
-		(
-			i,
-			boundaryBase::create
-			(
-				pStruct_.simDomain().boundaryDict(i),
-				pStruct_.simDomain().boundaryPlane(i),
-				pStruct_,
-				*this,
-				i
-			)
+		this->set(
+		  i,
+		  boundaryBase::create(
+		    pStruct_.simDomain().boundaryDict(i),
+		    pStruct_.simDomain().boundaryPlane(i),
+		    pStruct_,
+		    *this,
+		    i
+		  )
 		);
 	}
 	listSet_ = true;
@@ -146,81 +136,81 @@ bool pFlow::boundaryList::setLists()
 pFlow::box
 pFlow::boundaryList::internalDomainBox() const
 {
 	const auto& thisBox = pStruct_.thisDomain().domainBox();
 
 	const realx3 lowerPointDisplacement = {
 		boundary(0).neighborLengthIntoInternal(),
 		boundary(2).neighborLengthIntoInternal(),
-		boundary(4).neighborLengthIntoInternal()};
+		boundary(4).neighborLengthIntoInternal()
+	};
 
 	const realx3 upperPointDisplacement = {
 		boundary(1).neighborLengthIntoInternal(),
 		boundary(3).neighborLengthIntoInternal(),
-		boundary(5).neighborLengthIntoInternal()};
+		boundary(5).neighborLengthIntoInternal()
+	};
 
-	return {thisBox.minPoint() + lowerPointDisplacement,
-		thisBox.maxPoint() - upperPointDisplacement};
+	return { thisBox.minPoint() + lowerPointDisplacement,
+		     thisBox.maxPoint() - upperPointDisplacement };
 }
 
 bool
 pFlow::boundaryList::beforeIteration(uint32 iter, real t, real dt)
 {
 	// it is time to update lists
-	if(timeControl_.timeEvent(iter, t, dt))
+	if (timeControl_.timeEvent(iter, t, dt) && !updateNeighborLists())
 	{
-		if(!updateLists())
-		{
-			fatalErrorInFunction;
-			return false;
-		}
+		fatalErrorInFunction;
+		return false;
 	}
 
-	for(auto& bdry:*this)
+	for (auto bdry : *this)
 	{
-		if( !bdry->beforeIteration(iter, t, dt))
+		if (!bdry->beforeIteration(iter, t, dt))
 		{
-			fatalErrorInFunction<<
-			"Error in beforeIteration in boundary "<<bdry->name()<<endl;
+			fatalErrorInFunction << "Error in beforeIteration in boundary "
+			                     << bdry->name() << endl;
 			return false;
 		}
 	}
 
-	return true;
+	for (auto bdry : *this)
+	{
+		bdry->updataBoundary(1);
+	}
+
+	for (auto bdry : *this)
+	{
+		bdry->updataBoundary(2);
+	}
+
+	return true;
 }
 
-bool pFlow::boundaryList::iterate
-(
-	uint32 iter,
-	real t,
-	real dt
-)
+bool
+pFlow::boundaryList::iterate(uint32 iter, real t, real dt)
 {
-	for(auto& bdry:*this)
+	for (auto& bdry : *this)
 	{
-		if( !bdry->iterate(iter, t, dt))
+		if (!bdry->iterate(iter, t, dt))
 		{
-			fatalErrorInFunction<<
-			"Error in iterate in boundary "<<bdry->name()<<endl;
+			fatalErrorInFunction << "Error in iterate in boundary "
+			                     << bdry->name() << endl;
 			return false;
 		}
 	}
 	return true;
 }
 
-bool pFlow::boundaryList::afterIteration
-(
-	uint32 iter,
-	real t,
-	real dt
-)
+bool
+pFlow::boundaryList::afterIteration(uint32 iter, real t, real dt)
 {
-	for(auto& bdry:*this)
+	for (auto& bdry : *this)
 	{
-		if( !bdry->afterIteration(iter, t, dt))
+		if (!bdry->afterIteration(iter, t, dt))
 		{
-			fatalErrorInFunction<<
-			"Error in afterIteration in boundary "<<bdry->name()<<endl;
+			fatalErrorInFunction << "Error in afterIteration in boundary "
+			                     << bdry->name() << endl;
 			return false;
 		}
 	}
@@ -40,24 +40,23 @@ class boundaryList
 private:
 
 	//// - data members
 	pointStructure& pStruct_;
 
 	baseTimeControl timeControl_;
 
 	domain extendedDomain_;
 
 	box internalDomainBox_;
 
 	bool listSet_ = false;
 
 	void setExtendedDomain();
 
 	bool resetLists();
 
 	/// @brief update neighbor list of boundaries regardless
 	/// of the time intervals
-	bool updateLists();
+	bool updateNeighborLists();
 
 public:
 
@@ -66,16 +65,16 @@ public:
 
 	//// - Constructors
-	boundaryList(pointStructure& pStruct);
+	explicit boundaryList(pointStructure& pStruct);
 
-	~boundaryList() = default;
+	virtual ~boundaryList() = default;
 
 	/// @brief update neighbor list of boundaries based on
 	/// the time intervals
-	bool updateLists(uint32 iter, real t, real dt);
+	bool updateNeighborLists(uint32 iter, real t, real dt);
 
-	bool setLists();
+	bool createBoundaries();
 
 	inline
 	const pointStructure& pStruct()const
@@ -115,8 +114,6 @@ public:
 
 	box internalDomainBox()const;
 
-
-
 	bool beforeIteration(uint32 iter, real t, real dt);
 
 	bool iterate(uint32 iter, real t, real dt);
@@ -51,9 +51,9 @@ bool pFlow::regularSimulationDomain::createBoundaryDicts()
 			return false;
 		}
 
-		if(!bDict.add("mirrorProcessorNo",(uint32) processors::globalRank()))
+		if(!bDict.add("neighborProcessorNo",(uint32) processors::globalRank()))
 		{
-			fatalErrorInFunction<<"error in adding mirrorProcessorNo to "<< bName <<
+			fatalErrorInFunction<<"error in adding neighborProcessorNo to "<< bName <<
 			" in dictionary "<< boundaries.globalName()<<endl;
 			return false;
 		}
@@ -84,7 +84,7 @@ bool pFlow::regularSimulationDomain::createBoundaryDicts()
 
 bool pFlow::regularSimulationDomain::setThisDomain()
 {
-	thisDomain_ = domain(globalBox_);
+	thisDomain_ = domain(globalBox());
 	return true;
 }
 
@@ -126,11 +126,23 @@ pFlow::uint32 pFlow::regularSimulationDomain::numberToBeExported() const
 	return 0;
 }
 
-bool pFlow::regularSimulationDomain::initialTransferBlockData
-(
-	span<char> src,
-	span<char> dst,
-	size_t sizeOfElement
+bool
+pFlow::regularSimulationDomain::domainActive() const
+{
+	return true;
+}
+
+const pFlow::domain&
+pFlow::regularSimulationDomain::thisDomain() const
+{
+	return thisDomain_;
+}
+
+bool
+pFlow::regularSimulationDomain::initialTransferBlockData(
+	span<char> src,
+	span<char> dst,
+	size_t sizeOfElement
 ) const
 {
 	size_t requiredSize = sizeOfElement*initialNumberInThis();
@@ -209,7 +221,3 @@ const pFlow::dictionary &pFlow::regularSimulationDomain::thisBoundaryDict() cons
 	return this->subDict("regularBoundaries");
 }
 
-bool pFlow::regularSimulationDomain::requiresDataTransfer()const
-{
-	return false;
-}
@@ -2,17 +2,17 @@
       O        C enter of
      O O       E ngineering and
     O   O      M ultiscale modeling of
    OOOOOOO     F luid flow
 ------------------------------------------------------------------------------
   Copyright (C): www.cemf.ir
   email: hamid.r.norouzi AT gmail.com
 ------------------------------------------------------------------------------
 Licence:
   This file is part of phasicFlow code. It is a free software for simulating
   granular and multiphase flows. You can redistribute it and/or modify it under
   the terms of GNU General Public License v3 or any other later versions.
 
   phasicFlow is distributed to help others in their research in the field of
   granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
   implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 
@@ -26,81 +26,70 @@ Licence:
 namespace pFlow
 {
 
 class regularSimulationDomain
 :
 	public simulationDomain
 {
-protected:
+private:
 
-	uint32 initialNumPoints_=0;
+	domain thisDomain_;
 
-	bool createBoundaryDicts() override;
+	uint32 initialNumPoints_ = 0;
 
-	bool setThisDomain()override;
+	bool createBoundaryDicts() final;
+
+	bool setThisDomain() final;
 
 public:
 
 	TypeInfo("simulationDomain<regular>");
 
-	regularSimulationDomain(systemControl& control);
+	explicit regularSimulationDomain(systemControl& control);
 
-	virtual
-	~regularSimulationDomain()=default;
+	~regularSimulationDomain() final = default;
 
-	add_vCtor
-	(
-		simulationDomain,
-		regularSimulationDomain,
-		systemControl
-	);
+	add_vCtor(simulationDomain, regularSimulationDomain, systemControl);
 
-	bool initialUpdateDomains(span<realx3> pointPos) override;
+	bool initialUpdateDomains(span<realx3> pointPos) final;
 
-	uint32 initialNumberInThis()const override;
+	uint32 initialNumberInThis() const final;
 
-	bool initialThisDomainActive()const override
-	{
-		return true;
-	}
-
-	/*bool updateDomains(
-		span<realx3> pointPos,
-		pFlagTypeHost flags) override;*/
-
-	uint32 numberToBeImported()const override;
+	uint32 numberToBeImported() const final;
 
-	uint32 numberToBeExported()const override;
+	uint32 numberToBeExported() const final;
+
+	/// @brief Is this domain active?
+	/// Active means there are particles in it and
+	/// boundaries and other entities of simulation domains are valid
+	bool domainActive() const final;
+
+	/// @brief return the simulation domain of this processor
+	const domain& thisDomain() const final;
 
 	bool initialTransferBlockData(
 		span<char> src,
 		span<char> dst,
-		size_t sizeOfElement) const override;
+		size_t sizeOfElement
+	) const final;
 
 	/// @brief
 	/// @param src
 	/// @param dst
 	/// @return
-	bool initialTransferBlockData(
-		span<realx3> src,
-		span<realx3> dst) const override;
+	bool initialTransferBlockData(span<realx3> src, span<realx3> dst)
+	  const final;
 
-	bool initialTransferBlockData(
-		span<real> src,
-		span<real> dst) const override;
+	bool initialTransferBlockData(span<real> src, span<real> dst)
+	  const final;
 
-	bool initialTransferBlockData(
-		span<uint32> src,
-		span<uint32> dst) const override;
+	bool initialTransferBlockData(span<uint32> src, span<uint32> dst)
+	  const final;
 
-	bool initialTransferBlockData(
-		span<int32> src,
-		span<int32> dst) const override;
+	bool initialTransferBlockData(span<int32> src, span<int32> dst)
+	  const final;
 
-	const dictionary& thisBoundaryDict()const override;
+	const dictionary& thisBoundaryDict() const final;
 
-	bool requiresDataTransfer() const override;
 };
 
 }
@@ -52,8 +52,14 @@ pFlow::domain pFlow::simulationDomain::extendThisDomain
 	return domain({minP, maxP});
 }
 
+const pFlow::plane&
+pFlow::simulationDomain::boundaryPlane(uint32 i) const
+{
+	return thisDomain().boundaryPlane(i);
+}
+
 pFlow::uniquePtr<pFlow::simulationDomain>
-pFlow::simulationDomain::create(systemControl &control)
+pFlow::simulationDomain::create(systemControl& control)
 {
 	word sType = angleBracketsNames(
 		"simulationDomain",
@@ -39,33 +39,27 @@ class simulationDomain
 :
 	public fileDictionary
 {
 private:
 
-	static inline constexpr uint32 sizeOfBoundaries_ = 6;
-
-	static
-	inline const std::array<word,6> boundaryNames_ =
-	{
-		"left", "right",
-		"bottom", "top",
-		"rear", "front"
-	};
-
-protected:
-
-	//fileDictionary globalDomainDict_ ;
-
-	/// @brief acutal limits of the simulation domain
+	/// @brief actual limits of the global box of simulation
 	box globalBox_;
 
-	/// @brief the actual limits of this processor domain
-	domain thisDomain_;
-
-	bool boundariesSet_ = false;
+	static constexpr uint32 sizeOfBoundaries_ = 6;
+
+	static
+	inline const std::array<word,6> boundaryNames_ =
+	{
+		"left", "right",
+		"bottom", "top",
+		"rear", "front"
+	};
 
-	virtual bool createBoundaryDicts() = 0;
+	virtual
+	bool createBoundaryDicts() = 0;
 
-	virtual bool setThisDomain() = 0;
+	virtual
+	bool setThisDomain() = 0;
 
 public:
 
@@ -73,11 +67,10 @@ public:
 	TypeInfo("simulationDomain");
 
 	/// Constrcut from components
-	simulationDomain(systemControl& control);
+	explicit simulationDomain(systemControl& control);
 
 	/// Destructor
-	virtual
-	~simulationDomain() = default;
+	~simulationDomain() override = default;
 
 	create_vCtor
@@ -88,6 +81,11 @@ public:
 		(control)
 	);
 
+	const auto& globalBox()const
+	{
+		return globalBox_;
+	}
+
 	virtual
 	const dictionary& thisBoundaryDict()const = 0;
 
@@ -97,9 +95,6 @@ public:
 	virtual
 	uint32 initialNumberInThis()const = 0;
 
-	virtual
-	bool initialThisDomainActive()const = 0;
-
 	virtual
 	bool initialTransferBlockData(
 		span<char> src,
@@ -139,9 +134,7 @@ public:
 			charSpan(dst),
 			el
 		);
 	}
 
-
-
 	/// @brief Number of points to be imported after updating domains
 	/// @return number of points
@@ -151,40 +144,46 @@ public:
 	virtual
 	uint32 numberToBeExported()const = 0;
 
+	/// @brief Is this domain active?
+	/// Active means there are particles in it and
+	/// boundaries and other entities of simulation domains are valid
 	virtual
-	bool requiresDataTransfer() const = 0;
+	bool domainActive()const = 0;
 
-	inline
-	const auto& thisDomain()const
-	{
-		return thisDomain_;
-	}
+	/// @brief return the simulation domain of this processor
+	virtual
+	const domain& thisDomain()const = 0;
 
 	domain extendThisDomain(
 		const realx3& lowerPointExtension,
 		const realx3& upperPointExtension)const;
 
+	/// @brief The original dictionary supplied by the user as input
 	inline
 	const auto& globalBoundaryDict()const
 	{
 		return static_cast<const fileDictionary&>(*this);
 	}
 
+	/// @brief return a const ref to dictionary of boundary i of this processor
 	inline
 	const dictionary& boundaryDict(uint32 i)const
 	{
 		return thisBoundaryDict().subDict(bundaryName(i));
 	}
 
-	inline
-	const auto& boundaryPlane(uint32 i)const
-	{
-		return thisDomain_.boundaryPlane(i);
-	}
+	/// @brief return a const ref to the plane of boundary i of this processor
+	const plane& boundaryPlane(uint32 i)const;
+
+	static
+	uniquePtr<simulationDomain> create(systemControl& control);
 
+	/// @brief Boundary name based on boundary index
+	/// @param i boundary index (range from 0 to 5)
+	/// @return const reference to name of the boundary
 	static inline
 	const word& bundaryName(uint32 i)
 	{
@@ -197,9 +196,6 @@ public:
 		return boundaryNames_.size();
 	}
 
-	static
-	uniquePtr<simulationDomain> create(systemControl& control);
-
 }; // simulationDomain
 
 
|
|||||||
thisN,
|
thisN,
|
||||||
RESERVE()
|
RESERVE()
|
||||||
);
|
);
|
||||||
|
|
||||||
auto pSpan = points.getSpan();
|
auto pSpan = points.getSpan();
|
||||||
|
|
||||||
if(auto iSpan = internal.getSpan();
|
if(auto iSpan = internal.getSpan();
|
||||||
@@ -72,7 +72,7 @@ bool pFlow::pointStructure::setupPointStructure(const PointsTypeHost &points)
 		return false;
 	}
 
-	boundaries_.setLists();
+	boundaries_.createBoundaries();
 
 	return true;
 }