Commits

0d9de2c601  Merge pull request #222 from PhasicFlow/main  (2025-05-02 23:04:23 +03:30)

3489 changed files with 231 additions and 324861 deletions

@@ -9,7 +9,7 @@ import sys
REPO_URL = "https://github.com/PhasicFlow/phasicFlow"
REPO_PATH = os.path.join(os.environ.get("GITHUB_WORKSPACE", ""), "repo")
WIKI_PATH = os.path.join(os.environ.get("GITHUB_WORKSPACE", ""), "wiki")
MAPPING_FILE = os.path.join(REPO_PATH, "doc/mdDocs/markdownList.yml")
MAPPING_FILE = os.path.join(REPO_PATH, ".github/workflows/markdownList.yml")
def load_mapping():
"""Load the markdown to wiki page mapping file."""

@@ -7,7 +7,7 @@ on:
paths:
- "**/*.md"
- ".github/workflows/sync-wiki.yml"
- "doc/mdDocs/markdownList.yml"
- ".github/workflows/markdownList.yml"
- ".github/scripts/sync-wiki.py"
workflow_dispatch:

.gitignore

@@ -37,15 +37,11 @@
*.out
*.app
# Exclude specific directories wherever they appear
# directories
build/**
include/**
bin/**
lib/**
**/build/
**/include/
**/bin/
**/lib/
test*/**
**/**notnow
doc/code-documentation/
@@ -65,5 +61,3 @@ doc/DTAGS
**/[0-9]*.[0-9][0-9][0-9][0-9][0-9][0-9][0-9]
**/[0-9]*.[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]
**/VTK

@@ -66,13 +66,12 @@ pFlow::uniquePtr<pFlow::DEMSystem>
word demSystemName,
const std::vector<box>& domains,
int argc,
char* argv[],
bool requireRVel
char* argv[]
)
{
if( wordvCtorSelector_.search(demSystemName) )
{
return wordvCtorSelector_[demSystemName] (demSystemName, domains, argc, argv, requireRVel);
return wordvCtorSelector_[demSystemName] (demSystemName, domains, argc, argv);
}
else
{

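Context for the hunk above: `DEMSystem::create` dispatches through a word-keyed virtual-constructor table (`wordvCtorSelector_`), and this commit drops the `requireRVel` argument from that signature. A minimal sketch of the same selector pattern follows; every type and function name in it is an illustrative stand-in, not phasicFlow's actual API.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Illustrative stand-ins; only the selector pattern mirrors the hunk above.
struct DemBase {
    virtual ~DemBase() = default;
    virtual void info() const = 0;
};

struct SphereDem : DemBase {
    void info() const override { std::cout << "sphereDEMSystem selected\n"; }
};

using Ctor = std::function<std::unique_ptr<DemBase>(int, char*[])>;

std::map<std::string, Ctor>& selector() {
    static std::map<std::string, Ctor> table;  // word -> constructor
    return table;
}

// Mirrors the new create() signature: no requireRVel argument.
std::unique_ptr<DemBase> create(const std::string& name, int argc, char* argv[]) {
    auto it = selector().find(name);
    if (it == selector().end()) return nullptr;  // the fatal-error branch in the real code
    return it->second(argc, argv);
}

int main(int argc, char* argv[]) {
    selector()["sphereDEMSystem"] = [](int, char*[]) {
        return std::unique_ptr<DemBase>(new SphereDem());
    };
    if (auto sys = create("sphereDEMSystem", argc, argv)) sys->info();
    return 0;
}
```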
@@ -71,15 +71,13 @@ public:
word demSystemName,
const std::vector<box>& domains,
int argc,
char* argv[],
bool requireRVel
char* argv[]
),
(
demSystemName,
domains,
argc,
argv,
requireRVel
argv
));
realx3 g()const
@@ -98,7 +96,7 @@ public:
return Control_();
}
auto inline constexpr usingDouble()const
auto inline constexpr usingDoulle()const
{
return pFlow::usingDouble__;
}
@@ -121,10 +119,7 @@ public:
span<const int32> parIndexInDomain(int32 domIndx)const = 0;
virtual
span<real> diameter() = 0;
virtual
span<uint32> particleId() = 0;
span<real> diameter() = 0;
virtual
span<real> courseGrainFactor() = 0;
@@ -181,8 +176,7 @@ public:
word demSystemName,
const std::vector<box>& domains,
int argc,
char* argv[],
bool requireRVel=false);
char* argv[]);
};

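The interface above hands out `span` objects over host-side buffers. A minimal sketch of such a non-owning view, assuming only the `data()`/`size()` behavior visible in this diff (the real `pFlow::span` may differ):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal non-owning view in the spirit of pFlow::span (illustrative only).
template <typename T>
class span {
    T*          data_ = nullptr;
    std::size_t size_ = 0;
public:
    span() = default;
    span(T* data, std::size_t size) : data_(data), size_(size) {}
    T*          data() const { return data_; }
    std::size_t size() const { return size_; }
    T&          operator[](std::size_t i) const { return data_[i]; }
    T*          begin() const { return data_; }
    T*          end() const { return data_ + size_; }
};

int main() {
    std::vector<float> diameters{0.01f, 0.02f, 0.015f};
    span<float> d(diameters.data(), diameters.size());
    for (float x : d) std::cout << x << "\n";  // iterate without owning the memory
    return 0;
}
```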
@@ -163,12 +163,6 @@ pFlow::grainDEMSystem::parIndexInDomain(int32 di)const
return particleDistribution_->particlesInDomain(di);
}
pFlow::span<pFlow::uint32> pFlow::grainDEMSystem::particleId()
{
return span<uint32>(particleIdHost_.data(), particleIdHost_.size());
}
pFlow::span<pFlow::real> pFlow::grainDEMSystem::diameter()
{
return span<real>(diameterHost_.data(), diameterHost_.size());
@@ -239,7 +233,6 @@ bool pFlow::grainDEMSystem::beforeIteration()
velocityHost_ = std::as_const(particles_()).velocity().hostView();
positionHost_ = std::as_const(particles_()).pointPosition().hostView();
diameterHost_ = particles_->diameter().hostView();
particleIdHost_ = particles_->particleId().hostView();
if(requireRVel_)
rVelocityHost_ = std::as_const(particles_()).rVelocity().hostView();

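`beforeIteration` above refreshes host-side copies of device-resident particle fields via `hostView()`. A generic Kokkos sketch of that host-mirror pattern; the field name and extent are illustrative:

```cpp
#include <Kokkos_Core.hpp>

int main(int argc, char* argv[]) {
    Kokkos::initialize(argc, argv);
    {
        // device-resident field, e.g. particle diameters
        Kokkos::View<double*> diameter("diameter", 100);
        Kokkos::parallel_for("fill", 100, KOKKOS_LAMBDA(int i) {
            diameter(i) = 0.001 * i;
        });

        // host mirror refreshed before host-side work, analogous to
        // diameterHost_ = particles_->diameter().hostView() in the hunk above
        auto diameterHost = Kokkos::create_mirror_view(diameter);
        Kokkos::deep_copy(diameterHost, diameter);
    }
    Kokkos::finalize();
    return 0;
}
```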
@@ -63,8 +63,6 @@ protected:
ViewType1D<real, HostSpace> diameterHost_;
ViewType1D<uint32, HostSpace> particleIdHost_;
bool requireRVel_ = false;
ViewType1D<realx3, HostSpace> rVelocityHost_;
@@ -124,8 +122,6 @@ public:
span<const int32> parIndexInDomain(int32 di)const override;
span<uint32> particleId() override;
span<real> diameter() override;
span<real> courseGrainFactor() override;

@@ -165,11 +165,6 @@ pFlow::sphereDEMSystem::parIndexInDomain(int32 di)const
return particleDistribution_->particlesInDomain(di);
}
pFlow::span<pFlow::uint32> pFlow::sphereDEMSystem::particleId()
{
return span<uint32>();
}
pFlow::span<pFlow::real> pFlow::sphereDEMSystem::diameter()
{
return span<real>(diameterHost_.data(), diameterHost_.size());
@@ -240,7 +235,6 @@ bool pFlow::sphereDEMSystem::beforeIteration()
velocityHost_ = std::as_const(particles_()).velocity().hostView();
positionHost_ = std::as_const(particles_()).pointPosition().hostView();
diameterHost_ = particles_->diameter().hostView();
particleIdHost_ = particles_->particleId().hostView();
if(requireRVel_)
rVelocityHost_ = std::as_const(particles_()).rVelocity().hostView();

@@ -63,8 +63,6 @@ protected:
ViewType1D<real, HostSpace> diameterHost_;
ViewType1D<uint32, HostSpace> particleIdHost_;
bool requireRVel_ = false;
ViewType1D<realx3, HostSpace> rVelocityHost_;
@@ -124,8 +122,6 @@ public:
span<const int32> parIndexInDomain(int32 di)const override;
span<uint32> particleId() override;
span<real> diameter() override;
span<real> courseGrainFactor() override;

@@ -19,7 +19,7 @@ export pFlow_SRC_DIR="$pFlow_PROJECT_DIR/src"
export Kokkos_DIR="$kokkosDir"
#export Zoltan_DIR="$projectDir/Zoltan"
export Zoltan_DIR="$projectDir/Zoltan"
# Cleanup variables (done as final statement for a clean exit code)
unset projectDir

@@ -1,44 +0,0 @@
# Macro to check for Zoltan installation and build it if needed
# Usage: zoltan_find_or_build(ZOLTAN_DIR)
# Returns: ZOLTAN_INCLUDE_DIR, ZOLTAN_LIBRARY
macro(zoltan_find_or_build ZOLTAN_DIR)
# Set the Zoltan directory
set(ZOLTAN_PREFIX "${ZOLTAN_DIR}" CACHE STRING "Zoltan install directory")
message(STATUS "Zoltan install directory is ${ZOLTAN_PREFIX}")
# Check if the Zoltan library is already built
find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include")
message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}")
find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib")
message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")
# Check if Zoltan library exists, if not compile it using buildlib script
if(NOT ZOLTAN_LIBRARY)
message(STATUS "Zoltan library not found. Compiling from source using buildlib script...")
# Execute the buildlib bash script
execute_process(
COMMAND bash ${ZOLTAN_PREFIX}/buildlib
WORKING_DIRECTORY ${ZOLTAN_PREFIX}
RESULT_VARIABLE ZOLTAN_BUILD_RESULT
OUTPUT_VARIABLE ZOLTAN_BUILD_OUTPUT
ERROR_VARIABLE ZOLTAN_BUILD_ERROR
)
if(NOT ZOLTAN_BUILD_RESULT EQUAL 0)
message(FATAL_ERROR "Failed to build Zoltan library using buildlib script. Error: ${ZOLTAN_BUILD_ERROR}")
endif()
# Try to find the library again after building
find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib" NO_DEFAULT_PATH)
find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include" NO_DEFAULT_PATH)
if(NOT ZOLTAN_LIBRARY)
message(FATAL_ERROR "Failed to locate Zoltan library after building")
endif()
message(STATUS "Successfully built Zoltan library at ${ZOLTAN_LIBRARY}")
endif()
endmacro()

@@ -1,71 +0,0 @@
#include "processorAB2BoundaryIntegration.hpp"
#include "AdamsBashforth2.hpp"
#include "AB2Kernels.hpp"
#include "boundaryConfigs.hpp"
pFlow::processorAB2BoundaryIntegration::processorAB2BoundaryIntegration(
const boundaryBase &boundary,
const pointStructure &pStruct,
const word &method,
integration& intgrtn
)
:
boundaryIntegration(boundary, pStruct, method, intgrtn)
{}
bool pFlow::processorAB2BoundaryIntegration::correct(
real dt,
const realx3PointField_D& y,
const realx3PointField_D& dy
)
{
#ifndef BoundaryModel1
if(this->isBoundaryMaster())
{
const uint32 thisIndex = thisBoundaryIndex();
const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& dyView = dy.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& yView = y.BoundaryField(thisIndex).neighborProcField().deviceView();
const rangeU32 aRange(0u, dy1View.size());
return AB2Kernels::intAllActive(
"AB2Integration::correct."+this->boundaryName(),
dt,
aRange,
yView,
dyView,
dy1View
);
}
#endif //BoundaryModel1
return true;
}
bool pFlow::processorAB2BoundaryIntegration::correctPStruct(real dt, const realx3PointField_D &vel)
{
#ifndef BoundaryModel1
if(this->isBoundaryMaster())
{
const uint32 thisIndex = thisBoundaryIndex();
const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& velView = vel.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& xposView = boundary().neighborProcPoints().deviceView();
const rangeU32 aRange(0u, dy1View.size());
return AB2Kernels::intAllActive(
"AB2Integration::correctPStruct."+this->boundaryName(),
dt,
aRange,
xposView,
velView,
dy1View
);
}
#endif //BoundaryModel1
return true;
}

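`correct()` and `correctPStruct()` above pass the y/dy/dy1 views to `AB2Kernels::intAllActive`, i.e. the two-step Adams-Bashforth rule y_{n+1} = y_n + dt*(3/2*dy_n - 1/2*dy_{n-1}). A scalar sketch of one such step; the test ODE, step size, and bootstrap are illustrative:

```cpp
#include <cstdio>

// One Adams-Bashforth-2 step, matching the y/dy/dy1 triplet above.
double ab2Step(double y, double dt, double dy, double dyOld) {
    return y + dt * (1.5 * dy - 0.5 * dyOld);
}

int main() {
    // integrate dy/dt = -y from y(0) = 1 with dt = 0.01 (illustrative test ODE)
    double dt = 0.01, y = 1.0, dyOld = -1.0;  // bootstrap the missing history value
    for (int n = 0; n < 100; ++n) {
        double dy = -y;
        y = ab2Step(y, dt, dy, dyOld);
        dyOld = dy;
    }
    std::printf("y(1) = %f, exact exp(-1) = 0.367879\n", y);
    return 0;
}
```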
@@ -1,51 +0,0 @@
#ifndef __processorAB2BoundaryIntegration_hpp__
#define __processorAB2BoundaryIntegration_hpp__
#include "boundaryIntegration.hpp"
namespace pFlow
{
class processorAB2BoundaryIntegration
:
public boundaryIntegration
{
public:
TypeInfo("boundaryIntegration<processor,AdamsBashforth2>");
processorAB2BoundaryIntegration(
const boundaryBase& boundary,
const pointStructure& pStruct,
const word& method,
integration& intgrtn
);
~processorAB2BoundaryIntegration()override=default;
bool correct(
real dt,
const realx3PointField_D& y,
const realx3PointField_D& dy)override;
bool correctPStruct(real dt, const realx3PointField_D& vel)override;
add_vCtor(
boundaryIntegration,
processorAB2BoundaryIntegration,
boundaryBase
);
};
}
#endif

@@ -1,111 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "processorBoundaryContactSearch.hpp"
#include "contactSearch.hpp"
#include "particles.hpp"
//#include "pointStructure.hpp"
//#include "geometry.hpp"
void pFlow::processorBoundaryContactSearch::setSearchBox()
{
auto l = boundary().neighborLength();
auto n = boundary().boundaryPlane().normal();
auto pp1 = boundary().boundaryPlane().parallelPlane(l);
auto pp2 = boundary().boundaryPlane().parallelPlane(-l);
realx3 minP1 = min(min(min(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());
realx3 maxP1 = max(max(max(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());
realx3 minP2 = min(min(min(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());
realx3 maxP2 = max(max(max(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());
auto minP = min(minP1, minP2) - l*(realx3(1.0)-abs(n));
auto maxP = max(maxP1, maxP2) + l*(realx3(1.0)-abs(n));
searchBox_={minP, maxP};
}
pFlow::processorBoundaryContactSearch::processorBoundaryContactSearch(
const dictionary &dict,
const boundaryBase &boundary,
const contactSearch &cSearch)
:
boundaryContactSearch(dict, boundary, cSearch),
diameter_(cSearch.Particles().boundingSphere()),
masterSearch_(this->isBoundaryMaster()),
sizeRatio_(dict.getVal<real>("sizeRatio"))
{
if(masterSearch_)
{
setSearchBox();
real minD;
real maxD;
cSearch.Particles().boundingSphereMinMax(minD, maxD);
ppContactSearch_ = makeUnique<twoPartContactSearch>(
searchBox_,
maxD,
sizeRatio_);
}
else
{
searchBox_={{0,0,0},{0,0,0}};
}
}
bool pFlow::processorBoundaryContactSearch::broadSearch
(
uint32 iter,
real t,
real dt,
csPairContainerType &ppPairs,
csPairContainerType &pwPairs,
bool force
)
{
if(masterSearch_)
{
const auto thisPoints = boundary().thisPoints();
const auto& neighborProcPoints = boundary().neighborProcPoints();
const auto& bDiams = diameter_.BoundaryField(thisBoundaryIndex());
const auto thisDiams = bDiams.thisField();
const auto& neighborProcDiams = bDiams.neighborProcField();
ppContactSearch_().broadSearchPP(
ppPairs,
thisPoints,
thisDiams,
neighborProcPoints,
neighborProcDiams,
boundaryName()
);
//pOutput<<"ppSize "<< ppPairs.size()<<endl;
return true;
}else
{
return true;
}
}

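`setSearchBox()` above pads the box only in directions tangential to the boundary plane, through the `l*(realx3(1.0)-abs(n))` term. A per-component check of that padding with illustrative numbers:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    double l = 0.01;                  // neighbor length (illustrative)
    double n[3] = {1.0, 0.0, 0.0};    // boundary plane normal along x
    double minP[3] = {0.5, 0.0, 0.0};
    // minP = min(minP1, minP2) - l*(1 - |n|), applied per component
    for (int c = 0; c < 3; ++c)
        minP[c] -= l * (1.0 - std::fabs(n[c]));
    // x stays 0.5 (already covered by the two parallel planes); y and z gain l
    std::printf("padded min point: (%g, %g, %g)\n", minP[0], minP[1], minP[2]);
    return 0;
}
```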
@@ -1,76 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryContactSearch_hpp__
#define __processorBoundaryContactSearch_hpp__
#include "boundaryContactSearch.hpp"
#include "pointFields.hpp"
#include "twoPartContactSearch.hpp"
namespace pFlow
{
class processorBoundaryContactSearch : public boundaryContactSearch
{
private:
box searchBox_;
uniquePtr<twoPartContactSearch> ppContactSearch_ = nullptr;
const realPointField_D& diameter_;
bool masterSearch_;
real sizeRatio_;
void setSearchBox();
public:
TypeInfo("boundaryContactSearch<MPI,processor>")
processorBoundaryContactSearch(
const dictionary& dict,
const boundaryBase& boundary,
const contactSearch& cSearch
);
~processorBoundaryContactSearch() override = default;
add_vCtor(
boundaryContactSearch,
processorBoundaryContactSearch,
boundaryBase
);
bool broadSearch(
uint32 iter,
real t,
real dt,
csPairContainerType& ppPairs,
csPairContainerType& pwPairs,
bool force = false
) override;
};
}
#endif //__processorBoundaryContactSearch_hpp__

@@ -1,163 +0,0 @@
#include "twoPartContactSearch.hpp"
#include "twoPartContactSearchKernels.hpp"
#include "phasicFlowKokkos.hpp"
#include "streams.hpp"
void pFlow::twoPartContactSearch::checkAllocateNext(uint32 n)
{
if( nextCapacity_ < n)
{
nextCapacity_ = n;
reallocNoInit(next_, n);
}
}
void pFlow::twoPartContactSearch::nullifyHead()
{
fill(head_, static_cast<uint32>(-1));
}
void pFlow::twoPartContactSearch::nullifyNext(uint32 n)
{
fill(next_, 0u, n, static_cast<uint32>(-1));
}
void pFlow::twoPartContactSearch::buildList(
const deviceScatteredFieldAccess<realx3> &points)
{
if(points.empty())return;
uint32 n = points.size();
checkAllocateNext(n);
nullifyNext(n);
nullifyHead();
pFlow::twoPartContactSearchKernels::buildNextHead(
points,
searchCells_,
head_,
next_
);
}
pFlow::twoPartContactSearch::twoPartContactSearch
(
const box &domain,
real cellSize,
real sizeRatio
)
:
searchCells_(domain, cellSize),
head_("periodic:head",searchCells_.nx(), searchCells_.ny(), searchCells_.nz()),
sizeRatio_(sizeRatio)
{
}
bool pFlow::twoPartContactSearch::broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real>& diams1,
const deviceScatteredFieldAccess<realx3> &points2,
const deviceScatteredFieldAccess<real>& diams2,
const realx3& transferVec
)
{
if(points1.empty())return true;
if(points2.empty()) return true;
buildList(points1);
uint32 nNotInserted = 1;
// loop until the container size fits the number of contact pairs
while (nNotInserted > 0)
{
nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
(
ppPairs,
points1,
diams1,
points2,
diams2,
transferVec,
head_,
next_,
searchCells_,
sizeRatio_
);
if(nNotInserted)
{
// - resize the container
// note that getFull now shows the number of failed insertions.
uint32 len = max(nNotInserted,100u) ;
auto oldCap = ppPairs.capacity();
ppPairs.increaseCapacityBy(len);
INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
oldCap << " to "<<ppPairs.capacity()<<" in contact search in boundary region."<<END_INFO;
}
}
return true;
}
bool pFlow::twoPartContactSearch::broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const word& name
)
{
buildList(points1);
uint32 nNotInserted = 1;
// loop until the container size fits the number of contact pairs
while (nNotInserted > 0)
{
nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
(
ppPairs,
points1,
diams1,
points2,
diams2,
head_,
next_,
searchCells_,
sizeRatio_
);
if(nNotInserted)
{
// - resize the container
// note that getFull now shows the number of failed insertions.
uint32 len = max(nNotInserted,100u) ;
auto oldCap = ppPairs.capacity();
ppPairs.increaseCapacityBy(len);
INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
oldCap << " to "<<ppPairs.capacity()<<" in boundary contact search in "<< name <<END_INFO;
}
}
return true;
}

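Both `broadSearchPP` overloads above share a grow-and-retry pattern: run the search pass, count rejected insertions, enlarge the pair container by at least that amount, and repeat. A distilled sketch of the loop; the vector and the insertion pass are illustrative stand-ins for `csPairContainerType` and the kernel:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
    std::vector<std::pair<uint32_t, uint32_t>> pairs;
    pairs.reserve(8);  // deliberately under-sized initial capacity

    uint32_t nNotInserted = 1;
    while (nNotInserted > 0) {
        nNotInserted = 0;
        pairs.clear();
        for (uint32_t i = 0; i < 100; ++i) {  // stand-in for one search pass
            if (pairs.size() == pairs.capacity()) { ++nNotInserted; continue; }
            pairs.emplace_back(i, i + 1);
        }
        if (nNotInserted) {
            // grow by at least the number of failed insertions, as above
            auto oldCap = pairs.capacity();
            pairs.reserve(oldCap + std::max<uint32_t>(nNotInserted, 100u));
            std::cout << "capacity increased from " << oldCap
                      << " to " << pairs.capacity() << "\n";
        }
    }
    std::cout << "inserted " << pairs.size() << " pairs\n";
    return 0;
}
```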
@@ -1,104 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __twoPartContactSearch_hpp__
#define __twoPartContactSearch_hpp__
#include "contactSearchGlobals.hpp"
#include "scatteredFieldAccess.hpp"
#include "cells.hpp"
#include "VectorSingles.hpp"
namespace pFlow
{
class twoPartContactSearch
{
public:
using HeadType = deviceViewType3D<uint32>;
using NextType = deviceViewType1D<uint32>;
private:
cells searchCells_;
HeadType head_{ "periodic::head", 1, 1, 1 };
NextType next_{ "periodic::next", 1 };
real sizeRatio_ = 1.0;
uint32 nextCapacity_ = 0;
void checkAllocateNext(uint32 n);
void nullifyHead();
void nullifyNext(uint32 n);
void buildList(
const deviceScatteredFieldAccess<realx3> &points);
public:
twoPartContactSearch(
const box &domain,
real cellSize,
real sizeRatio = 1.0);
/// @brief Perform a broad-search for spheres in two adjacent regions.
/// Region 1 is considered the master (primary) region and region 2 the slave
/// @param ppPairs pairs container which holds i and j
/// @param points1 point positions in region 1
/// @param diams1 diameter of spheres in region 1
/// @param points2 point positions in region 2
/// @param diams2 diameter of spheres in region 2
/// @param transferVec a vector to transfer points from region 2 to region 1
/// @return true if it is successful
bool broadSearchPP(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const deviceScatteredFieldAccess<realx3> &points2,
const deviceScatteredFieldAccess<real> &diams2,
const realx3 &transferVec);
bool broadSearchPP(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const word& name);
const auto& searchCells()const
{
return searchCells_;
}
real sizeRatio()const
{
return sizeRatio_;
}
};
}
#endif //__twoPartContactSearch_hpp__

@@ -1,186 +0,0 @@
#include "twoPartContactSearchKernels.hpp"
INLINE_FUNCTION_HD
bool
sphereSphereCheckB(
const pFlow::realx3& p1,
const pFlow::realx3 p2,
pFlow::real d1,
pFlow::real d2
)
{
return pFlow::length(p2 - p1) < 0.5 * (d2 + d1);
}
void
pFlow::twoPartContactSearchKernels::buildNextHead(
const deviceScatteredFieldAccess<realx3>& points,
const cells& searchCells,
deviceViewType3D<uint32>& head,
deviceViewType1D<uint32>& next
)
{
uint32 n = points.size();
Kokkos::parallel_for(
"pFlow::ppwBndryContactSearch::buildList",
deviceRPolicyStatic(0, n),
LAMBDA_HD(uint32 i) {
int32x3 ind;
if (searchCells.pointIndexInDomain(points[i], ind))
{
// discards points out of searchCell
uint32 old =
Kokkos::atomic_exchange(&head(ind.x(), ind.y(), ind.z()), i);
next[i] = old;
}
}
);
Kokkos::fence();
}
pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points,
const deviceScatteredFieldAccess<real>& diams,
const deviceScatteredFieldAccess<realx3>& mirrorPoints,
const deviceScatteredFieldAccess<real>& mirrorDiams,
const realx3& transferVec,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
const real sizeRatio
)
{
if (points.empty())
return 0;
if (mirrorPoints.empty())
return 0;
auto nMirror = mirrorPoints.size();
uint32 getFull = 0;
Kokkos::parallel_reduce(
"pFlow::twoPartContactSearchKernels::broadSearchPP",
deviceRPolicyStatic(0, nMirror),
LAMBDA_HD(const uint32 mrrI, uint32& getFullUpdate) {
realx3 p_m = mirrorPoints(mrrI) + transferVec;
int32x3 ind_m;
if (!searchCells.pointIndexInDomain(p_m, ind_m))
return;
real d_m = sizeRatio * mirrorDiams[mrrI];
for (int ii = -1; ii < 2; ii++)
{
for (int jj = -1; jj < 2; jj++)
{
for (int kk = -1; kk < 2; kk++)
{
auto ind = ind_m + int32x3{ ii, jj, kk };
if (!searchCells.inCellRange(ind))
continue;
uint32 thisI = head(ind.x(), ind.y(), ind.z());
while (thisI != static_cast<uint32>(-1))
{
auto d_n = sizeRatio * diams[thisI];
// first item is for this boundary and second item,
// for mirror
if(sphereSphereCheckB(p_m, points[thisI], d_m, d_n)&&
ppPairs.insert(thisI,mrrI) == static_cast<uint32>(-1))
{
getFullUpdate++;
}
thisI = next(thisI);
}
}
}
}
},
getFull
);
return getFull;
}
pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points1,
const deviceScatteredFieldAccess<real>& diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
real sizeRatio
)
{
if (points1.empty())
return 0;
if (points2.empty())
return 0;
auto nP2 = points2.size();
auto points2View = points2.deviceView();
auto diams2View = diams2.deviceView();
uint32 getFull = 0;
Kokkos::parallel_reduce(
"pFlow::twoPartContactSearchKernels::broadSearchPP",
deviceRPolicyStatic(0, nP2),
LAMBDA_HD(const uint32 i2, uint32& getFullUpdate) {
realx3 p_m = points2View(i2);
int32x3 ind_m;
if (!searchCells.pointIndexInDomain(p_m, ind_m))
return;
real d_m = sizeRatio * diams2View[i2];
for (int ii = -1; ii < 2; ii++)
{
for (int jj = -1; jj < 2; jj++)
{
for (int kk = -1; kk < 2; kk++)
{
auto ind = ind_m + int32x3{ ii, jj, kk };
if (!searchCells.inCellRange(ind))
{
continue;
}
uint32 i1 = head(ind.x(), ind.y(), ind.z());
while (i1 != static_cast<uint32>(-1))
{
auto d_n = sizeRatio * diams1[i1];
// first item is for this boundary and second item,
// for mirror
if(sphereSphereCheckB(p_m, points1[i1], d_m, d_n)&&
ppPairs.insert(i1,i2) == static_cast<uint32>(-1))
{
getFullUpdate++;
}
i1 = next(i1);
}
}
}
}
},
getFull
);
return getFull;
}

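`buildNextHead` and the `broadSearchPP` kernels above implement a head/next cell linked list: each point is pushed onto the chain of its cell with an atomic exchange, and queries walk the chains of the 27 neighboring cells. A serial sketch of the same data structure; grid dimensions and point coordinates are illustrative:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const double cellSize = 1.0;
    const int nx = 4, ny = 4, nz = 1;  // illustrative grid
    std::vector<double> x{0.2, 0.4, 1.3, 2.7};
    std::vector<double> y{0.1, 0.3, 1.8, 2.2};

    const uint32_t NIL = static_cast<uint32_t>(-1);  // same sentinel as the kernels
    std::vector<uint32_t> head(nx * ny * nz, NIL);
    std::vector<uint32_t> next(x.size(), NIL);

    // build: the newest point becomes the head of its cell's chain
    for (uint32_t i = 0; i < x.size(); ++i) {
        int cx = static_cast<int>(x[i] / cellSize);
        int cy = static_cast<int>(y[i] / cellSize);
        uint32_t c = static_cast<uint32_t>(cy * nx + cx);  // z slab omitted (nz == 1)
        next[i] = head[c];  // serial version of the atomic_exchange above
        head[c] = i;
    }

    // traverse one cell's chain, as the 27-neighbor loop does per cell
    for (uint32_t i = head[0]; i != NIL; i = next[i])
        std::cout << "point " << i << " is in cell 0\n";
    return 0;
}
```

The head/next layout stores every chain in two flat arrays with no per-cell allocation, which is what allows the device kernel to build it with a single `atomic_exchange` per point.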
@@ -1,49 +0,0 @@
#ifndef __twoPartContactSearchKernels_hpp__
#define __twoPartContactSearchKernels_hpp__
#include "contactSearchGlobals.hpp"
#include "cells.hpp"
#include "contactSearchFunctions.hpp"
#include "scatteredFieldAccess.hpp"
#include "VectorSingles.hpp"
namespace pFlow::twoPartContactSearchKernels
{
void buildNextHead(
const deviceScatteredFieldAccess<realx3> &points,
const cells &searchCells,
deviceViewType3D<uint32> &head,
deviceViewType1D<uint32> &next );
uint32 broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points,
const deviceScatteredFieldAccess<real> &diams,
const deviceScatteredFieldAccess<realx3> &mirrorPoints,
const deviceScatteredFieldAccess<real> &mirrorDiams,
const realx3 &transferVec,
const deviceViewType3D<uint32> &head,
const deviceViewType1D<uint32> &next,
const cells &searchCells,
real sizeRatio
);
uint32
broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points1,
const deviceScatteredFieldAccess<real>& diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
real sizeRatio
);
}
#endif //__twoPartContactSearchKernels_hpp__

@@ -1,132 +0,0 @@
#ifndef __processorBoundarySIKernels_hpp__
#define __processorBoundarySIKernels_hpp__
namespace pFlow::MPI::processorBoundarySIKernels
{
template<typename ContactListType, typename ContactForceModel>
inline
void sphereSphereInteraction
(
const word& kernalName,
real dt,
const ContactListType& cntctList,
const ContactForceModel& forceModel,
const deviceScatteredFieldAccess<realx3>& thisPoints,
const deviceViewType1D<real>& thisDiam,
const deviceViewType1D<uint32>& thisPropId,
const deviceViewType1D<realx3>& thisVel,
const deviceViewType1D<realx3>& thisRVel,
const deviceViewType1D<realx3>& thisCForce,
const deviceViewType1D<realx3>& thisCTorque,
const deviceViewType1D<realx3>& neighborPoints,
const deviceViewType1D<real>& neighborDiam,
const deviceViewType1D<uint32>& neighborPropId,
const deviceViewType1D<realx3>& neighborVel,
const deviceViewType1D<realx3>& neighborRVel,
const deviceViewType1D<realx3>& neighborCForce,
const deviceViewType1D<realx3>& neighborCTorque
)
{
using ValueType = typename ContactListType::ValueType;
uint32 ss = cntctList.size();
if(ss == 0u)return;
uint32 lastItem = cntctList.loopCount();
Kokkos::parallel_for(
kernalName,
deviceRPolicyDynamic(0,lastItem),
LAMBDA_HD(uint32 n)
{
if(!cntctList.isValid(n))return;
auto [i,j] = cntctList.getPair(n);
uint32 ind_i = thisPoints.index(i);
uint32 ind_j = j;
real Ri = 0.5*thisDiam[ind_i];
real Rj = 0.5*neighborDiam[ind_j];
realx3 xi = thisPoints.field()[ind_i];
realx3 xj = neighborPoints[ind_j];
real dist = length(xj-xi);
real ovrlp = (Ri+Rj) - dist;
if( ovrlp >0.0 )
{
auto Nij = (xj-xi)/max(dist,smallValue);
auto wi = thisRVel[ind_i];
auto wj = neighborRVel[ind_j];
auto Vr = thisVel[ind_i] - neighborVel[ind_j] + cross((Ri*wi+Rj*wj), Nij);
auto history = cntctList.getValue(n);
int32 propId_i = thisPropId[ind_i];
int32 propId_j = neighborPropId[ind_j];
realx3 FCn, FCt, Mri, Mrj, Mij, Mji;
// calculates contact force
forceModel.contactForce(
dt, i, j,
propId_i, propId_j,
Ri, Rj,
ovrlp,
Vr, Nij,
history,
FCn, FCt);
forceModel.rollingFriction(
dt, i, j,
propId_i, propId_j,
Ri, Rj,
wi, wj,
Nij,
FCn,
Mri, Mrj);
auto M = cross(Nij,FCt);
Mij = Ri*M+Mri;
Mji = Rj*M+Mrj;
auto FC = FCn + FCt;
Kokkos::atomic_add(&thisCForce[ind_i].x_,FC.x_);
Kokkos::atomic_add(&thisCForce[ind_i].y_,FC.y_);
Kokkos::atomic_add(&thisCForce[ind_i].z_,FC.z_);
Kokkos::atomic_add(&neighborCForce[ind_j].x_,-FC.x_);
Kokkos::atomic_add(&neighborCForce[ind_j].y_,-FC.y_);
Kokkos::atomic_add(&neighborCForce[ind_j].z_,-FC.z_);
Kokkos::atomic_add(&thisCTorque[ind_i].x_, Mij.x_);
Kokkos::atomic_add(&thisCTorque[ind_i].y_, Mij.y_);
Kokkos::atomic_add(&thisCTorque[ind_i].z_, Mij.z_);
Kokkos::atomic_add(&neighborCTorque[ind_j].x_, Mji.x_);
Kokkos::atomic_add(&neighborCTorque[ind_j].y_, Mji.y_);
Kokkos::atomic_add(&neighborCTorque[ind_j].z_, Mji.z_);
cntctList.setValue(n,history);
}
else
{
cntctList.setValue(n, ValueType());
}
});
Kokkos::fence();
}
} //pFlow::MPI::processorBoundarySIKernels
#endif //__processorBoundarySIKernels_hpp__

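The kernel above computes the overlap `ovrlp = (Ri+Rj) - dist` and the unit normal `Nij` before delegating to `forceModel.contactForce` and `rollingFriction`. A minimal linear-spring normal force along the same lines; the small vector type and the stiffness are illustrative, not phasicFlow's contact-force models:

```cpp
#include <cmath>
#include <cstdio>

struct vec3 { double x, y, z; };
vec3 operator-(vec3 a, vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
vec3 operator*(double s, vec3 a) { return {s * a.x, s * a.y, s * a.z}; }
double length(vec3 a) { return std::sqrt(a.x * a.x + a.y * a.y + a.z * a.z); }

int main() {
    // two spheres with radii Ri, Rj and centers xi, xj, as in the kernel
    double Ri = 0.005, Rj = 0.005;
    vec3 xi{0.0, 0.0, 0.0}, xj{0.009, 0.0, 0.0};

    double dist  = length(xj - xi);
    double ovrlp = (Ri + Rj) - dist;           // same overlap definition as above
    if (ovrlp > 0.0) {
        vec3 Nij = (1.0 / dist) * (xj - xi);   // contact normal, i -> j
        double kn = 1.0e4;                     // illustrative normal stiffness
        vec3 FCn = (-kn * ovrlp) * Nij;        // repulsive normal force on sphere i
        std::printf("overlap %.4g m, Fn = (%.3g, %.3g, %.3g) N\n",
                    ovrlp, FCn.x, FCn.y, FCn.z);
    }
    return 0;
}
```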
@@ -1,256 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "processorBoundarySIKernels.hpp"
template <typename cFM, typename gMM>
pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySphereInteraction(
const boundaryBase &boundary,
const sphereParticles &sphPrtcls,
const GeometryMotionModel &geomMotion)
:
boundarySphereInteraction<cFM,gMM>(
boundary,
sphPrtcls,
geomMotion
),
masterInteraction_(boundary.isBoundaryMaster())
{
if(masterInteraction_)
{
this->allocatePPPairs();
this->allocatePWPairs();
}
}
#ifdef BoundaryModel1
template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
real dt,
const ContactForceModel &cfModel,
uint32 step
)
{
// master processor calculates the contact force/torque and sends data back to the
// neighbor processor (slave processor).
// slave processor receives the data and adds the data to the internalField
if(masterInteraction_)
{
if(step==1)return true;
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
if(step == 2 )
{
iter++;
pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
"ppBoundaryInteraction."+this->boundaryName(),
dt,
this->ppPairs(),
cfModel,
this->boundary().thisPoints(),
sphPar.diameter().deviceViewAll(),
sphPar.propertyId().deviceViewAll(),
sphPar.velocity().deviceViewAll(),
sphPar.rVelocity().deviceViewAll(),
sphPar.contactForce().deviceViewAll(),
sphPar.contactTorque().deviceViewAll(),
this->boundary().neighborProcPoints().deviceViewAll(),
sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
cfBndry.neighborProcField().deviceViewAll(),
ctBndry.neighborProcField().deviceViewAll()
);
return true;
}
else if(step == 3 )
{
cfBndry.sendBackData();
ctBndry.sendBackData();
return true;
}
return false;
}
else
{
if(step == 1 )
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.recieveBackData();
ctBndry.recieveBackData();
return false;
}
else if(step == 11)
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.addBufferToInternalField();
ctBndry.addBufferToInternalField();
return true;
}
return false;
}
return false;
}
#else
template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
real dt,
const ContactForceModel &cfModel,
uint32 step
)
{
// master processor calculates the contact force/torque and sends data back to the
// neighbor processor (slave processor).
// slave processor receives the data and adds the data to the internalField
if(masterInteraction_)
{
if(step==1)return true;
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
if(step == 2 )
{
pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
"ppBoundaryInteraction."+this->boundaryName(),
dt,
this->ppPairs(),
cfModel,
this->boundary().thisPoints(),
sphPar.diameter().deviceViewAll(),
sphPar.propertyId().deviceViewAll(),
sphPar.velocity().deviceViewAll(),
sphPar.rVelocity().deviceViewAll(),
sphPar.contactForce().deviceViewAll(),
sphPar.contactTorque().deviceViewAll(),
this->boundary().neighborProcPoints().deviceViewAll(),
sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
cfBndry.neighborProcField().deviceViewAll(),
ctBndry.neighborProcField().deviceViewAll()
);
return true;
}
else if(step == 3 )
{
cfBndry.sendBackData();
ctBndry.sendBackData();
return true;
}
else if(step == 11 )
{
cfBndry.updateBoundaryFromSlave();
ctBndry.updateBoundaryFromSlave();
return true;
}
return false;
}
else
{
if(step == 1 )
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.recieveBackData();
ctBndry.recieveBackData();
return false;
}
else if(step == 11)
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.addBufferToInternalField();
cfBndry.updateBoundaryToMaster();
ctBndry.addBufferToInternalField();
ctBndry.updateBoundaryToMaster();
return true;
}
return false;
}
return false;
}
#endif

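`sphereSphereInteraction` above is driven as a small step machine (steps 1, 2, 3, 11) so MPI transfers can be interleaved with computation. A stripped-down dispatcher showing that calling pattern; only the step numbers follow the file, the rest is illustrative:

```cpp
#include <iostream>

// Master computes and sends back; slave posts receives early and folds the
// buffers into its internal field at the end of the step sequence.
bool interactionStep(bool master, int step) {
    if (master) {
        if (step == 2) { std::cout << "master: compute boundary forces\n"; return true; }
        if (step == 3) { std::cout << "master: send force/torque back\n";  return true; }
        return step == 1;  // step 1: nothing to do yet on the master
    }
    if (step == 1)  { std::cout << "slave: post receives\n";                return false; }
    if (step == 11) { std::cout << "slave: add buffer to internal field\n"; return true; }
    return false;
}

int main() {
    const int steps[] = {1, 2, 3, 11};
    for (int step : steps) {
        interactionStep(true, step);   // master side of the boundary
        interactionStep(false, step);  // slave side of the boundary
    }
    return 0;
}
```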
@@ -1,93 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundarySphereInteraction_hpp__
#define __processorBoundarySphereInteraction_hpp__
#include "boundarySphereInteraction.hpp"
#include "processorBoundaryField.hpp"
#include "boundaryProcessor.hpp"
namespace pFlow::MPI
{
template<typename contactForceModel,typename geometryMotionModel>
class processorBoundarySphereInteraction
:
public boundarySphereInteraction<contactForceModel, geometryMotionModel>
{
public:
using PBSInteractionType =
processorBoundarySphereInteraction<contactForceModel,geometryMotionModel>;
using BSInteractionType =
boundarySphereInteraction<contactForceModel, geometryMotionModel>;
using GeometryMotionModel = typename BSInteractionType::GeometryMotionModel;
using ContactForceModel = typename BSInteractionType::ContactForceModel;
using MotionModel = typename geometryMotionModel::MotionModel;
using ModelStorage = typename ContactForceModel::contactForceStorage;
using IdType = typename BSInteractionType::IdType;
using IndexType = typename BSInteractionType::IndexType;
using ContactListType = typename BSInteractionType::ContactListType;
private:
bool masterInteraction_;
public:
TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel);
processorBoundarySphereInteraction(
const boundaryBase& boundary,
const sphereParticles& sphPrtcls,
const GeometryMotionModel& geomMotion
);
add_vCtor
(
BSInteractionType,
PBSInteractionType,
boundaryBase
);
~processorBoundarySphereInteraction()override = default;
bool sphereSphereInteraction(
real dt,
const ContactForceModel& cfModel,
uint32 step)override;
};
}
#include "processorBoundarySphereInteraction.cpp"
#endif //__processorBoundarySphereInteraction_hpp__

@@ -1,17 +0,0 @@
#include "processorBoundarySphereInteraction.hpp"
#include "geometryMotions.hpp"
#include "contactForceModels.hpp"
template class pFlow::MPI::processorBoundarySphereInteraction
<
pFlow::cfModels::limitedNonLinearNormalRolling,
pFlow::rotationAxisMotionGeometry
>;
template class pFlow::MPI::processorBoundarySphereInteraction
<
pFlow::cfModels::nonLimitedNonLinearNormalRolling,
pFlow::rotationAxisMotionGeometry
>;

@@ -359,7 +359,7 @@ bool pFlow::sphereInteraction<cFM,gMM, cLT>::hearChanges
if(msg.equivalentTo(message::ITEMS_REARRANGE))
{
notImplementedFunction;
return true;
return false;
}
fatalErrorInFunction<<"Event "<< msg.eventNames()<<

@@ -1,46 +0,0 @@
#include "processorBoundarySphereParticles.hpp"
#include "sphereParticles.hpp"
#include "boundaryProcessor.hpp"
pFlow::processorBoundarySphereParticles::processorBoundarySphereParticles(
const boundaryBase &boundary,
sphereParticles &prtcls
)
:
boundarySphereParticles(boundary, prtcls)
{
}
bool pFlow::processorBoundarySphereParticles::acceleration(const timeInfo &ti, const realx3& g)
{
#ifndef BoundaryModel1
if(isBoundaryMaster())
{
auto thisIndex = thisBoundaryIndex();
auto mass = Particles().mass().BoundaryField(thisIndex).neighborProcField().deviceView();
auto I = Particles().I().BoundaryField(thisIndex).neighborProcField().deviceView();
auto cf = Particles().contactForce().BoundaryField(thisIndex).neighborProcField().deviceView();
auto ct = Particles().contactTorque().BoundaryField(thisIndex).neighborProcField().deviceView();
auto acc = Particles().acceleration().BoundaryField(thisIndex).neighborProcField().deviceView();
auto rAcc = Particles().rAcceleration().BoundaryField(thisIndex).neighborProcField().deviceView();
Kokkos::parallel_for(
"processorBoundary::acceleration."+this->boundaryName(),
deviceRPolicyStatic(0,mass.size()),
LAMBDA_HD(uint32 i){
acc[i] = cf[i]/mass[i] + g;
rAcc[i] = ct[i]/I[i];
});
Kokkos::fence();
}
#endif
return true;
}

@@ -1,38 +0,0 @@
#ifndef __processorBoundarySphereParticles_hpp__
#define __processorBoundarySphereParticles_hpp__
#include "boundarySphereParticles.hpp"
namespace pFlow
{
class processorBoundarySphereParticles
:
public boundarySphereParticles
{
public:
/// type info
TypeInfo("boundarySphereParticles<MPI,processor>");
processorBoundarySphereParticles(
const boundaryBase &boundary,
sphereParticles& prtcls
);
add_vCtor(
boundarySphereParticles,
processorBoundarySphereParticles,
boundaryBase
);
bool acceleration(const timeInfo& ti, const realx3& g)override;
};
}
#endif

@@ -1,70 +0,0 @@
#include "MPIParticleIdHandler.hpp"
#include "procCommunication.hpp"
pFlow::MPI::MPIParticleIdHandler::MPIParticleIdHandler
(
pointStructure& pStruct
)
:
particleIdHandler(pStruct)
{
initialIdCheck();
}
pFlow::Pair<pFlow::uint32, pFlow::uint32>
pFlow::MPI::MPIParticleIdHandler::getIdRange(uint32 nNewParticles)
{
uint32 startId;
if(maxId_==-1)
{
startId = 0;
}
else
{
startId = maxId_+1;
}
uint32 endId = startId+nNewParticles-1;
maxId_ = endId;
return {startId, endId};
}
bool pFlow::MPI::MPIParticleIdHandler::initialIdCheck()
{
/// empty point structure / no particles in simulation
uint32 maxId = -1;
if( !pStruct().empty() )
{
maxId = max( *this );
}
auto maxIdAll = procVector<uint32>(pFlowProcessors());
auto numAll = procVector<uint32>(pFlowProcessors());
auto comm = procCommunication(pFlowProcessors());
comm.collectAllToAll(maxId, maxIdAll);
comm.collectAllToAll(size(),numAll);
uint32 n = 0;
for(uint32 i=0; i<maxIdAll.size(); i++)
{
if( maxIdAll[i]==-1 && numAll[i]!= 0)
{
if(comm.localRank() == i)
{
fillSequence(*this, n);
maxId_ = size()-1 + n;
}
}
else
{
if(comm.localRank() == i)
{
maxId_ = maxIdAll[i];
}
}
n += numAll[i];
}
return true;
}

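`initialIdCheck` above gathers each rank's max id and particle count, then walks the ranks while accumulating an offset `n`, so ranks without valid ids renumber their particles into contiguous blocks. The same offset logic, sketched serially with illustrative rank data:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const uint32_t NO_ID = static_cast<uint32_t>(-1);     // the maxId_ == -1 sentinel
    std::vector<uint32_t> maxIdAll{NO_ID, NO_ID, NO_ID};  // per-rank max id (illustrative)
    std::vector<uint32_t> numAll{100, 250, 50};           // per-rank particle count

    uint32_t n = 0;
    for (std::size_t i = 0; i < maxIdAll.size(); ++i) {
        if (maxIdAll[i] == NO_ID && numAll[i] != 0) {
            // this rank gets ids n .. n + numAll[i] - 1, matching
            // fillSequence(*this, n) and maxId_ = size() - 1 + n above
            std::cout << "rank " << i << ": ids " << n
                      << " .. " << n + numAll[i] - 1 << "\n";
        }
        n += numAll[i];
    }
    return 0;
}
```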
@@ -1,60 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPIParticleIdHandler_hpp__
#define __MPIParticleIdHandler_hpp__
#include "particleIdHandler.hpp"
namespace pFlow::MPI
{
class MPIParticleIdHandler : public particleIdHandler
{
private:
uint32 maxId_ = -1;
bool initialIdCheck() override;
public:
ClassInfo("particleIdHandler<MPI>");
explicit MPIParticleIdHandler(pointStructure& pStruct);
~MPIParticleIdHandler() override = default;
add_vCtor(
particleIdHandler,
MPIParticleIdHandler,
pointStructure
);
Pair<uint32, uint32> getIdRange(uint32 nNewParticles) override;
uint32 maxId() const override
{
return maxId_;
}
};
}
#endif //__MPIParticleIdHandler_hpp__

@@ -185,18 +185,6 @@ public:
return contactTorque_;
}
inline
uint32PointField_D& particleId()
{
return idHandler_();
}
inline
const uint32PointField_D& particleId() const
{
return idHandler_();
}
inline
uint32 maxId()const
{

@@ -9,7 +9,6 @@ set(SourceFiles
# Regions
region/regionPoints/regionPoints/regionPoints.cpp
region/regionPoints/sphereRegionPoints/sphereRegionPoints.cpp
region/regionPoints/boxRegionPoints/boxRegionPoints.cpp
region/regionPoints/lineRegionPoints/lineRegionPoints.cpp
region/regionPoints/centerPointsRegionPoints/centerPointsRegionPoints.cpp
region/regionPoints/multipleSpheresRegionPoints/multipleSpheresRegionPoints.cpp

@@ -467,7 +467,7 @@ pFlow::postprocessData::fieldsDataBase::fieldsDataBase
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime
timeValue startTime
)
:
time_(control.time()),
@@ -492,7 +492,7 @@ pFlow::postprocessData::fieldsDataBase::fieldsDataBase
}
}
pFlow::TimeValueType pFlow::postprocessData::fieldsDataBase::currentTime() const
pFlow::timeValue pFlow::postprocessData::fieldsDataBase::currentTime() const
{
return time_.currentTime();
}
@@ -914,7 +914,7 @@ pFlow::uniquePtr<pFlow::postprocessData::fieldsDataBase>
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime
timeValue startTime
)
{
word dbType;

@@ -78,7 +78,7 @@ private:
anyList allFields_;
/// Map to store the last capture time of each field
wordMap<TimeValueType> captureTime_;
wordMap<timeValue> captureTime_;
/// Reference to the Time object
Time& time_;
@@ -178,7 +178,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime);
timeValue startTime);
/// no copy constructor
fieldsDataBase(const fieldsDataBase&) = delete;
@@ -203,7 +203,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime
timeValue startTime
),
(control, postDict, inSimulation, startTime)
);
@@ -211,7 +211,7 @@
// - Public Access Functions
/// returns the current time
TimeValueType currentTime()const;
timeValue currentTime()const;
/// const ref to object Time
const Time& time()const
@@ -282,7 +282,7 @@ public:
/// Get the next available time folder after the current time folder
/// This is only used for post-simulation processing
virtual
TimeValueType getNextTimeFolder()const
timeValue getNextTimeFolder()const
{
return -1.0;
}
@@ -291,7 +291,7 @@ public:
/// This is used only for post-simulation processing
/// @returns the time value of the next folder.
virtual
TimeValueType setToNextTimeFolder()
timeValue setToNextTimeFolder()
{
return -1.0;
}
@@ -300,7 +300,7 @@ public:
/// This is used only for post-simulation processing
/// @returns the time value of the skipped folder
virtual
TimeValueType skipNextTimeFolder()
timeValue skipNextTimeFolder()
{
return -1.0;
}
@@ -316,7 +316,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime);
timeValue startTime);
};
} // namespace pFlow::postprocessData

@@ -49,7 +49,7 @@ pFlow::postprocessData::simulationFieldsDataBase::simulationFieldsDataBase
systemControl &control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime
timeValue startTime
)
:
fieldsDataBase(control, postDict, inSimulation, startTime),

@@ -60,7 +60,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
TimeValueType startTime);
timeValue startTime);
~simulationFieldsDataBase() override = default;

@@ -86,12 +86,12 @@ private:
word fieldName_;
/// Timestamp when mask was last updated (-1 indicates never updated)
TimeValueType lastUpdated_ = -1;
timeValue lastUpdated_ = -1;
/// Updates the mask based on current field values if needed, returns true if successful
bool updateMask()
{
TimeValueType t = database().currentTime();
timeValue t = database().currentTime();
if( equal( t, lastUpdated_)) return true;
@@ -206,11 +206,11 @@ private:
std::vector<bool> mask_;
TimeValueType lastUpdated_ = -1;
timeValue lastUpdated_ = -1;
bool updateMask()
{
TimeValueType t = database().currentTime();
timeValue t = database().currentTime();
if( equal( t, lastUpdated_)) return true;

@@ -46,7 +46,7 @@ inline
bool writeField
(
iOstream& os,
TimeValueType t,
timeValue t,
const regionField<T> field,
uint32 threshold,
const T& defValue=T{}

@@ -52,7 +52,7 @@ template<typename T>
inline bool writeField
(
iOstream& os,
TimeValueType t,
timeValue t,
const regionField<T>& field,
const regionPoints& regPoints,
const T& invalidVal = T{}

@@ -27,7 +27,7 @@ Licence:
pFlow::postprocessData::postprocessData::postprocessData
(
const systemControl &control,
TimeValueType startTime
timeValue startTime
)
:
auxFunctions(control),
@@ -50,7 +50,7 @@ pFlow::postprocessData::postprocessData::postprocessData
if( !dict_.fileExist() || !dict_.headerOk() )
{
WARNING<<"You requested postprocessData function while,"
<<" the dictionary settings/postprocessDataDict does not exist."
<<" the dictionary system/postprocessDataDict does not exist."
<<" This feature is disabled in the current run."<<END_WARNING;
return;
}

@@ -85,7 +85,7 @@ public:
/// this constructor is used when postprocesing is active
/// during simulation.
/// @param control const reference to systemControl
postprocessData(const systemControl& control, TimeValueType startTime = -1.0);
postprocessData(const systemControl& control, timeValue startTime = -1.0);
~postprocessData()override = default;

@@ -5,14 +5,15 @@ The `PostprocessData` module in phasicFlow provides powerful tools for analyzing
- in-simulation: postprocessing that is active during the simulation. When running a solver, it allows real-time data analysis and adjustments based on the simulation's current state. See below for how to activate in-simulation postprocessing.
- post-simulation: postprocessing performed after the simulation has completed. It allows detailed analysis of the simulation results, including data extraction and visualization based on the results stored in time folders. To use it, run the utility `postprocessPhasicFlow` in a terminal (in the simulation case setup folder); this utility reads the `postprocessDataDict` file and performs the specified operations on the simulation data.
## 1. Overview
### Important Notes
Postprocessing in phasicFlow allows you to:
* **NOTE 1:**
In-simulation postprocessing is not implemented for MPI execution, so do not use it in MPI runs. For post-simulation postprocessing, you can use the `postprocessPhasicFlow` utility without MPI, even though the actual simulation may have been run with MPI.
* **NOTE 2:**
In post-simulation mode, all timeControl settings are ignored. Postprocessing is performed for all time folders available in the case directory, or, if a time range is specified on the command line, only for the time folders within that range.
- Extract information about particles in specific regions of the domain
- Calculate statistical properties such as averages and sums of particle attributes
- Track specific particles throughout the simulation
- Apply different weighting methods when calculating statistics
- Perform postprocessing at specific time intervals
## Table of Contents
@@ -39,16 +40,6 @@ In post-simulation mode, all timeControl settings are ignored. The postprocessin
- [9. Mathematical Formulations](#9-mathematical-formulations)
- [10. A complete dictionary file (postprocessDataDict)](#10-a-complete-dictionary-file-postprocessdatadict)
## 1. Overview
Postprocessing in phasicFlow allows you to:
- Extract information about particles in specific regions of the domain
- Calculate statistical properties such as averages and sums of particle attributes
- Track specific particles throughout the simulation
- Apply different weighting methods when calculating statistics
- Perform postprocessing at specific time intervals
## 2. Setting Up Postprocessing
Postprocessing is configured through a dictionary file named `postprocessDataDict` which should be placed in the `settings` directory. Below is a detailed explanation of the configuration options.
@@ -127,17 +118,12 @@ Regions define where in the domain the postprocessing operations are applied:
| Region Type | Description | Required Parameters | Compatible with |
|-------------|-------------|---------------------|-----------------|
| `sphere` | A spherical region | `radius`, `center` defined in `sphereInfo` dict | bulk |
| `multipleSpheres` | Multiple spherical regions | `centers`, `radii` defined in `multipleSpheresInfo` dict | bulk |
| `line` | Spheres along a line with specified radius | `p1`, `p2`, `nSpheres`, `radius` defined in `lineInfo` dict | bulk |
| `box` | A cuboid region | `min`, `max` defined in `boxInfo` dict | bulk |
| `centerPoints`* | Specific particles selected by ID | `ids` | individual |
| `centerPoints`* | Specific particles selected by center points located in a box | `boxInfo` | individual |
| `centerPoints`* | Specific particles selected by center points located in a sphere | `sphereInfo` | individual |
| `centerPoints`* | Specific particles selected by center points located in a cylinder | `cylinderInfo` | individual |
\* Particle selection is done when the simulation reaches the time specified by `startTime` of the post-process component, and this selection remains intact up to the end of the simulation. This is very useful for particle tracking or for analyzing the behavior of specific particles over time.
| `sphere` | A spherical region | `radius`, `center` | bulk |
| `multipleSpheres` | Multiple spherical regions | `centers`, `radii` | bulk |
| `line` | Spheres along a line with specified radius | `p1`, `p2`, `nSpheres`, `radius` | bulk |
| `centerPoints` | Specific particles selected by ID | `ids` | individual |
## 6. Processing Operations for Bulk Properties
## 6. Processing Operations
Within each processing region of type `bulk`, you can define multiple operations to be performed:
@@ -461,7 +447,7 @@ components
processMethod particleProbe;
processRegion centerPoints;
selector id;
field component(velocity,y);
field component(position,y);
ids (0 10 100);
timeControl default; // other options are settings, timeStep, simulationTime
// settings: uses parameters from settingsDict file
@@ -470,35 +456,6 @@ components
// default: uses the default time control (defined in defaultTimeControl).
// default behavior: if you do not specify it, parameters in defaultTimeControl are used.
}
particlesTrack
{
processMethod particleProbe;
processRegion centerPoints;
// all particles whose centers are located inside this box
// are selected. Selection occurs at startTime: particles
// that are inside the box at t = startTime.
selector box;
boxInfo
{
min (0 0 0);
max (0.1 0.05 0.05);
}
// center position of selected particles are processed
field position;
timeControl simulationTime;
// execution starts at 1.0 s
startTime 1.0;
// execution ends at 10 s
endTime 10;
// execution interval of this component
executionInterval 0.02;
}
on_single_sphere
{
@@ -608,4 +565,5 @@ components
}
);
```

@@ -1,52 +0,0 @@
#include "boxRegionPoints.hpp"
#include "fieldsDataBase.hpp"
#include "numericConstants.hpp"
namespace pFlow::postprocessData
{
boxRegionPoints::boxRegionPoints
(
const dictionary &dict,
fieldsDataBase &fieldsDataBase
)
:
regionPoints(dict, fieldsDataBase),
boxRegion_(dict.subDict("boxInfo")),
volume_
(
(boxRegion_.maxPoint().x() - boxRegion_.minPoint().x()) *
(boxRegion_.maxPoint().y() - boxRegion_.minPoint().y()) *
(boxRegion_.maxPoint().z() - boxRegion_.minPoint().z())
),
diameter_(2 * pow(3 * volume_ / 4.0 / Pi, 1.0 / 3.0)),
selectedPoints_("selectedPoints")
{
}
bool boxRegionPoints::update()
{
const auto points = database().updatePoints();
selectedPoints_.clear();
for(uint32 i = 0; i < points.size(); ++i)
{
if( boxRegion_.isInside(points[i]))
{
selectedPoints_.push_back(i);
}
}
return true;
}
bool boxRegionPoints::write(iOstream &os) const
{
os <<"# Single box\n";
os <<"# min point: "<< boxRegion_.minPoint() <<endl;
os <<"# max point: "<< boxRegion_.maxPoint() << endl;
os <<"time"<< tab <<"value"<<endl;
return true;
}
} // End namespace pFlow::postprocessData

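`boxRegionPoints` above reports, through `eqDiameters()`, the diameter of the sphere whose volume equals the box volume: d = 2*(3V/(4*Pi))^(1/3). A quick numeric check with illustrative box edges:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    const double pi = 3.14159265358979323846;
    double lx = 0.1, ly = 0.05, lz = 0.05;  // illustrative box edge lengths
    double volume = lx * ly * lz;
    // same formula as the diameter_ initializer above
    double diameter = 2.0 * std::pow(3.0 * volume / (4.0 * pi), 1.0 / 3.0);
    std::printf("V = %.3e m^3, equivalent diameter = %.4f m\n", volume, diameter);
    return 0;
}
```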
@@ -1,171 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
/**
* @file boxRegionPoints.hpp
* @brief A class representing a box region for point selection
*
* This class provides functionality to select points within a box region
* and to compute related properties such as volume and equivalent diameter.
* It inherits from regionPoints and implements all required virtual methods.
*
* @see regionPoints
* @see box
* @see fieldsDataBase
*/
#ifndef __boxRegionPoints_hpp__
#define __boxRegionPoints_hpp__
#include "regionPoints.hpp"
#include "box.hpp"
#include "Vectors.hpp"
namespace pFlow::postprocessData
{
class boxRegionPoints
:
public regionPoints
{
private:
/// box object defining the region for point selection
box boxRegion_;
/// Volume of the box region
real volume_;
/// Diameter of the box region
real diameter_;
/// Center point of the box region (stored as a member so that centers() can return a stable span)
realx3 center_ = 0.5 * (boxRegion_.minPoint() + boxRegion_.maxPoint());
/// Indices of points that are selected by this region
uint32Vector selectedPoints_;
public:
TypeInfo(box::TYPENAME());
/**
* @brief Construct a box region for point selection
*
* @param dict Dictionary containing boxInfo dictionary
* @param fieldsDataBase Database containing fields data
*/
boxRegionPoints(
const dictionary& dict,
fieldsDataBase& fieldsDataBase);
/// Destructor
~boxRegionPoints() override = default;
/**
* @brief Get the number of regions (always 1 for box)
* @return Always returns 1
*/
uint32 size()const override
{
return 1;
}
/**
* @brief Check if the region is empty
* @return Always returns false
*/
bool empty()const override
{
return false;
}
/**
* @brief Get the volume of the box region
* @return A span containing the volume of the region
*/
span<const real> volumes()const override
{
return span<const real>(&volume_, 1);
}
/**
* @brief Get the equivalent diameter of the box region
* @return A span containing the diameter of the region
*/
span<const real> eqDiameters()const override
{
return span<const real>(&diameter_, 1);
}
/**
* @brief Get the center of the box region
* @return A span containing the center point of the region
*/
span<const realx3> centers()const override
{
// center_ is a data member, so the returned span remains valid
return span<const realx3>(&center_, 1);
}
/**
* @brief Get the indices of points within the region (const version)
* @param elem Element index (ignored as there's only one box)
* @return A span containing indices of points within the region
*/
span<const uint32> indices(uint32 elem)const override
{
return span<const uint32>(selectedPoints_.data(), selectedPoints_.size());
}
/**
* @brief Get the indices of points within the region (non-const version)
* @param elem Element index (ignored as there's only one box)
* @return A span containing indices of points within the region
*/
span<uint32> indices(uint32 elem) override
{
return span<uint32>(selectedPoints_.data(), selectedPoints_.size());
}
/**
* @brief Update the points selected by this region
* @return True if update was successful
*/
bool update()override;
/**
* @brief Determine if data should be written to the same time file
* @return Always returns true
*/
bool writeToSameTimeFile()const override
{
return true;
}
/**
* @brief Write region data to output stream
* @param os Output stream to write to
* @return True if write was successful
*/
bool write(iOstream& os)const override;
};
}
#endif // __boxRegionPoints_hpp__
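A usage sketch of this interface (assuming `dict` is a dictionary that contains a `boxInfo` sub-dictionary and `db` is a valid `fieldsDataBase`; both names are placeholders):

```
// a minimal sketch, not taken from the code base
pFlow::postprocessData::boxRegionPoints region(dict, db);

region.update();                       // select the points inside the box
auto ids     = region.indices(0);      // indices of the selected points
auto volumes = region.volumes();       // one-element span: volume of the box
auto diams   = region.eqDiameters();   // one-element span: equivalent diameter
```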

View File

@ -8,8 +8,6 @@ namespace pFlow::postprocessData
bool centerPointsRegionPoints::selectIds()
{
// check whether the ids of particles have already been found
// if not, find them
if(!firstTimeUpdate_) return true;
firstTimeUpdate_ = false;
@ -28,20 +26,16 @@ bool centerPointsRegionPoints::selectIds()
}
}
else
// TODO: this should be corrected to select ids of particles
// that are selected based on the selector (currently it is the other way around)
{
auto selectorPtr = pStructSelector::create(
selector,
database().pStruct(),
probDict_.subDict(selector+"Info"));
auto selectedPoints = selectorPtr->selectedPoints();
const auto& idField = database().updateFieldUint32(idName_);
ids_.clear();
ids_.reserve(selectedPoints.size());
for( auto& pntIndex: selectedPoints)
{
ids_.push_back(idField[pntIndex]);
}
ids_.assign(selectedPoints.begin(), selectedPoints.end());
}
volume_.resize(ids_.size(),1.0);
@ -68,12 +62,11 @@ bool centerPointsRegionPoints::update()
const auto& idField = database().updateFieldUint32(idName_);
selectedPoints_.fill(-1);
for( uint32 j=0; j< ids_.size(); ++j)
for(uint32 i = 0; i < idField.size(); ++i)
{
auto id = ids_[j];
for( uint32 i=0; i< idField.size(); i++)
for( uint32 j=0; j< ids_.size(); ++j)
{
if(idField[i] == id)
if(idField[i] == ids_[j])
{
selectedPoints_[j] = i;
break;

View File

@ -36,7 +36,7 @@ components
processMethod particleProbe;
processRegion centerPoints;
selector id;
field component(velocity,y);
field component(position,y);
ids (0 10 100);
timeControl default; // other options are settings, timeStep, simulationTime
// settings: uses parameters from settingsDict file
@ -45,35 +45,6 @@ components
// default: uses the default time control (defined in defaultTimeControl).
// default behavior: if you do not specify it, parameters in defaultTimeControl are used.
}
particlesTrack
{
processMethod particleProbe;
processRegion centerPoints;
// all particles whose centers are located inside this box
// are selected. Selection occurs at startTime: particles
// that are inside the box at t = startTime.
selector box;
boxInfo
{
min (0 0 0);
max (0.1 0.05 0.05);
}
// center positions of selected particles are processed
field position;
timeControl simulationTime;
// execution starts at 1.0 s
startTime 1.0;
// execution ends at 10 s
endTime 10;
// execution interval of this component
executionInterval 0.02;
}
on_single_sphere
{

View File

@ -1,3 +1,4 @@
list(APPEND SourceFiles
types/basicTypes/bTypesFunctions.cpp
types/basicTypes/Logical.cpp
@ -118,27 +119,35 @@ set(link_libs)
set(link_libs Kokkos::kokkos tbb)
# for MPI parallelization
if(pFlow_Build_MPI)
# Include the Zoltan installation check macro
include(${CMAKE_SOURCE_DIR}/cmake/zoltanInstallCheck.cmake)
# set the Zoltan Directory and check/build if needed
set(Zoltan_Install_DIR ${CMAKE_SOURCE_DIR}/thirdParty/Zoltan)
# Call the macro to find or build Zoltan
zoltan_find_or_build(${Zoltan_Install_DIR})
set(Zoltan_Install_DIR)
if(DEFINED ENV{Zoltan_DIR})
set(Zoltan_Install_DIR $ENV{Zoltan_DIR})
else()
set(Zoltan_Install_DIR $ENV{HOME}/PhasicFlow/Zoltan)
endif()
message(STATUS "Zoltan install directory is ${Zoltan_Install_DIR}")
set(ZOLTAN_PREFIX "${Zoltan_Install_DIR}" CACHE STRING "Zoltan install directory")
find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include")
message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}")
find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib")
message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")
list(APPEND SourceFiles
MPIParallelization/domain/partitioning/partitioning.cpp
MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp
MPIParallelization/domain/MPISimulationDomain.cpp
MPIParallelization/dataIOMPI/dataIOMPIs.cpp
MPIParallelization/MPI/procCommunication.cpp
MPIParallelization/MPI/scatteredMasterDistributeChar.cpp
MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
MPIParallelization/pointField/processorBoundaryFields.cpp
)
list(APPEND link_libs MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm )
@ -146,10 +155,8 @@ if(pFlow_Build_MPI)
target_include_directories(phasicFlow PUBLIC ./globals ${ZOLTAN_INCLUDE_DIR})
else()
pFlow_add_library_install(phasicFlow SourceFiles link_libs)
target_include_directories(phasicFlow PUBLIC ./globals)
endif()

View File

@ -1,106 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __gatherMaster_hpp__
#define __gatherMaster_hpp__
#include <numeric>
#include "procCommunication.hpp"
#include "stdVectorHelper.hpp"
namespace pFlow::MPI
{
template<typename T>
class gatherMaster
:
public procCommunication
{
protected:
std::vector<T> buffer_;
public:
gatherMaster(const localProcessors& procs)
:
procCommunication(procs)
{}
span<T> getData()
{
if(this->localMaster())
return span<T>( buffer_.data(), buffer_.size());
else
return span<T>(nullptr, 0);
}
std::vector<T> moveData()
{
return std::move(buffer_);
}
bool gatherData(span<T> data)
{
int thisN = data.size();
procVector<int> numElems(this->processors(), true);
procVector<int> displ(this->processors(), true);
if( !this->collectAllToMaster(thisN, numElems) )
{
fatalErrorInFunction<<
"error in collecting number of elements from processors"<<endl;
return false;
}
auto totalN = std::accumulate(
numElems.begin(),
numElems.end(),
static_cast<int>(0));
buffer_.resize(totalN);
std::exclusive_scan(
numElems.begin(),
numElems.end(),
displ.begin(),
0);
auto bufferSpan = span<T>(this->buffer_.data(),this->buffer_.size() );
return CheckMPI(
Gatherv(
data,
bufferSpan,
numElems.getSpan(),
displ.getSpan(),
this->localMasterNo(),
this->localCommunicator()),
false);
}
};
}
#endif
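A usage sketch of `gatherMaster` (assuming `pFlowProcessors()` has been initialized; the values are placeholders): every rank passes its local span, and only the local master ends up with the concatenated buffer:

```
// a minimal sketch, not taken from the code base
std::vector<pFlow::real> local{1.0, 2.0};    // per-rank values

pFlow::MPI::gatherMaster<pFlow::real> gather(pFlowProcessors());
if( gather.gatherData( pFlow::span<pFlow::real>(local.data(), local.size()) ) )
{
    auto all = gather.moveData();            // non-empty only on the master
}
```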

View File

@ -1,463 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__
#include "mpiTypes.hpp"
#include "types.hpp"
#include "span.hpp"
namespace pFlow::MPI
{
extern DataType realx3Type__;
extern DataType realx4Type__;
extern DataType int32x3Type__;
extern DataType uint32x3Type__;
template<typename T>
auto constexpr Type()
{
return MPI_BYTE;
}
template<typename T>
auto constexpr sFactor()
{
return sizeof(T);
}
template<>
auto constexpr Type<char>()
{
return MPI_CHAR;
}
template<>
auto constexpr sFactor<char>()
{
return 1;
}
template<>
auto constexpr Type<short>()
{
return MPI_SHORT;
}
template<>
auto constexpr sFactor<short>()
{
return 1;
}
template<>
auto constexpr Type<unsigned short>()
{
return MPI_UNSIGNED_SHORT;
}
template<>
auto constexpr sFactor<unsigned short>()
{
return 1;
}
template<>
auto constexpr Type<int>()
{
return MPI_INT;
}
template<>
auto constexpr sFactor<int>()
{
return 1;
}
template<>
auto constexpr Type<unsigned int>()
{
return MPI_UNSIGNED;
}
template<>
auto constexpr sFactor<unsigned int>()
{
return 1;
}
template<>
auto constexpr Type<long>()
{
return MPI_LONG;
}
template<>
auto constexpr sFactor<long>()
{
return 1;
}
template<>
auto constexpr Type<unsigned long>()
{
return MPI_UNSIGNED_LONG;
}
template<>
auto constexpr sFactor<unsigned long>()
{
return 1;
}
template<>
auto constexpr Type<float>()
{
return MPI_FLOAT;
}
template<>
auto constexpr sFactor<float>()
{
return 1;
}
template<>
auto constexpr Type<double>()
{
return MPI_DOUBLE;
}
template<>
auto constexpr sFactor<double>()
{
return 1;
}
template<>
inline
auto Type<realx3>()
{
return realx3Type__;
}
template<>
auto constexpr sFactor<realx3>()
{
return 1;
}
template<>
inline
auto Type<realx4>()
{
return realx4Type__;
}
template<>
auto constexpr sFactor<realx4>()
{
return 1;
}
template<>
inline
auto Type<int32x3>()
{
return int32x3Type__;
}
template<>
auto constexpr sFactor<int32x3>()
{
return 1;
}
template<>
inline
auto Type<uint32x3>()
{
return uint32x3Type__;
}
template<>
auto constexpr sFactor<uint32x3>()
{
return 1;
}
/*inline
auto createByteSequence(int sizeOfElement)
{
DataType newType;
MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType);
MPI_Type_commit(&newType);
return newType;
}*/
inline
auto TypeCommit(DataType* type)
{
return MPI_Type_commit(type);
}
inline
auto TypeFree(DataType* type)
{
return MPI_Type_free(type);
}
template<typename T>
inline auto getCount(Status* status, int& count)
{
int lCount;
auto res = MPI_Get_count(status, Type<T>(), &lCount);
count = lCount/sFactor<T>();
return res;
}
template<typename T>
inline int convertIndex(const int& ind)
{
return ind*sFactor<T>();
}
template<typename T>
inline auto send(span<T> data, int dest, int tag, Comm comm)
{
return MPI_Send(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto send(const T& data, int dest, int tag, Comm comm)
{
return MPI_Send(
&data,
sFactor<T>(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
{
return MPI_Isend(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
dest,
tag,
comm,
req);
}
template<typename T>
inline auto Isend(const T& data, int dest, int tag, Comm comm, Request* req)
{
return MPI_Isend(
&data,
sFactor<T>(),
Type<T>(),
dest,
tag,
comm,
req);
}
template<typename T>
inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto recv(T& data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
&data,
sFactor<T>(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
{
return MPI_Irecv(
&data,
sFactor<T>(),
Type<T>(),
source,
tag,
comm,
req);
}
template<typename T>
inline auto Irecv(span<T> data, int source, int tag, Comm comm, Request* req)
{
return MPI_Irecv(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
source,
tag,
comm,
req);
}
template<typename T>
inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp)
{
return MPI_Scan(&sData, &rData, sFactor<T>()*1, Type<T>(), op , comm );
}
// gathering one scalar data to root processor
template<typename T>
inline auto gather(T sendData, span<T>& recvData, int root, Comm comm)
{
return MPI_Gather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto allGather(T sendData, span<T>& recvData, Comm comm)
{
return MPI_Allgather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
comm);
}
template<typename T>
inline auto scatter(span<T> sendData, T& recvData, int root, Comm comm)
{
return MPI_Scatter(
sendData.data(),
sFactor<T>()*1,
Type<T>(),
&recvData,
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto Bcast(T& sendData, int root, Comm comm)
{
return MPI_Bcast(
&sendData, sFactor<T>()*1, Type<T>(), root, comm);
}
template<typename T>
bool typeCreateIndexedBlock(
span<int32> index,
DataType &newType)
{
auto res = MPI_Type_create_indexed_block(
index.size(),
sFactor<T>(),
index.data(),
Type<T>(),
&newType);
if(res == Success)
{
TypeCommit(&newType);
}
else
{
return false;
}
return true;
}
template<typename T>
inline auto Gatherv
(
span<T> sendData,
span<T>& recvData,
span<int> recvCounts,
span<int> displs,
int root,
Comm comm)
{
return MPI_Gatherv(
sendData.data(),
sendData.size()*sFactor<T>(),
Type<T>(),
recvData.data(),
recvCounts.data(),
displs.data(),
Type<T>(),
root,
comm
);
}
inline auto Wait(Request* request, Status* status)
{
return MPI_Wait(request, status);
}
}
#endif //__mpiCommunication_H__
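To illustrate how these thin wrappers compose (a sketch, not taken from the code base): the span overloads scale counts by `sFactor` and select the matching `DataType`, so derived types such as `realx3` need no manual byte arithmetic:

```
// a minimal sketch: rank 0 sends a realx3 buffer to rank 1
using namespace pFlow::MPI;

int rank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);

std::vector<pFlow::realx3> buf(8);
auto s = pFlow::span<pFlow::realx3>(buf.data(), buf.size());

if(rank == 0)
{
    send(s, 1, 0, CommWorld);          // uses realx3Type__ internally
}
else if(rank == 1)
{
    Status st;
    recv(s, 0, 0, CommWorld, &st);
    int n = 0;
    getCount<pFlow::realx3>(&st, n);   // n == 8 elements, not bytes
}
```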

View File

@ -1,71 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiTypes_H__
#define __mpiTypes_H__
#include <mpi.h>
namespace pFlow::MPI
{
// types
using Comm = MPI_Comm;
using Group = MPI_Group;
using Status = MPI_Status;
using Offset = MPI_Offset;
using Request = MPI_Request;
using Operation = MPI_Op;
using Information = MPI_Info;
using DataType = MPI_Datatype;
inline Comm CommWorld = MPI_COMM_WORLD;
// all nulls
inline auto ProcNull = MPI_PROC_NULL;
inline auto InfoNull = MPI_INFO_NULL;
inline auto RequestNull = MPI_REQUEST_NULL;
inline auto StatusIgnore = MPI_STATUS_IGNORE;
inline auto StatusesIgnore = MPI_STATUSES_IGNORE;
inline auto FileNull = MPI_FILE_NULL;
inline Comm CommNull = MPI_COMM_NULL;
inline auto TypeNull = MPI_DATATYPE_NULL;
// errors
inline const auto Success = MPI_SUCCESS;
inline const auto ErrOp = MPI_ERR_OP;
// reduction operations
inline const auto SumOp = MPI_SUM;
inline const auto MaxOp = MPI_MAX;
inline const auto MinOp = MPI_MIN;
inline const size_t MaxNoProcessors = 2048;
}
#endif //__mpiTypes_H__

View File

@ -1,30 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "procCommunication.hpp"
pFlow::MPI::procCommunication::procCommunication
(
const localProcessors& proc
)
:
processors_(proc)
{}

View File

@ -1,178 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __procCommunication_hpp__
#define __procCommunication_hpp__
#include "procVector.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
class procCommunication
{
protected:
const localProcessors& processors_;
public:
procCommunication(const localProcessors& proc);
~procCommunication()=default;
/// @brief Access the local processors object
inline
const auto& processors()const
{
return processors_;
}
/// @brief Tell if this processor is the master processor in the local
/// communicator
/// @return true if this processor is master
inline
bool localMaster()const
{
return processors_.localMaster();
}
inline
auto localSize()const
{
return processors_.localSize();
}
inline
auto localRank()const
{
return processors_.localRank();
}
inline
auto localCommunicator()const
{
return processors_.localCommunicator();
}
/// @brief return the master number in the local communicator
auto localMasterNo()const
{
return processors_.localMasterNo();
}
/// Send a single val to all processors including itself (local communicator)
template<typename T>
std::pair<T,bool> distributeMasterToAll(const T& val)
{
T retVal = val;
auto res = CheckMPI(
Bcast(retVal, localMasterNo(),localCommunicator() ),
false);
return {retVal, res};
}
/// @brief Send a single value to all processor including master (in local communicator)
/// @param val value to be sent
/// @param recvVal received value
/// @return true if successful and false if fail
template<typename T>
bool distributeMasterToAll(const T& val, T& recvVal)
{
recvVal = val;
return CheckMPI(
Bcast(recvVal, localMasterNo(), localCommunicator()),
false);
}
/// @brief Distribute values in the vector (size is equal to the number of
/// processors in the local communicator), one value to each processor
template<typename T>
std::pair<T,bool> distributeMasterToAll(const procVector<T>& vals)
{
T val;
auto vec = vals.getSpan();
auto res = CheckMPI(
scatter(vec, val, localMasterNo(), localCommunicator()),
false);
return {val, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and the values are distributed among all processors
template<typename T>
std::pair<procVector<T>, bool> collectAllToAll(const T& val)
{
procVector<T> allVec(processors_);
auto vec = allVec.getSpan();
auto res = CheckMPI(
allGather(val, vec, localCommunicator()),
false);
return {allVec, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and the values are distributed among all processors
template<typename T>
bool collectAllToAll(const T& val, procVector<T>& allVec)
{
auto vec = allVec.getSpan();
return CheckMPI(
allGather(val, vec, localCommunicator()),
false);
}
/// @brief Each processor in the local communicator calls this function with a value
/// and all values are collected in the master processor
template<typename T>
std::pair<procVector<T>,bool> collectAllToMaster(const T& val)
{
// only on master processor
procVector<T> masterVec(processors_, true);
auto masterSpan = masterVec.getSpan();
auto res = CheckMPI(
gather(val,masterSpan, localMasterNo(), localCommunicator()),
false);
return {masterVec, res};
}
template<typename T>
bool collectAllToMaster(const T& val, procVector<T>& masterVec)
{
// only on master processor
auto [vec, res] = collectAllToMaster(val);
masterVec = vec;
return res;
}
}; //procCommunication
} // pFlow::MPI
#endif //__procCommunication_hpp__
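A sketch of the collective helpers above (assuming `pFlowProcessors()` is set up); the pair-returning overloads bundle the result with a success flag:

```
// a minimal sketch, not taken from the code base
pFlow::MPI::procCommunication comm(pFlowProcessors());

// every rank learns the value contributed by all ranks
auto [allVals, ok1] = comm.collectAllToAll(comm.localRank());

// only the local master receives the full vector
auto [onMaster, ok2] = comm.collectAllToMaster(comm.localRank());

// the master broadcasts one value to everyone
auto [fromMaster, ok3] = comm.distributeMasterToAll(42);
```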

View File

@ -1,199 +0,0 @@
#ifndef __procVector_hpp__
#define __procVector_hpp__
// from PhasicFlow
#include "localProcessors.hpp"
#include "span.hpp"
#include "streams.hpp"
#include "IOPattern.hpp"
#include "mpiTypes.hpp"
namespace pFlow::MPI
{
template<typename T>
class procVector
:
public std::vector<T>
{
public:
using ProcVectorType = procVector<T>;
using VectorType = std::vector<T>;
protected:
int rank_ = 0;
bool isMaster_ = false;
using VectorType::reserve;
using VectorType::resize;
using VectorType::assign;
using VectorType::clear;
using VectorType::erase;
public:
procVector(
const localProcessors& procs,
bool onlyMaster = false)
:
rank_(procs.localRank()),
isMaster_(procs.localMaster())
{
if( onlyMaster && !isMaster_ ) return;
this->reserve(procs.localSize());
this->resize(procs.localSize());
}
procVector(
const T& val,
const localProcessors& procs,
bool onlyMaster = false)
:
procVector(procs, onlyMaster)
{
std::fill(this->begin(), this->end(), val);
}
procVector(const T& val, const procVector& src)
{
this->reserve(src.size());
this->resize(src.size());
std::fill(this->begin(), this->end(), val);
}
procVector(const localProcessors& procs, const VectorType& src)
:
procVector(procs)
{
if(src.size()!= this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in construction"<<endl;
fatalExit;
}
this->assign(src.begin(), src.end());
}
procVector(const procVector&) = default;
procVector(procVector&&) = default;
procVector& operator=(const procVector&) = default;
procVector& operator=(procVector&&) = default;
procVector& operator=(const VectorType& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in copy assignment"<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(src);
return *this;
}
procVector& operator=(VectorType&& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move assignment"
<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(std::move(src));
return *this;
}
procVector(const localProcessors& procs, VectorType&& src)
:
VectorType(std::move(src))
{
if(this->size()!= static_cast<size_t>(procs.localSize()))
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move"<<endl;
fatalExit;
}
isMaster_ = procs.localMaster();
rank_ = procs.localRank();
}
~procVector()=default;
inline
auto& thisValue()
{
return VectorType::operator[](rank_);
}
inline
const auto& thisValue()const
{
return VectorType::operator[](rank_);
}
inline
auto size()const
{
return VectorType::size();
}
inline
auto rank()const
{
return rank_;
}
inline
auto getSpan()
{
return span<T>(this->data(), this->size());
}
inline
auto getSpan()const
{
return span<T>(const_cast<T*>(this->data()), this->size());
}
bool write(
iOstream& os,
const IOPattern& iop ) const
{
return writeStdVector(os, *this, iop);
}
};
template<typename T>
inline iOstream& operator << (iOstream& os, const procVector<T>& ovec )
{
if( !ovec.write(os, IOPattern::AllProcessorsDifferent) )
{
ioErrorInFile(os.name(), os.lineNumber());
fatalExit;
}
return os;
}
}
#endif
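In short, a `procVector<T>` holds one slot per processor of the local communicator, indexed by rank. A minimal sketch:

```
// a minimal sketch, not taken from the code base
pFlow::MPI::procVector<pFlow::uint32> counts(pFlowProcessors());

counts.thisValue() = 100u;     // the slot that belongs to this rank
auto s = counts.getSpan();     // contiguous view for the MPI wrappers
```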

View File

@ -1,158 +0,0 @@
template<typename T>
pFlow::MPI::scatteredMasterDistribute<T>::scatteredMasterDistribute
(
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true)
{
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<int32> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i];
}
DataType dt;
if(! typeCreateIndexedBlock<T>( makeSpan(index), dt))
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
DataType dt;
if( !typeCreateIndexedBlock<T>(maps[proc], dt) )
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
void pFlow::MPI::scatteredMasterDistribute<T>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::distribute
(
span<T>& sendBuff,
span<T>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size()*sFactor<T>(),
Type<T>(),
0,
0,
localCommunicator(),
&stat),
false);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}

View File

@ -1,67 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistribute_hpp__
#define __scatteredMasterDistribute_hpp__
#include "mpiCommunication.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "stdVectorHelper.hpp"
#include "streams.hpp"
namespace pFlow::MPI
{
template<typename T>
class scatteredMasterDistribute : public procCommunication
{
protected:
procVector<DataType> indexedMap_;
void freeIndexedMap();
public:
scatteredMasterDistribute(const localProcessors& procs);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) =
delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<T>& sendBuff, span<T>& recvb);
};
} // pFlow::MPI
#include "scatteredMasterDistribute.cpp"
#endif //__scatteredMasterDistribute_hpp__
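The intended call sequence is a sketch like the following (buffer names and sizes are placeholders): the master registers per-destination index maps once, and every later `distribute` call scatters the indexed elements of the master's buffer into each rank's receive buffer:

```
// a minimal sketch, not taken from the code base
pFlow::MPI::scatteredMasterDistribute<pFlow::real> dist(pFlowProcessors());

// index maps exist on the master only: element indices per destination rank
pFlow::MPI::procVector<pFlow::span<pFlow::int32>> maps(pFlowProcessors(), true);
// ... fill maps on the master ...
dist.setDataMaps(maps);

std::vector<pFlow::real> sendBuf;   // full data, meaningful on the master
std::vector<pFlow::real> recvBuf;   // sized to this rank's share
auto src = pFlow::span<pFlow::real>(sendBuf.data(), sendBuf.size());
auto dst = pFlow::span<pFlow::real>(recvBuf.data(), recvBuf.size());
dist.distribute(src, dst);
```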

View File

@ -1,166 +0,0 @@
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPI::scatteredMasterDistribute<char>::scatteredMasterDistribute
(
size_t sizeOfElement,
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true),
sizeOfElement_(sizeOfElement)
{}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
std::vector<MPI_Aint> index;
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
m.size(),
sizeOfElement_,
index.data(),
MPI_BYTE,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<MPI_Aint> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
index.size(),
sizeOfElement_,
index.data(),
MPI_CHAR,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
void pFlow::MPI::scatteredMasterDistribute<char>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
bool pFlow::MPI::scatteredMasterDistribute<char>::distribute
(
span<char>& sendBuff,
span<char>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size(),
MPI_CHAR,
0,
0,
localCommunicator(),
&stat),
true);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}

View File

@ -1,66 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistributeChar_hpp__
#define __scatteredMasterDistributeChar_hpp__
#include "scatteredMasterDistribute.hpp"
namespace pFlow::MPI
{
template<>
class scatteredMasterDistribute<char> : public procCommunication
{
protected:
procVector<DataType> indexedMap_;
size_t sizeOfElement_;
void freeIndexedMap();
public:
scatteredMasterDistribute(
size_t sizeOfElement,
const localProcessors& procs
);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) =
delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<char>& sendBuff, span<char>& recvb);
};
} // pFlow::MPI
#endif //__scatteredMasterDistributeChar_hpp__

View File

@ -1,52 +0,0 @@
template<typename T>
bool pFlow::MPI::dataIOMPI<T>::gatherData(span<T> data )
{
if(this->ioPattern_.isAllProcessorsDifferent())
{
this->bufferSpan_ = data;
return true;
}
if( this->ioPattern_.isMasterProcessorDistribute())
{
auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());
if(!gatherT.gatherData(data))
{
fatalErrorInFunction<<"Error in gathering data to master"<<endl;
return false;
}
this->buffer_ = gatherT.moveData();
this->bufferSpan_ = span<T>(this->buffer_.data(),this->buffer_.size() );
return true;
}
if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
{
if( this->ioPattern_.isMaster() )
{
this->bufferSpan_ = data;
return true;
}
else
{
this->bufferSpan_ = span<T>(nullptr, 0);
return true;
}
}
return false;
}
template<typename T>
pFlow::MPI::dataIOMPI<T>::dataIOMPI(const IOPattern& iop)
:
dataIO<T>(iop)
{}

View File

@ -1,58 +0,0 @@
#ifndef __datIOMPI_hpp__
#define __datIOMPI_hpp__
#include "dataIO.hpp"
#include "pFlowProcessors.hpp"
#include "gatherMaster.hpp"
namespace pFlow::MPI
{
template<typename T>
class dataIOMPI
:
public dataIO<T>
{
public:
using DataIOType = dataIO<T>;
using DataIOMPIType = dataIOMPI<T>;
protected:
bool gatherData(span<T> data ) override;
public:
TypeInfoTemplate111("dataIO",T,"MPI");
explicit dataIOMPI(const IOPattern& iop);
dataIOMPI(const dataIOMPI&) = default;
dataIOMPI(dataIOMPI&&) = default;
dataIOMPI& operator=(const dataIOMPI&) = default;
dataIOMPI& operator=(dataIOMPI&&) = default;
~dataIOMPI() = default;
add_vCtor
(
DataIOType,
DataIOMPIType,
IOPattern
);
}; //dataIOMPI
} //namespace pFlow::MPI
#include "dataIOMPI.cpp"
#endif //__datIOMPI_hpp__

View File

@ -1,27 +0,0 @@
#include "types.hpp"
#include "dataIOMPI.hpp"
template class pFlow::MPI::dataIOMPI<pFlow::uint8>;
template class pFlow::MPI::dataIOMPI<pFlow::int8>;
template class pFlow::MPI::dataIOMPI<pFlow::int32>;
template class pFlow::MPI::dataIOMPI<pFlow::int64>;
template class pFlow::MPI::dataIOMPI<pFlow::uint32>;
template class pFlow::MPI::dataIOMPI<pFlow::uint32x3>;
template class pFlow::MPI::dataIOMPI<pFlow::uint64>;
//template class pFlow::MPI::dataIOMPI<pFlow::size_t>;
template class pFlow::MPI::dataIOMPI<pFlow::real>;
template class pFlow::MPI::dataIOMPI<pFlow::realx3>;
template class pFlow::MPI::dataIOMPI<pFlow::realx4>;
template class pFlow::MPI::dataIOMPI<pFlow::word>;

View File

@ -1,431 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "MPISimulationDomain.hpp"
#include "systemControl.hpp"
#include "rcb1DPartitioning.hpp"
#include "scatteredMasterDistribute.hpp"
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control, real maxBSphere)
:
simulationDomain(control, maxBSphere),
communication_(pFlowProcessors()),
subDomainsAll_(pFlowProcessors()),
numPointsAll_(pFlowProcessors()),
domainPartitioning_( makeUnique<rcb1DPartitioning>(subDict("decomposition"), globalBox()))
{}
bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
{
dictionary& boundaries = this->subDict("boundaries");
dictionary& thisBoundaries = this->subDict(thisBoundariesDictName());
auto neighbors = findPlaneNeighbors();
for(uint32 i=0; i<sizeOfBoundaries(); i++)
{
word bName = bundaryName(i);
auto& bDict = thisBoundaries.subDict(bName);
if( thisDomainActive_ )
{
if( neighbors[i] == -1 )
{
bDict.add("neighborProcessorNo", processors::globalRank());
}
else
{
bDict.add("neighborProcessorNo", neighbors[i]);
bDict.addOrReplace("type", "processor");
}
}
else
{
bDict.add("neighborProcessorNo", processors::globalRank());
bDict.addOrReplace("type", "none");
}
if( bDict.getVal<word>("type") == "periodic")
{
fatalErrorInFunction<<
"periodic is not implemented "<<endl;
fatalExit;
}
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::setThisDomain()
{
thisDomain_ = domain(domainPartitioning_->localBox());
uint32 thisNumPoints = initialNumberInThis();
if(!communication_.collectAllToAll(thisNumPoints, numPointsAll_))
{
fatalErrorInFunction<<
"Failed to distribute number of points."<<endl;
return false;
}
uint32 allNumPoints = std::accumulate(numPointsAll_.begin(), numPointsAll_.end(), 0u);
if( thisNumPoints != 0u )
{
thisDomainActive_ = true;
}
else
{
if(communication_.localMaster()&& allNumPoints == 0u)
thisDomainActive_ = true;
else
thisDomainActive_ = false;
}
if( thisDomainActive_ )
{
bool allInactive = true;
for(int32 i=0; i<communication_.localSize(); i++ )
{
if(i == communication_.localRank() )continue;
if(numPointsAll_[i]!=0)
{
allInactive = false;
break;
}
}
if(allInactive)
{
thisDomain_ = domain(globalBox());
}
}
if(!communication_.collectAllToAll(thisDomain_, subDomainsAll_))
{
fatalErrorInFunction<< "Failed to distributed domains"<<endl;
return false;
}
return true;
}
std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
{
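// neighbor codes: -2 = no neighbor found, -1 = this face lies on the
// global domain boundary, >= 0 = rank of the neighboring processor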
std::vector<int> neighbors(sizeOfBoundaries(), -2);
domain gDomain(globalBox());
// left
if( thisDomain_.left().parallelTouch( gDomain.left() ) )
{
neighbors[0] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.left().parallelTouch(
subDomainsAll_[i].right()) )
{
neighbors[0] = i;
break;
}
}
// right
if( thisDomain_.right().parallelTouch( gDomain.right() ) )
{
neighbors[1] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.right().parallelTouch(
subDomainsAll_[i].left()) )
{
neighbors[1] = i;
break;
}
}
// bottom
if( thisDomain_.bottom().parallelTouch( gDomain.bottom() ) )
{
neighbors[2] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.bottom().parallelTouch(
subDomainsAll_[i].top()) )
{
neighbors[2] = i;
break;
}
}
// top
if( thisDomain_.top().parallelTouch( gDomain.top() ) )
{
neighbors[3] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.top().parallelTouch(
subDomainsAll_[i].bottom()) )
{
neighbors[3] = i;
break;
}
}
// rear
if( thisDomain_.rear().parallelTouch( gDomain.rear() ) )
{
neighbors[4] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.rear().parallelTouch(
subDomainsAll_[i].front()) )
{
neighbors[4] = i;
break;
}
}
// front
if( thisDomain_.front().parallelTouch( gDomain.front() ) )
{
neighbors[5] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.front().parallelTouch(
subDomainsAll_[i].rear()) )
{
neighbors[5] = i;
break;
}
}
return neighbors;
}
bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{
pFlagTypeHost flags(pointPos.size(), 0 , pointPos.size());
initialNumPoints_ = pointPos.size();
if( !domainPartitioning_->partition(pointPos, flags) )
{
fatalErrorInFunction<<
"Point partitioning failed."<<endl;
return false;
}
if(!setThisDomain()) return false;
if(!createBoundaryDicts()) return false;
return true;
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::initialNumberInThis() const
{
uint32 numImport = domainPartitioning_->numberImportThisProc();
uint32 numExport = domainPartitioning_->numberExportThisProc();
return max(initialNumPoints_+ numImport - numExport, 0u);
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<char> src,
span<char> dst,
size_t sizeOfElement
)const
{
MPI::scatteredMasterDistribute<char> dataDist(sizeOfElement, pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<realx3> src,
span<realx3> dst
)const
{
MPI::scatteredMasterDistribute<realx3>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<real> src,
span<real> dst
)const
{
MPI::scatteredMasterDistribute<real>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<uint32> src,
span<uint32> dst
)const
{
MPI::scatteredMasterDistribute<uint32>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<int32> src,
span<int32> dst
)const
{
MPI::scatteredMasterDistribute<int32>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeImported() const
{
return domainPartitioning_->numberImportThisProc();
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeExported() const
{
return domainPartitioning_->numberExportThisProc();
}
bool
pFlow::MPI::MPISimulationDomain::domainActive() const
{
return thisDomainActive_;
}
const pFlow::domain&
pFlow::MPI::MPISimulationDomain::thisDomain() const
{
return thisDomain_;
}

View File

@ -1,116 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPISimulationDomain_hpp__
#define __MPISimulationDomain_hpp__
#include "partitioning.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "simulationDomain.hpp"
namespace pFlow::MPI
{
class MPISimulationDomain : public simulationDomain
{
private:
/// a processor communicator for simulation domain
procCommunication communication_;
/// sub-domain (thisDomain_ for all processors)
procVector<domain> subDomainsAll_;
/// number of points in all processors
procVector<uint32> numPointsAll_;
/// partitioning object
uniquePtr<partitioning> domainPartitioning_ = nullptr;
/// the actual limits of the simulation domain in this processor
domain thisDomain_;
uint32 initialNumPoints_ = 0;
bool thisDomainActive_ = false;
bool createBoundaryDicts() final;
bool setThisDomain() final;
std::vector<int> findPlaneNeighbors() const;
public:
TypeInfo("simulationDomain<MPI>");
explicit MPISimulationDomain(systemControl& control, real maxBSphere);
~MPISimulationDomain() final = default;
add_vCtor
(
simulationDomain,
MPISimulationDomain,
systemControl
);
/// @brief Perform the initial update of sub-domains based on point positions
/// @param pointPos positions of all points
/// @return true if successful
bool initialUpdateDomains(span<realx3> pointPos) final;
/// @brief Initial number of points that reside in this processor
/// @return number of points
uint32 initialNumberInThis() const final;
bool initialTransferBlockData(
span<char> src,
span<char> dst,
size_t sizeOfElement
) const final;
bool initialTransferBlockData(span<realx3> src, span<realx3> dst)
const final;
bool initialTransferBlockData(span<real> src, span<real> dst)
const final;
bool initialTransferBlockData(span<uint32> src, span<uint32> dst)
const final;
bool initialTransferBlockData(span<int32> src, span<int32> dst)
const final;
uint32 numberToBeImported() const final;
uint32 numberToBeExported() const final;
/// @brief Is this domain active?
/// Active means there are particles in it, and the
/// boundaries and other entities of the simulation domain are valid
bool domainActive() const final;
const domain& thisDomain()const final;
};
} // namespace pFlow::MPI
#endif //

View File

@ -1,113 +0,0 @@
#include "partitioning.hpp"
#include "error.hpp"
#include "streams.hpp"
void pFlow::partitioning::freeZoltan()
{
if(validPointers_)
{
Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_,
&importProcs_, &importToPart_);
Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_,
&exportProcs_, &exportToPart_);
validPointers_ = false;
}
zoltan_.release();
}
pFlow::partitioning::partitioning
(
const dictionary& dict,
const box& globalBox
)
:
globalBox_(globalBox)
{
if(!zoltanInitialized__)
{
auto rc = Zoltan_Initialize
(
processors::argc(),
processors::argv(),
&version_
);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<<"Cannot initialize zoltan"<<endl;
fatalExit;
}
zoltanInitialized__ = true;
}
// Creates Zoltan object
zoltan_ = std::make_unique<Zoltan>(pFlowProcessors().localCommunicator());
zoltan_->Set_Param("DEBUG_LEVEL", "0");
zoltan_->Set_Param("LB_METHOD", "RCB");
zoltan_->Set_Param("NUM_GID_ENTRIES", "1");
zoltan_->Set_Param("NUM_LID_ENTRIES", "1");
zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0");
zoltan_->Set_Param("RETURN_LISTS", "ALL");
}
bool pFlow::partitioning::partition(span<realx3> points, pFlagTypeHost flags)
{
pointCollection pointCollctn{points, flags};
return partition(pointCollctn);
}
int GetObjectSize
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int *ierr
)
{
*ierr = ZOLTAN_OK;
pFlow::uint32 s = *(static_cast<pFlow::uint32*>(data));
return static_cast<int>(s);
}
void PackObject
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int dest,
int size,
char *buf,
int *ierr
)
{
}
bool pFlow::partitioning::migrateData(span<char> src, span<char> dst, uint32 elementSize)
{
dataCollection data{src, dst, elementSize};
zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize);
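// data migration is not implemented yet; the callbacks above are placeholders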
return false;
}
pFlow::partitioning::~partitioning()
{
freeZoltan();
}
void pFlow::partitioning::printBox()const
{
pOutput<< "localBox:" << localBox_<<endl;
}

View File

@ -1,168 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __partitioning_hpp__
#define __partitioning_hpp__
#include "zoltan_cpp.h"
#include "pFlowProcessors.hpp"
#include "virtualConstructor.hpp"
#include "box.hpp"
#include "span.hpp"
#include "pointFlag.hpp"
#include "procVector.hpp"
namespace pFlow
{
struct pointCollection
{
span<realx3> points_;
pFlagTypeHost pFlag_;
uint32 numActivePoints()const
{
return pFlag_.numActive();
}
};
struct dataCollection
{
span<char> srcData_;
span<char> dstData_;
uint32 elementSize_;
};
class partitioning
{
protected:
float version_ = 0.0;
std::unique_ptr<Zoltan> zoltan_ = nullptr;
bool validPointers_ = false;
box globalBox_;
box localBox_;
int32 changes_, numImport_, numExport_;
id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_;
int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_;
uint32 numBeforePartition_ = 0 ;
static inline bool zoltanInitialized__ = false;
void freeZoltan();
virtual
bool partition(pointCollection& points) = 0;
public:
partitioning(
const dictionary& dict,
const box& globalBox);
virtual
~partitioning();
create_vCtor(
partitioning,
dictionary,
(
const dictionary& dict,
const box& globalBox
),
(dict, globalBox));
bool partition(
span<realx3> points,
pFlagTypeHost flags);
bool migrateData(span<char> src, span<char> dst, uint32 elementSize);
inline
auto localBox()const
{
return localBox_;
}
inline
const auto& globalBox()const
{
return globalBox_;
}
inline
bool partitionsChanged()const
{
return changes_ == 1;
}
uint32 numberImportThisProc()const
{
return numImport_;
}
uint32 numberExportThisProc()const
{
return numExport_;
}
virtual
span<int32> exportList(int procNo)const = 0;
virtual
pFlow::MPI::procVector<span<int32>> allExportLists()const=0;
void printBox()const;
};
}
#endif //__partitioning_hpp__
/*static
int getNumberOfPoints(void *data, int32 *ierr);
static
void getPointList(
void *data,
int32 sizeGID,
int32 sizeLID,
id_t* globalID,
id_t* localID,
int32 wgt_dim,
float *obj_wgts,
int32 *ierr);*/

View File

@ -1,330 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "zoltan_cpp.h"
#include "error.hpp"
#include "processors.hpp"
#include "rcb1DPartitioning.hpp"
bool pFlow::rcb1DPartitioning::partition(pointCollection &points)
{
zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0");
zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1");
zoltan_->Set_Param("KEEP_CUTS", "1");
zoltan_->Set_Param("REDUCE_DIMENSIONS", "1");
zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1");
zoltan_->Set_Param("AVERAGE_CUTS", "0");
zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0");
zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points);
zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points);
zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points);
switch (direction_)
{
case Direction::X:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points);
break;
case Direction::Y:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points);
break;
case Direction::Z:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points);
break;
}
int numGidEntries_, numLidEntries_;
int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_,
numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_,
numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<< "Zoltan faild to perform partitioning."<<endl;
return false;
}
for(auto& ids:exportIds_)
{
ids.clear();
}
std::vector<int32> thisProc(points.numActivePoints(),-1);
for(auto i =0; i<numExport_; i++)
{
exportIds_[exportProcs_[i]].push_back(exportGlobalGids_[i]);
thisProc[exportGlobalGids_[i]] = exportGlobalGids_[i];
}
for(int i=0; i<thisProc.size(); i++)
{
if(thisProc[i]==-1)
exportIds_[0].push_back(i);
}
validPointers_ = true;
int nDim;
double x0;
double y0;
double z0;
double x1;
double y1;
double z1;
zoltan_->RCB_Box
(
processors::globalRank(),
nDim,
x0, y0, z0,
x1, y1, z1
);
localBox_ = globalBox_;
if(equal(x0, x1))
{
x0 = x0 - 0.00001;
x1 = x1 + 0.00001;
}
switch (direction_)
{
case Direction::X :
localBox_.minPoint().x_ = x0;
localBox_.maxPoint().x_ = x1;
break;
case Direction::Y :
localBox_.minPoint().y_ = x0;
localBox_.maxPoint().y_ = x1;
break;
case Direction::Z :
localBox_.minPoint().z_ = x0;
localBox_.maxPoint().z_ = x1;
break;
}
localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint());
localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint());
return true;
}
pFlow::rcb1DPartitioning::rcb1DPartitioning
(
const dictionary &dict,
const box &globalBox
)
:
partitioning(dict, globalBox),
exportIds_(pFlowProcessors())
{
word directionName = dict.getVal<word>("direction");
if(toUpper(directionName)== "X")
{
direction_ = Direction::X;
dirVector_ ={1.0, 0.0, 0.0};
}
else if( toUpper(directionName) == "Y")
{
direction_ = Direction::Y;
dirVector_ ={0.0, 1.0, 0.0};
}
else if( toUpper(directionName) == "Z")
{
direction_ = Direction::Z;
dirVector_ ={0.0, 0.0, 1.0};
}
else
{
fatalErrorInFunction<< "wrong direction in dictionary "<<
dict.globalName()<<". Directions should be one of x, y, or z."<<endl;
fatalError;
}
}
int pFlow::rcb1DPartitioning::getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
int pFlow::rcb1DPartitioning::getNumberOfPoints(void *data, int *ierr)
{
auto *obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
return obj->numActivePoints();
}
void pFlow::rcb1DPartitioning::getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
globalID[n] = i;
localID[n] = n;
n++;
}
}
}
void pFlow::rcb1DPartitioning::getGeometryList_x
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].x_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_y
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].y_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_z
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].z_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}

View File

@ -1,240 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __rcb1DPartitioning_hpp__
#define __rcb1DPartitioning_hpp__
#include "partitioning.hpp"
#include "procVector.hpp"
namespace pFlow
{
class rcb1DPartitioning
:
public partitioning
{
public:
enum Direction
{
X = 0,
Y = 1,
Z = 2
};
protected:
/// Direction of partitioning
Direction direction_ = Direction::X;
realx3 dirVector_ = {1.0, 0.0, 0.0};
word directionName_ = "x";
MPI::procVector<std::vector<int>> exportIds_;
bool partition(pointCollection& points) override;
public:
rcb1DPartitioning(
const dictionary& dict,
const box& globalBox);
~rcb1DPartitioning() override=default;
span<int32> exportList(int procNo)const override
{
return span<int32>(
const_cast<int32*>(exportIds_[procNo].data()),
exportIds_[procNo].size());
}
pFlow::MPI::procVector<span<int32>> allExportLists()const override
{
pFlow::MPI::procVector<span<int32>> allList(pFlowProcessors());
for(int i=0; i<allList.size(); i++)
allList[i]= exportList(i);
return allList;
}
static
int getNumGeometry(void *data, int *ierr);
static
int getNumberOfPoints(void *data, int *ierr);
static
void getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
);
static
void getGeometryList_x(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_y(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_z(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
};
/*class RCB_y_partitioning
:
public partitioning
{
public:
RCB_y_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_y_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
*ierr = ZOLTAN_OK;
for (int i=0; i < num_obj ; i++)
{
geom_vec[i] = obj->pointList()[i].y_;
}
return;
}
static
int getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
};
class RCB_x_partitioning
:
public partitioning
{
public:
RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_x_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
int getNumGeometry(void *data, int *ierr);
};*/
} // pFlow
#endif //__rcb1DPartitioning_hpp__
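For future reference, the partitioner deleted above follows Zoltan's standard query-function protocol: register callbacks that report object counts, IDs, and coordinates, then call LB_Partition and read back the import/export lists. The sketch below is a minimal, self-contained illustration of that protocol, not phasicFlow code: PointCloud, its literal coordinates, and main() are invented for the example, while the Zoltan calls themselves are the same ones used in rcb1DPartitioning::partition.
#include <cstddef>
#include <vector>
#include <mpi.h>
#include "zoltan_cpp.h"
// Hypothetical 1-D point cloud standing in for pointCollection.
struct PointCloud { std::vector<double> x; };
static int numObj(void* data, int* ierr)
{
    *ierr = ZOLTAN_OK;
    return static_cast<int>(static_cast<PointCloud*>(data)->x.size());
}
static void objList(void* data, int, int, ZOLTAN_ID_PTR gids, ZOLTAN_ID_PTR lids,
                    int, float*, int* ierr)
{
    auto* pc = static_cast<PointCloud*>(data);
    for (std::size_t i = 0; i < pc->x.size(); ++i)
    {
        gids[i] = static_cast<ZOLTAN_ID_TYPE>(i); // global id == local index here
        lids[i] = static_cast<ZOLTAN_ID_TYPE>(i);
    }
    *ierr = ZOLTAN_OK;
}
static int numGeom(void*, int* ierr) { *ierr = ZOLTAN_OK; return 1; }
static void geomList(void* data, int, int, int nObj, ZOLTAN_ID_PTR, ZOLTAN_ID_PTR,
                     int, double* geom, int* ierr)
{
    auto* pc = static_cast<PointCloud*>(data);
    for (int i = 0; i < nObj; ++i) geom[i] = pc->x[i]; // 1-D coordinates
    *ierr = ZOLTAN_OK;
}
int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    float ver;
    Zoltan_Initialize(argc, argv, &ver);
    {
        PointCloud pc{{0.1, 0.4, 0.7, 0.9}};
        Zoltan zz(MPI_COMM_WORLD);
        zz.Set_Param("LB_METHOD", "RCB"); // recursive coordinate bisection
        zz.Set_Param("KEEP_CUTS", "1");   // required for later RCB_Box queries
        zz.Set_Num_Obj_Fn(numObj, &pc);
        zz.Set_Obj_List_Fn(objList, &pc);
        zz.Set_Num_Geom_Fn(numGeom, &pc);
        zz.Set_Geom_Multi_Fn(geomList, &pc);
        int changes, nGid, nLid, nImp, nExp;
        ZOLTAN_ID_PTR impG, impL, expG, expL;
        int *impP, *impT, *expP, *expT;
        int rc = zz.LB_Partition(changes, nGid, nLid,
                                 nImp, impG, impL, impP, impT,
                                 nExp, expG, expL, expP, expT);
        if (rc == ZOLTAN_OK)
        {
            // expG/expP now list the local points that must migrate,
            // exactly as consumed in rcb1DPartitioning::partition above.
            zz.LB_Free_Part(&impG, &impL, &impP, &impT);
            zz.LB_Free_Part(&expG, &expL, &expP, &expT);
        }
    }
    MPI_Finalize();
    return 0;
}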

View File

@ -1,289 +0,0 @@
#include "processorBoundaryField.hpp"
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
template<class T, class MemorySpace>
void
pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
if (!dataRecieved_)
{
uint32 nRecv = neighborProcField_.waitBufferForUse();
dataRecieved_ = true;
if (nRecv != this->neighborProcSize())
{
fatalErrorInFunction<<
"number of recived data is "<< nRecv <<" and expected number is "<<
this->neighborProcSize()<< " in "<<this->name() <<endl;
fatalExit;
}
//pOutput<<"field data "<< this->name()<<" has recieved with size "<< nRecv<<endl;
}
}
template<class T, class MemorySpace>
bool
pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
int step,
DataDirection direction
)
{
#ifndef BoundaryModel1
if(!this->boundary().performBoundaryUpdate())
return true;
#endif
if (step == 1)
{
// Isend
if (direction == DataDirection::TwoWay ||
( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
(!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
{
thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
dataRecieved_ = false;
//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
}
}
else if (step == 2)
{
// Irecv
if (direction == DataDirection::TwoWay ||
(!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
{
neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
dataRecieved_ = false;
//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
}
}
else
{
fatalErrorInFunction << "Invalid step number " << step << endl;
return false;
}
return true;
}
template<class T, class MemorySpace>
pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
const boundaryBase& boundary,
const pointStructure& pStruct,
InternalFieldType& internal
)
: BoundaryFieldType(boundary, pStruct, internal),
thisFieldInNeighbor_(
groupNames("sendBuffer", this->name()),
boundary.neighborProcessorNo(),
boundary.thisBoundaryIndex()
),
neighborProcField_(
groupNames("recieveBuffer", boundary.name()),
boundary.neighborProcessorNo(),
boundary.mirrorBoundaryIndex()
)
{
this->addEvent(message::BNDR_PROCTRANSFER_SEND).
addEvent(message::BNDR_PROCTRANSFER_RECIEVE).
addEvent(message::BNDR_PROCTRANSFER_WAITFILL).
addEvent(message::BNDR_PROC_SIZE_CHANGED);
}
template<class T, class MemorySpace>
typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
{
checkDataRecieved();
return neighborProcField_.buffer();
}
template<class T, class MemorySpace>
const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::
ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
{
checkDataRecieved();
return neighborProcField_.buffer();
}
template<class T, class MemorySpace>
bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
const timeInfo & ti,
const message& msg,
const anyList& varList
)
{
if(msg.equivalentTo(message::BNDR_PROC_SIZE_CHANGED))
{
auto newProcSize = varList.getObject<uint32>(
message::eventName(message::BNDR_PROC_SIZE_CHANGED));
neighborProcField_.resize(newProcSize);
}
else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_SEND))
{
const auto& indices = varList.getObject<uint32Vector_D>(
message::eventName(message::BNDR_PROCTRANSFER_SEND)
);
if constexpr( isDeviceAccessible<execution_space>())
{
FieldAccessType transferData(
indices.size(),
indices.deviceViewAll(),
this->internal().deviceViewAll()
);
thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
}
else
{
FieldAccessType transferData(
indices.size(),
indices.hostViewAll(),
this->internal().deviceViewAll()
);
thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
}
}
else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_RECIEVE))
{
uint32 numRecieved = varList.getObject<uint32>(
message::eventName(message::BNDR_PROCTRANSFER_RECIEVE)
);
neighborProcField_.recieveData(pFlowProcessors(), numRecieved);
}
else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_WAITFILL))
{
uint32 numRecieved = neighborProcField_.waitBufferForUse();
if(numRecieved == 0u)
{
return true;
}
if(msg.equivalentTo(message::RANGE_CHANGED))
{
auto newRange = varList.getObject<rangeU32>(
message::eventName(message::RANGE_CHANGED));
this->internal().field().resize(newRange.end());
}
if(msg.equivalentTo(message::ITEMS_INSERT))
{
const auto& indices = varList.getObject<uint32IndexContainer>(
message::eventName(message::ITEMS_INSERT));
this->internal().field().insertSetElement(
indices,
neighborProcField_.buffer().deviceView());
}
}
else
{
if(!BoundaryFieldType::hearChanges(ti, msg,varList) )
{
return false;
}
}
return true;
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::sendBackData() const
{
neighborProcField_.sendBackData(pFlowProcessors());
dataRecieved_ = false;
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::recieveBackData() const
{
thisFieldInNeighbor_.recieveBackData(pFlowProcessors(), this->size());
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalField()const
{
using RPolicy = Kokkos::RangePolicy<
execution_space,
Kokkos::Schedule<Kokkos::Static>,
Kokkos::IndexType<pFlow::uint32>>;
//pOutput<<"waiting for buffer to be recived in addBufferToInternalField "<<this->name()<<endl;
thisFieldInNeighbor_.waitBufferForUse();
const auto& buffView = thisFieldInNeighbor_.buffer().deviceViewAll();
const auto& field = this->internal().deviceViewAll();
if constexpr( isDeviceAccessible<execution_space> )
{
const auto& indices = this->indexList().deviceViewAll();
Kokkos::parallel_for(
"recieveBackData::"+this->name(),
RPolicy(0,this->size()),
LAMBDA_HD(uint32 i)
{
field[indices[i]] += buffView[i];
}
);
Kokkos::fence();
}
else
{
const auto& indices = this->boundary().indexListHost().deviceViewAll();
Kokkos::parallel_for(
"recieveBackData::"+this->name(),
RPolicy(0,this->size()),
LAMBDA_HD(uint32 i)
{
field[indices[i]] += buffView[i];
}
);
Kokkos::fence();
}
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryToMaster()const
{
if (!this->isBoundaryMaster() )
{
thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
dataRecieved_ = false;
}
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryFromSlave()const
{
if( this->isBoundaryMaster() )
{
neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
dataRecieved_ = false;
}
}

View File

@ -1,115 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryField_hpp__
#define __processorBoundaryField_hpp__
#include "boundaryField.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryProcessor.hpp"
namespace pFlow::MPI
{
template< class T, class MemorySpace = void>
class processorBoundaryField
:
public boundaryField<T, MemorySpace>
{
public:
using processorBoundaryFieldType = processorBoundaryField<T, MemorySpace>;
using BoundaryFieldType = boundaryField<T, MemorySpace>;
using InternalFieldType = typename BoundaryFieldType::InternalFieldType;
using memory_space = typename BoundaryFieldType::memory_space;
using execution_space = typename BoundaryFieldType::execution_space;
using FieldAccessType = typename BoundaryFieldType::FieldAccessType;
using ProcVectorType = typename BoundaryFieldType::ProcVectorType;
private:
mutable dataSender<T, MemorySpace> thisFieldInNeighbor_;
mutable dataReciever<T, MemorySpace> neighborProcField_;
mutable bool dataRecieved_ = true;
void checkDataRecieved()const;
bool updateBoundary(int step, DataDirection direction);
public:
TypeInfoTemplate211("boundaryField","processor", T, memory_space::name());
processorBoundaryField(
const boundaryBase& boundary,
const pointStructure& pStruct,
InternalFieldType& internal);
~processorBoundaryField()override = default;
add_vCtor
(
BoundaryFieldType,
processorBoundaryFieldType,
boundaryBase
);
ProcVectorType& neighborProcField() override;
const ProcVectorType& neighborProcField()const override;
void fill(const T& val)override
{
neighborProcField_.fill(val);
}
bool hearChanges(
const timeInfo & ti,
const message& msg,
const anyList& varList
) override;
void sendBackData()const;
void recieveBackData()const;
void addBufferToInternalField()const;
void updateBoundaryToMaster()const;
void updateBoundaryFromSlave()const;
};
}
#include "processorBoundaryField.cpp"
#endif //__processorBoundaryField_hpp__

View File

@ -1,24 +0,0 @@
//#include "Field.hpp"
#include "processorBoundaryField.hpp"
template class pFlow::MPI::processorBoundaryField<pFlow::uint8>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint8, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint32>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint32, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint64>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint64, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::real>;
template class pFlow::MPI::processorBoundaryField<pFlow::real, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx3>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx3, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx4>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx4, pFlow::HostSpace>;

View File

@ -1,433 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "boundaryProcessor.hpp"
#include "boundaryProcessorKernels.hpp"
#include "dictionary.hpp"
#include "mpiCommunication.hpp"
#include "boundaryBaseKernels.hpp"
#include "internalPoints.hpp"
#include "Time.hpp"
#include "anyList.hpp"
void
pFlow::MPI::boundaryProcessor::checkDataRecieved() const
{
if (!dataRecieved_)
{
uint32 nRecv = neighborProcPoints_.waitBufferForUse();
dataRecieved_ = true;
if (nRecv != neighborProcSize())
{
fatalErrorInFunction<<"In boundary "<<this->name()<<
" ,number of recieved data is "<< nRecv<<
" and neighborProcSize is "<<neighborProcSize()<<endl;
fatalExit;
}
}
}
pFlow::MPI::boundaryProcessor::boundaryProcessor(
const dictionary& dict,
const plane& bplane,
internalPoints& internal,
boundaryList& bndrs,
uint32 thisIndex
)
: boundaryBase(dict, bplane, internal, bndrs, thisIndex),
thisPointsInNeighbor_(
groupNames("sendBuffer", name()),
neighborProcessorNo(),
thisBoundaryIndex()
),
neighborProcPoints_(
groupNames("neighborProcPoints", name()),
neighborProcessorNo(),
mirrorBoundaryIndex()
)
{
}
bool
pFlow::MPI::boundaryProcessor::beforeIteration(
uint32 step,
const timeInfo& ti,
bool updateIter,
bool iterBeforeUpdate ,
bool& callAgain
)
{
if(step == 1)
{
boundaryBase::beforeIteration(step, ti, updateIter, iterBeforeUpdate, callAgain);
callAgain = true;
}
else if(step == 2 )
{
#ifdef BoundaryModel1
callAgain = true;
#else
if(!performBoundaryUpdate())
{
callAgain = false;
return true;
}
#endif
thisNumPoints_ = size();
MPI_Isend(
&thisNumPoints_,
1,
MPI_UNSIGNED,
neighborProcessorNo(),
thisBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numPointsRequest0_);
MPI_Irecv(
&neighborProcNumPoints_,
1,
MPI_UNSIGNED,
neighborProcessorNo(),
mirrorBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numPointsRequest_
);
}
else if(step == 3 )
{
callAgain = true;
if(numPointsRequest_ != RequestNull)
{
MPI_Wait(&numPointsRequest_, MPI_STATUS_IGNORE);
if(numPointsRequest0_!= RequestNull)
{
MPI_Wait(&numPointsRequest0_, MPI_STATUS_IGNORE);
}
}
// Size has not been changed. Notification is not required.
if(neighborProcNumPoints_ == neighborProcPoints_.size()) return true;
anyList varList;
message msg;
varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
if( !notify(ti, msg, varList) )
{
fatalErrorInFunction;
callAgain = false;
return false;
}
}
else if(step == 4)
{
dataRecieved_ = false;
if ( !isBoundaryMaster())
{
thisPointsInNeighbor_.sendData(pFlowProcessors(), thisPoints(),"positions");
}
else if (isBoundaryMaster())
{
neighborProcPoints_.recieveData(pFlowProcessors(), neighborProcSize(), "positions");
}
callAgain = false;
}
return true;
}
pFlow::uint32
pFlow::MPI::boundaryProcessor::neighborProcSize() const
{
return neighborProcNumPoints_;
}
pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints()
{
checkDataRecieved();
return neighborProcPoints_.buffer();
}
const pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints() const
{
checkDataRecieved();
return neighborProcPoints_.buffer();
}
bool
pFlow::MPI::boundaryProcessor::updataBoundaryData(int step)
{
return true;
}
bool pFlow::MPI::boundaryProcessor::transferData(
uint32 iter,
int step,
bool& callAgain
)
{
if( !iterBeforeBoundaryUpdate() )
{
callAgain = false;
return true;
}
if(step == 1)
{
uint32Vector_D transferFlags("transferFlags"+this->name());
numToTransfer_ = markInNegativeSide(
"transferData::markToTransfer"+this->name(),
transferFlags);
uint32Vector_D keepIndices("keepIndices");
if(numToTransfer_ != 0u)
{
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numToTransfer_,
transferFlags,
transferIndices_,
keepIndices,
false
);
// delete transferred points from this processor
if( !setRemoveKeepIndices(transferIndices_, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
}
else
{
transferIndices_.clear();
}
CheckMPI( Isend(
numToTransfer_,
neighborProcessorNo(),
thisBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numTransferRequest_), true );
CheckMPI(Irecv(
numToRecieve_,
neighborProcessorNo(),
mirrorBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numRecieveRequest_), true);
callAgain = true;
return true;
}
else if(step ==2) // to transferData to neighbor
{
if(numTransferRequest_!= RequestNull)
{
Wait(&numTransferRequest_, StatusIgnore);
}
if( numToTransfer_ == 0u)
{
callAgain = true;
return true;
}
pointFieldAccessType transferPoints(
transferIndices_.size(),
transferIndices_.deviceViewAll(),
internal().pointPositionDevice()
);
// this buffer is used temporarily
thisPointsInNeighbor_.sendData(pFlowProcessors(), transferPoints);
message msg;
anyList varList;
varList.emplaceBack(
msg.addAndName(message::BNDR_PROCTRANSFER_SEND),
transferIndices_);
const auto ti = internal().time().TimeInfo();
if(!notify(ti, msg, varList)
)
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = true;
return true;
}
else if(step == 3) // to recieve data
{
if(numRecieveRequest_ != RequestNull)
{
Wait(&numRecieveRequest_, StatusIgnore);
}
if(numToRecieve_ == 0u)
{
callAgain = false;
return true;
}
// this buffer is being used temporarily
neighborProcPoints_.recieveData(pFlowProcessors(), numToRecieve_);
message msg;
anyList varList;
varList.emplaceBack(
msg.addAndName(message::BNDR_PROCTRANSFER_RECIEVE),
numToRecieve_);
const auto ti = internal().time().TimeInfo();
if(!notify( ti, msg, varList))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = true;
return true;
}
else if(step == 4) // to insert data
{
if(numToRecieve_ == 0u)
{
callAgain = false;
return true;
}
// points should be inserted first
message msg(message::BNDR_PROCTRANSFER_WAITFILL);
anyList varList;
neighborProcPoints_.waitBufferForUse();
internal().insertPointsOnly(neighborProcPoints_.buffer(), msg, varList);
const auto& indices = varList.getObject<uint32IndexContainer>(message::eventName(message::ITEMS_INSERT));
// creates a view (does not copy data)
auto indView = deviceViewType1D<uint32>(indices.deviceView().data(), indices.deviceView().size());
uint32Vector_D newIndices("newIndices", indView);
if(! appendNewIndices(newIndices))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
const auto& ti = internal().time().TimeInfo();
if(!notify(ti, msg, varList))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = false;
return true;
}
return true;
}
bool
pFlow::MPI::boundaryProcessor::iterate(const timeInfo& ti)
{
return true;
}
bool
pFlow::MPI::boundaryProcessor::afterIteration(const timeInfo& ti)
{
uint32 s = size();
pOutput<<"size of boundary is "<< s <<endl;
uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
transferFlags.fill(0u);
const auto& transferD = transferFlags.deviceViewAll();
auto points = thisPoints();
auto p = boundaryPlane().infPlane();
uint32 numTransfer = 0;
Kokkos::parallel_reduce
(
"boundaryProcessor::afterIteration",
deviceRPolicyStatic(0,s),
LAMBDA_HD(uint32 i, uint32& transferToUpdate)
{
if(p.pointInNegativeSide(points(i)))
{
transferD(i)=1;
transferToUpdate++;
}
},
numTransfer
);
pOutput<<"Numebr to be transfered "<< numTransfer<<endl;
uint32Vector_D transferIndices("transferIndices");
uint32Vector_D keepIndices("keepIndices");
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numTransfer,
transferFlags,
transferIndices,
keepIndices
);
// delete transferred points from this processor
if( !setRemoveKeepIndices(transferIndices, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
return true;
}

View File

@ -1,137 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __boundaryProcessor_hpp__
#define __boundaryProcessor_hpp__
#include "boundaryBase.hpp"
#include "timeInfo.hpp"
#include "mpiTypes.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryConfigs.hpp"
namespace pFlow::MPI
{
class boundaryProcessor
: public boundaryBase
{
public:
using pointFieldAccessType = typename boundaryBase::pointFieldAccessType;
private:
uint32 neighborProcNumPoints_ = 0;
uint32 thisNumPoints_ = 0;
Request numPointsRequest_ = RequestNull;
Request numPointsRequest0_ = RequestNull;
dataSender<realx3> thisPointsInNeighbor_;
dataReciever<realx3> neighborProcPoints_;
mutable bool dataRecieved_ = true;
uint32 numToTransfer_ = 0;
uint32 numToRecieve_ = 0;
uint32Vector_D transferIndices_{"transferIndices"};
Request numTransferRequest_ = RequestNull;
Request numRecieveRequest_ = RequestNull;
void checkDataRecieved() const;
/// @brief Update processor boundary data for this processor
/// @param step It is either 1 or 2 in the input to indicate
/// the update step
/// @return true if successful
/// @details This method is called by boundaryList two times to
/// allow processor boundaries to exchange data in two steps.
/// The first step is a buffered non-blocking send and the second
/// step is a non-blocking receive to get the data.
bool updataBoundaryData(int step) override;
bool transferData(uint32 iter, int step, bool& callAgain) override;
public:
TypeInfo("boundary<processor>");
boundaryProcessor(
const dictionary &dict,
const plane &bplane,
internalPoints &internal,
boundaryList &bndrs,
uint32 thisIndex);
~boundaryProcessor() override = default;
add_vCtor(
boundaryBase,
boundaryProcessor,
dictionary);
bool beforeIteration(
uint32 step,
const timeInfo& ti,
bool updateIter,
bool iterBeforeUpdate ,
bool& callAgain
) override;
bool iterate(const timeInfo& ti) override;
bool afterIteration(const timeInfo& ti) override;
/// @brief Return number of points in the neighbor processor boundary.
/// This is overridden from boundaryBase.
uint32 neighborProcSize() const override;
/// @brief Return a reference to point positions in the neighbor
/// processor boundary.
realx3Vector_D &neighborProcPoints() override;
/// @brief Return a const reference to point positions in the
/// neighbor processor boundary.
const realx3Vector_D &neighborProcPoints() const override;
uint32 numToTransfer()const override
{
return numToTransfer_;
}
uint32 numToRecieve()const override
{
return numToRecieve_;
}
};
} // namespace pFlow::MPI
#endif //__boundaryProcessor_hpp__
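The step-2/step-3 split in beforeIteration above is a standard deferred non-blocking exchange: post MPI_Isend and MPI_Irecv early, let other boundaries do their work, and MPI_Wait only when the neighbor's count is actually needed. A minimal, self-contained sketch of that pattern (the struct and its method names are illustrative, not phasicFlow API):
#include <mpi.h>
// illustrative helper, not phasicFlow API
struct SizeExchange
{
    unsigned thisSize = 0;
    unsigned neighborSize = 0;
    MPI_Request sendReq = MPI_REQUEST_NULL;
    MPI_Request recvReq = MPI_REQUEST_NULL;
    // step 2: post both sides; nothing blocks here
    void post(MPI_Comm comm, int neighbor, int sendTag, int recvTag, unsigned n)
    {
        thisSize = n;
        MPI_Isend(&thisSize, 1, MPI_UNSIGNED, neighbor, sendTag, comm, &sendReq);
        MPI_Irecv(&neighborSize, 1, MPI_UNSIGNED, neighbor, recvTag, comm, &recvReq);
    }
    // step 3: complete the exchange, overlapping other work in between
    unsigned complete()
    {
        if (recvReq != MPI_REQUEST_NULL) MPI_Wait(&recvReq, MPI_STATUS_IGNORE);
        if (sendReq != MPI_REQUEST_NULL) MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
        return neighborSize;
    }
};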

View File

@ -1,56 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "phasicFlowKokkos.hpp"
#include "infinitePlane.hpp"
#include "scatteredFieldAccess.hpp"
namespace pFlow::boundaryProcessorKernels
{
struct markNegative
{
markNegative(const infinitePlane& pl,
const deviceViewType1D<uint32>& f,
const deviceScatteredFieldAccess<realx3>& p
)
:
plane_(pl),
flags_(f),
points_(p)
{}
infinitePlane plane_;
deviceViewType1D<uint32> flags_;
deviceScatteredFieldAccess<realx3> points_;
INLINE_FUNCTION_HD
void operator()(uint32 i, uint32& transferToUpdate)const
{
if(plane_.pointInNegativeSide(points_(i)))
{
flags_(i)=1;
transferToUpdate++;
}
}
};
}

View File

@ -1,135 +0,0 @@
#ifndef __dataReciever_hpp__
#define __dataReciever_hpp__
#include "span.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
template<typename T, typename MemorySpace=void>
class dataReciever
{
public:
using BufferVectorType = VectorSingle<T, MemorySpace>;
using BufferVectorTypeHost = VectorSingle<T, HostSpace>;
using memory_space = typename BufferVectorType::memory_space;
using execution_space = typename BufferVectorType::execution_space;
private:
BufferVectorType buffer_;
int fromProc_;
int tag_;
mutable Request recvRequest_ = RequestNull;
public:
dataReciever(const word& name, int from, int tag)
:
buffer_(name),
fromProc_(from),
tag_(tag)
{}
~dataReciever()=default;
uint32 waitBufferForUse()const
{
if(recvRequest_ != RequestNull)
{
Status status;
MPI_Wait(&recvRequest_, &status);
int count;
CheckMPI(getCount<T>(&status, count), true);
return static_cast<uint32>(count);
}
else
return buffer_.size();
}
void sendBackData(
const localProcessors& processors)const
{
CheckMPI(
Isend(
buffer_.getSpan(),
fromProc_,
tag_,
processors.localCommunicator(),
&recvRequest_
),
true
);
}
void recieveData(
const localProcessors& processors,
uint32 numToRecv,
const word& name = "dataReciver"
)
{
resize(numToRecv);
CheckMPI(
Irecv(
buffer_.getSpan(),
fromProc_,
tag_,
processors.localCommunicator(),
&recvRequest_
),
true
);
}
inline
auto& buffer()
{
return buffer_;
}
inline
const auto& buffer()const
{
return buffer_;
}
inline
void fill(const T& val)
{
waitBufferForUse();
buffer_.fill(val);
}
inline
uint32 size()const
{
return buffer_.size();
}
inline
void resize(uint32 newSize)
{
waitBufferForUse();
buffer_.clear();
buffer_.resize(newSize);
}
};
}
#endif //__dataReciever_hpp__

View File

@ -1,202 +0,0 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __dataSender_hpp__
#define __dataSender_hpp__
#include "VectorSingles.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
template<typename T, typename MemorySpace=void>
class dataSender
{
public:
using BufferVectorType = VectorSingle<T, MemorySpace>;
using BufferVectorTypeHost = VectorSingle<T, HostSpace>;
using memory_space = typename BufferVectorType::memory_space;
using execution_space = typename BufferVectorType::execution_space;
// This is a device vector
private:
mutable BufferVectorType buffer_;
int toProc_;
int tag_;
mutable Request sendRequest_ = RequestNull;
public:
dataSender(const word& name, int toProc, int tag)
:
toProc_(toProc),
tag_(tag)
{}
~dataSender()
{
if(sendRequest_ != RequestNull)
{
MPI_Request_free(&sendRequest_);
}
}
bool waitBufferForUse()const
{
if(sendRequest_ != RequestNull)
{
MPI_Wait(&sendRequest_, StatusesIgnore);
}
return true;
}
void sendData(
const localProcessors& processors,
const scatteredFieldAccess<T, memory_space>& scatterField,
const word& name = "dataSender::sendData"
)
{
using RPolicy = Kokkos::RangePolicy<
execution_space,
Kokkos::Schedule<Kokkos::Static>,
Kokkos::IndexType<pFlow::uint32>>;
uint32 n = scatterField.size();
// make sure the buffer is ready to be used and free
// the previous request (if any).
waitBufferForUse();
// clear the buffer to prevent data copy if capacity increases
buffer_.clear();
buffer_.resize(n);
const auto& buffView = buffer_.deviceViewAll();
Kokkos::parallel_for(
"packDataForSend::"+name,
RPolicy(0,n),
LAMBDA_HD(uint32 i)
{
buffView[i] = scatterField[i];
}
);
Kokkos::fence();
CheckMPI(
Isend(buffer_.getSpan(),
toProc_,
tag_,
processors.localCommunicator(),
&sendRequest_
),
true
);
}
bool recieveBackData(
const localProcessors& processors,
uint32 numToRecieve
)const
{
// make sure the buffer is ready to be used and free
// the previous request (if any).
waitBufferForUse();
// clear the buffer to prevent data copy if capacity increases
buffer_.clear();
buffer_.resize(numToRecieve);
CheckMPI(
Irecv(
buffer_.getSpan(),
toProc_,
tag_,
processors.localCommunicator(),
&sendRequest_
),
true
);
return true;
}
auto& buffer()
{
return buffer_;
}
const auto& buffer()const
{
return buffer_;
}
inline
void fill(const T& val)
{
waitBufferForUse();
buffer_.fill(val);
}
uint32 size()const
{
return buffer_.size();
}
bool sendComplete()
{
int test;
if(sendRequest_ != RequestNull)
{
MPI_Test(&sendRequest_, &test, StatusIgnore);
return test;
}
else
{
return true;
}
}
inline
void resize(uint32 newSize)
{
waitBufferForUse();
buffer_.clear();
buffer_.resize(newSize);
}
};
}
#endif //__dataSender_hpp__
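Taken together, dataSender and dataReciever implement a pack-then-Isend / resize-then-Irecv handshake in which waitBufferForUse guards the first use of a buffer and MPI_Get_count reports how much actually arrived. A plain-MPI sketch of the receiving half (the free functions are illustrative, not phasicFlow API; the real classes carry the rank/tag pair and a pFlow vector fixed at construction):
#include <mpi.h>
#include <vector>
// illustrative free function: post the receive into a correctly sized
// buffer (cf. dataReciever::recieveData)
void postRecv(MPI_Comm comm, int from, int tag, unsigned numToRecv,
              std::vector<double>& buffer, MPI_Request& req)
{
    buffer.assign(numToRecv, 0.0);
    MPI_Irecv(buffer.data(), static_cast<int>(numToRecv), MPI_DOUBLE,
              from, tag, comm, &req);
}
// block until the buffer may be read and report how much actually arrived
// (cf. dataReciever::waitBufferForUse)
unsigned waitBufferForUse(MPI_Request& req)
{
    MPI_Status st;
    MPI_Wait(&req, &st);
    int count = 0;
    MPI_Get_count(&st, MPI_DOUBLE, &count);
    return static_cast<unsigned>(count);
}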

View File

@ -114,8 +114,8 @@ public:
return true;
}
fatalErrorInFunction<<"Event "<< msg.eventNames()<<" with code "<< msg <<
" is not handled in boundaryField "<< name()<<endl;
fatalErrorInFunction<<"Event"<< msg.eventNames()<<"with code "<< msg <<
" is not handled in boundaryField."<<endl;
return false;
}

View File

@ -223,7 +223,7 @@ bool pFlow::internalField<T, MemorySpace>:: hearChanges
else
{
fatalErrorInFunction<<"hear changes in internal field is not processing "<<
msg.eventNames()<<
message::eventName(message::RANGE_CHANGED)<<
" event with message code "<< msg<<endl;
return false;
}

View File

@ -55,7 +55,7 @@ private:
mutable T* data_ = nullptr;
IndexType size_ = 0;
index size_ = 0;
public:
@ -104,7 +104,7 @@ public:
/// Returns the number of elements in the span
INLINE_FUNCTION_HD
IndexType size() const
index size() const
{
return size_;
}
@ -139,14 +139,14 @@ public:
/// Returns a reference to the element at the specified index
INLINE_FUNCTION_HD
T& operator[](IndexType i)
T& operator[](index i)
{
return data_[i];
}
/// Returns a const reference to the element at the specified index
INLINE_FUNCTION_HD
T& operator[](IndexType i)const
T& operator[](index i)const
{
return data_[i];
}
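Whichever index type is selected, span stays a non-owning view over storage that someone else manages. A minimal usage sketch, assuming the usual pointer-plus-size constructor (the constructor itself is not shown in this hunk):
double raw[4] = {1.0, 2.0, 3.0, 4.0};
pFlow::span<double> s(raw, 4);  // assumed pointer+size ctor; wraps raw[], no copy
for (decltype(s.size()) i = 0; i < s.size(); ++i)
{
    s[i] *= 2.0;                // writes through to raw[]
}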

View File

@ -23,21 +23,21 @@ Licence:
void pFlow::baseTimeControl::setTimeControl
(
TimeValueType startTime,
TimeValueType endTime,
TimeValueType interval,
timeValue startTime,
timeValue endTime,
timeValue interval,
const word &intervalPrefix
)
{
isTimeStep_ = false;
intervalPrefix_ = intervalPrefix;
rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
}
pFlow::baseTimeControl::baseTimeControl(
const dictionary &dict,
const word &intervalPrefix,
TimeValueType defStartTime)
timeValue defStartTime)
: intervalPrefix_(intervalPrefix)
{
auto tControl = dict.getVal<word>("timeControl");
@ -59,10 +59,10 @@ pFlow::baseTimeControl::baseTimeControl(
if(!isTimeStep_)
{
auto startTime = (dict.getValOrSet<TimeValueType>("startTime", defStartTime));
auto endTime = (dict.getValOrSet<TimeValueType>("endTime", largeValue));
auto interval = dict.getVal<TimeValueType>(intervalWord);
rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
auto startTime = (dict.getValOrSet<timeValue>("startTime", defStartTime));
auto endTime = (dict.getValOrSet<timeValue>("endTime", largeValue));
auto interval = dict.getVal<timeValue>(intervalWord);
rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
}
else
@ -78,9 +78,9 @@ pFlow::baseTimeControl::baseTimeControl(
pFlow::baseTimeControl::baseTimeControl
(
const dictionary& dict,
const TimeValueType defInterval,
const timeValue defInterval,
const word& intervalPrefix,
const TimeValueType defStartTime
const timeValue defStartTime
)
:
intervalPrefix_(intervalPrefix)
@ -104,10 +104,10 @@ pFlow::baseTimeControl::baseTimeControl
if(!isTimeStep_)
{
auto startTime = (dict.getValOrSet<TimeValueType>("startTime", defStartTime));
auto endTime = (dict.getValOrSet<TimeValueType>("endTime", largeValue));
auto interval = dict.getValOrSet<TimeValueType>(intervalWord, defInterval);
rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
auto startTime = (dict.getValOrSet<timeValue>("startTime", defStartTime));
auto endTime = (dict.getValOrSet<timeValue>("endTime", largeValue));
auto interval = dict.getValOrSet<timeValue>(intervalWord, defInterval);
rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
}
else
@ -131,9 +131,9 @@ pFlow::baseTimeControl::baseTimeControl(int32 start, int32 end, int32 stride, co
pFlow::baseTimeControl::baseTimeControl
(
TimeValueType start,
TimeValueType end,
TimeValueType stride,
timeValue start,
timeValue end,
timeValue stride,
const word &intervalPrefix
)
:
@ -147,7 +147,7 @@ pFlow::baseTimeControl::baseTimeControl
{
}
bool pFlow::baseTimeControl::eventTime(uint32 iter, TimeValueType t, TimeValueType dt) const
bool pFlow::baseTimeControl::eventTime(uint32 iter, timeValue t, timeValue dt) const
{
if(isTimeStep_)
{
@ -166,7 +166,7 @@ bool pFlow::baseTimeControl::eventTime(const timeInfo &ti) const
}
bool
pFlow::baseTimeControl::isInRange(uint32 iter, TimeValueType t, TimeValueType dt) const
pFlow::baseTimeControl::isInRange(uint32 iter, timeValue t, timeValue dt) const
{
if(isTimeStep_)
{
@ -178,7 +178,7 @@ pFlow::baseTimeControl::isInRange(uint32 iter, TimeValueType t, TimeValueType dt
}
}
pFlow::TimeValueType
pFlow::timeValue
pFlow::baseTimeControl::startTime() const
{
if(!isTimeStep_)
@ -191,7 +191,7 @@ pFlow::baseTimeControl::startTime() const
return 0;
}
pFlow::TimeValueType
pFlow::timeValue
pFlow::baseTimeControl::endTime() const
{
if(!isTimeStep_)
@ -204,7 +204,7 @@ pFlow::baseTimeControl::endTime() const
return 0;
}
pFlow::TimeValueType
pFlow::timeValue
pFlow::baseTimeControl::rInterval() const
{
if(!isTimeStep_)

View File

@ -36,16 +36,16 @@ private:
int32StridedRagne iRange_;
stridedRange<TimeValueType> rRange_;
stridedRange<timeValue> rRange_;
word intervalPrefix_;
protected:
void setTimeControl(
TimeValueType startTime,
TimeValueType endTime,
TimeValueType interval,
timeValue startTime,
timeValue endTime,
timeValue interval,
const word& intervalPrefix);
public:
@ -53,14 +53,14 @@ public:
baseTimeControl(
const dictionary& dict,
const word& intervalPrefix = "",
TimeValueType defStartTime = 0.0
timeValue defStartTime = 0.0
);
baseTimeControl(
const dictionary& dict,
const TimeValueType defInterval,
const timeValue defInterval,
const word& intervalPrefix="",
const TimeValueType defStartTime=0.0);
const timeValue defStartTime=0.0);
baseTimeControl(
int32 start,
@ -70,9 +70,9 @@ public:
);
baseTimeControl(
TimeValueType start,
TimeValueType end,
TimeValueType stride,
timeValue start,
timeValue end,
timeValue stride,
const word& intervalPrefix = ""
);
@ -99,17 +99,17 @@ public:
return isTimeStep_;
}
bool eventTime(uint32 iter, TimeValueType t, TimeValueType dt) const;
bool eventTime(uint32 iter, timeValue t, timeValue dt) const;
bool eventTime(const timeInfo& ti)const;
bool isInRange(uint32 iter, TimeValueType t, TimeValueType dt) const;
bool isInRange(uint32 iter, timeValue t, timeValue dt) const;
TimeValueType startTime() const;
timeValue startTime() const;
TimeValueType endTime() const;
timeValue endTime() const;
TimeValueType rInterval() const;
timeValue rInterval() const;
int32 startIter() const;

View File

@ -36,16 +36,16 @@ pFlow::timeControl::timeControl
ti_(dict),
startTime_
(
dict.getVal<TimeValueType>("startTime")
dict.getVal<timeValue>("startTime")
),
endTime_
(
dict.getVal<TimeValueType>("endTime")
dict.getVal<timeValue>("endTime")
),
stopAt_(endTime_),
saveInterval_
(
dict.getVal<TimeValueType>("saveInterval")
dict.getVal<timeValue>("saveInterval")
),
lastSaved_(startTime_),
performSorting_
@ -65,9 +65,9 @@ pFlow::timeControl::timeControl
pFlow::timeControl::timeControl(
dictionary& dict,
TimeValueType startTime,
TimeValueType endTime,
TimeValueType saveInterval,
timeValue startTime,
timeValue endTime,
timeValue saveInterval,
word startTimeName)
:
ti_(startTime, dict),
@ -95,9 +95,9 @@ pFlow::timeControl::timeControl(
checkForOutputToFile();
}
pFlow::TimeValueType pFlow::timeControl::setTime(TimeValueType t)
pFlow::timeValue pFlow::timeControl::setTime(timeValue t)
{
TimeValueType tmp = ti_.currentTime();
timeValue tmp = ti_.currentTime();
ti_.currentTime_ = t;
lastSaved_ = t;
checkForOutputToFile();

View File

@ -37,7 +37,7 @@ class dictionary;
class timeControl
{
public:
using timeStridedRange = stridedRange<TimeValueType>;
using timeStridedRange = stridedRange<timeValue>;
private:
@ -47,19 +47,19 @@ private:
timeInfo ti_;
// - start time of simulation
TimeValueType startTime_;
timeValue startTime_;
// - end time of simulation
TimeValueType endTime_;
timeValue endTime_;
// - stopAt
TimeValueType stopAt_;
timeValue stopAt_;
// - time interval for time folder output
TimeValueType saveInterval_;
timeValue saveInterval_;
// - the last time folder that was saved
TimeValueType lastSaved_;
timeValue lastSaved_;
bool managedExternaly_ = false;
@ -80,7 +80,7 @@ private:
inline word timeName_ = "wrongSettings"; // for managedExternaly_
static
inline TimeValueType writeTime_ = 0; // for managedExternaly_
inline timeValue writeTime_ = 0; // for managedExternaly_
void checkForOutputToFile();
@ -92,22 +92,22 @@ public:
timeControl(
dictionary& dict,
TimeValueType startTime,
TimeValueType endTime,
TimeValueType saveInterval,
timeValue startTime,
timeValue endTime,
timeValue saveInterval,
word startTimeName);
virtual ~timeControl() = default;
TimeValueType dt()const
timeValue dt()const
{
return ti_.dt();
}
TimeValueType setTime(TimeValueType t);
timeValue setTime(timeValue t);
void setStopAt(TimeValueType sT)
void setStopAt(timeValue sT)
{
if(managedExternaly_)
{
@ -115,24 +115,24 @@ public:
}
}
TimeValueType startTime()const
timeValue startTime()const
{
return startTime_;
}
TimeValueType endTime()const
timeValue endTime()const
{
return endTime_;
}
TimeValueType saveInterval()const
timeValue saveInterval()const
{
return saveInterval_;
}
word timeName()const;
TimeValueType currentTime() const
timeValue currentTime() const
{
return ti_.currentTime();
}

View File

@ -36,16 +36,16 @@ private:
uint32 currentIter_;
// - current time of simulation
TimeValueType currentTime_;
timeValue currentTime_;
// - integration time step
TimeValueType dt_;
timeValue dt_;
inline static uint32 presicion_ = 5;
public:
timeInfo(uint32 cIter, TimeValueType cTime, TimeValueType dt)
timeInfo(uint32 cIter, timeValue cTime, timeValue dt)
: currentIter_(cIter),
currentTime_(cTime),
dt_(dt)
@ -55,31 +55,31 @@ public:
timeInfo(const dictionary& dict)
:
currentIter_(0),
currentTime_(dict.getVal<TimeValueType>("startTime")),
dt_( dict.getVal<TimeValueType>("dt"))
currentTime_(dict.getVal<timeValue>("startTime")),
dt_( dict.getVal<timeValue>("dt"))
{
presicion_ = dict.getValOrSet<uint32>("timePrecision",5);
}
timeInfo(TimeValueType currentTime, const dictionary& dict)
timeInfo(timeValue currentTime, const dictionary& dict)
:
currentIter_(0),
currentTime_(currentTime),
dt_( dict.getVal<TimeValueType>("dt"))
dt_( dict.getVal<timeValue>("dt"))
{
presicion_ = dict.getValOrSet<int32>("timePrecision",5);
}
inline const TimeValueType& currentTime()const
inline const timeValue& currentTime()const
{
return currentTime_;
}
inline const TimeValueType& t() const
inline const timeValue& t() const
{
return currentTime_;
}
inline const TimeValueType& dt() const
inline const timeValue& dt() const
{
return dt_;
}
@ -109,7 +109,7 @@ public:
inline
word prevTimeName()const
{
return real2FixedStripZeros( max(currentTime_-dt_, TimeValueType(0)), presicion_);
return real2FixedStripZeros( max(currentTime_-dt_, timeValue(0)), presicion_);
}
static

View File

@ -1,55 +0,0 @@
/* -------------------------------*- C++ -*--------------------------------- *\
| phasicFlow File |
| copyright: www.cemf.ir |
\* ------------------------------------------------------------------------- */
objectName settingsDict;
objectType dictionary;
fileFormat ASCII;
/*---------------------------------------------------------------------------*/
// list of libraries that should be loaded during runtime;
// for example, "libPostprocessData.so" (optional)
libs ();
// Auxiliary function to be run during simulation (optional)
auxFunctions postprocessData;
// time step for integration in seconds, (mandatory)
dt 0.00001;
// start time for simulation, can be any valid value. Simulation starts from
// the time folder with the same name
startTime 0;
// end time for simulation (mandatory)
endTime 10;
// time interval between each data save on disk, (mandatory)
saveInterval 0.01;
// maximum number of digits for time folder, (optional, default is 5)
timePrecision 5;
// gravitational acceleration vector (m/s2), (mandatory)
g (0 -9.8 0); // gravity vector (m/s2)
// include a list of objects that are not normally in the save list on disk, (optional)
includeObjects (diameter);
// exclude a list of objects from saving on the disk, (optional)
excludeObjects ();
// integration method for position and velocity, (mandatory)
integrationMethod AdamsBashforth2;
// whether to keep the integration history under timeFolder/integration, (optional, default is no)
integrationHistory Yes;
// data writing format (ascii or binary), (mandatory)
writeFormat ascii;
// report timers (Yes or No)?, (optional, default is Yes)
timersReport Yes;
// time interval between each timer report, (optional, default is 0.04)
timersReportInterval 0.01; // time interval for reporting timers

View File

@ -27,11 +27,11 @@ Licence:
namespace pFlow
{
Map<TimeValueType, fileSystem> getTimeFolders(const fileSystem& path);
Map<timeValue, fileSystem> getTimeFolders(const fileSystem& path);
class timeFolder
{
using timeList = Map<TimeValueType, fileSystem>;
using timeList = Map<timeValue, fileSystem>;
protected:
@ -61,14 +61,14 @@ public:
}
inline
TimeValueType currentTime()const
timeValue currentTime()const
{
if(folders_.empty()) return -1;
return currentFolder_->first;
}
inline
TimeValueType nextTime()const
timeValue nextTime()const
{
auto next = currentFolder_;
next++;
@ -98,7 +98,7 @@ public:
return !finished();
}
bool setTime(TimeValueType upto)
bool setTime(timeValue upto)
{
timeList::iterator orgFolder = currentFolder_;
@ -140,14 +140,14 @@ public:
return false;
}
TimeValueType startTime()const
timeValue startTime()const
{
if(folders_.empty()) return -1;
auto [t,f] = *folders_.begin();
return t;
}
TimeValueType endTime()const
timeValue endTime()const
{
if(folders_.empty()) return -1;
auto [t,f] = *(--folders_.end());
@ -162,16 +162,16 @@ public:
};
inline
Map<TimeValueType, fileSystem> getTimeFolders(const fileSystem& path)
Map<timeValue, fileSystem> getTimeFolders(const fileSystem& path)
{
Map<TimeValueType, fileSystem> tFolders;
Map<timeValue, fileSystem> tFolders;
auto subDirs = subDirectories(path);
for(auto& subD: subDirs)
{
auto timeName = tailName(subD.wordPath(), '/');
TimeValueType TIME;
timeValue TIME;
if( auto success = readReal(timeName, TIME); success)
{
if(!tFolders.insertIf(TIME, subD))

View File

@ -25,8 +25,8 @@ template class pFlow::dataIORegular<pFlow::uint32x3>;
template class pFlow::dataIO<pFlow::uint64>;
template class pFlow::dataIORegular<pFlow::uint64>;
//template class pFlow::dataIO<size_t>;
//template class pFlow::dataIORegular<size_t>;
template class pFlow::dataIO<size_t>;
template class pFlow::dataIORegular<size_t>;
template class pFlow::dataIO<pFlow::real>;
template class pFlow::dataIORegular<pFlow::real>;

View File

@ -246,7 +246,7 @@ public:
/// Is this iter the right time for updating bounday list
inline
bool performBoundaryUpdate()const
bool performBoundarytUpdate()const
{
return updateTime_;
}

View File

@ -60,7 +60,7 @@ bool pFlow::boundaryExit::beforeIteration
{
callAgain = false;
if( !performBoundaryUpdate())
if( !performBoundarytUpdate())
{
return true;
}

View File

@ -60,7 +60,7 @@ bool pFlow::boundaryPeriodic::beforeIteration(
return true;
}
//output<<this->thisBoundaryIndex()<<" ->"<<ti.iter()<<" update called\n";
if(!performBoundaryUpdate())
if(!performBoundarytUpdate())
{
return true;
}

View File

@ -1,64 +0,0 @@
/* -------------------------------*- C++ -*--------------------------------- *\
| phasicFlow File |
| copyright: www.cemf.ir |
\* ------------------------------------------------------------------------- */
objectName domainDict;
objectType dictionary;
fileFormat ASCII;
/*---------------------------------------------------------------------------*/
// Simulation domain: every particle that goes outside this domain will be deleted
globalBox
{
min (-0.12 -0.12 0.00);
max (0.12 0.12 0.11);
}
pointSorting
{
active Yes; // optional (default: No)
dx 0.01; // optional (default: 0.01)
timeControl simulationTime; // runTime, or timeStep
startTime 0.0;
endTime 100; // optional (default: very large number)
sortingInterval 0.1; // in seconds
}
boundaries
{
left
{
type exit; // other options: periodic, reflective
}
right
{
type exit; // other options: periodic, reflective
}
bottom
{
type exit; // other options: periodic, reflective
}
top
{
type exit; // other options: periodic, reflective
}
rear
{
type exit; // other options: periodic, reflective
}
front
{
type exit; // other options: periodic, reflective
}
}

View File

@ -32,23 +32,15 @@ pFlow::pointSorting::pointSorting(const dictionary & dict)
dx_(
performSorting_()?
dict.getVal<real>("dx"):
0.01
1.0
)
{
if( performSorting_() )
{
REPORT(2)<<"Point sorting is "<<Yellow_Text("active")<<" in the simulation"<<END_REPORT;
dictionary dict2("pointSorting");
dict2.add("avtive", performSorting_);
dict2.add("dx", dx_);
timeControl_.write(dict2);
output.incrIndent();
output<<dict2<<endl;
output.decrIndent();
}
}
REPORT(2)<<"Point sorting is "<<Yellow_Text("active")<<" in simulation"<<END_REPORT;
else
REPORT(2)<<"Point sorting is "<<Yellow_Text("inactive")<<" in simulation"<<END_REPORT;
}
pFlow::uint32IndexContainer
pFlow::pointSorting::getSortedIndices(
const box& boundingBox,

View File

@ -50,9 +50,9 @@ public:
return performSorting_();
}
bool sortTime(const timeInfo& ti)const
bool sortTime(uint32 iter, real t, real dt)const
{
return performSorting_() && timeControl_.eventTime(ti);
return performSorting_() && timeControl_.eventTime(iter, t, dt);
}
uint32IndexContainer getSortedIndices(

View File

@ -178,7 +178,7 @@ bool pFlow::pointStructure::beforeIteration()
{
const timeInfo ti = TimeInfo();
if(pointSorting_().sortTime(ti))
if(pointSorting_().sortTime(ti.iter(), ti.t(), ti.dt()))
{
auto sortedIndices = pointSorting_().getSortedIndices(
simulationDomain_().globalBox(),

View File

@ -25,14 +25,11 @@ Licence:
pFlow::Logical::Logical(const word& l)
{
bool s;
int yesNoSet;
if (!evaluteWord(l, s, yesNoSet))
if (!evaluteWord(l, s_, yesNoSet_))
{
fatalErrorInFunction << " invalid input for Logical: " << l << endl;
fatalExit;
}
*this = Logical(s, yesNoSet);
}
pFlow::Logical::Logical(const char* ch)
@ -45,7 +42,7 @@ pFlow::Logical::evaluteWord(const word& l, bool& b, int& yesNoSet)
{
auto Ul = toUpper(l);
for (int i = 1; i < 5; ++i)
for (int i = 0; i < 4; ++i)
{
if (toUpper(YesNo__[i][0]) == Ul)
{
@ -92,26 +89,20 @@ pFlow::Logical::read(iIstream& is)
is.setBad();
return false;
}
bool s;
int yesNoSet;
if( evaluteWord(w, s, yesNoSet) )
{
*this = Logical(s, yesNoSet);
return true;
}
return false;
return evaluteWord(w, s_, yesNoSet_);
}
bool
pFlow::Logical::write(iOstream& os) const
{
if (s_ > 0)
if (s_)
{
os << YesNo__[s_][0];
os << YesNo__[yesNoSet_][0];
}
else
{
os << YesNo__[-s_][1];
os << YesNo__[yesNoSet_][1];
}
return os.check(FUNCTION_NAME);
}

View File

@ -41,30 +41,24 @@ class Logical
private:
/// bool value
/// negative value means false
/// positive value means true
/// 0 means not set, but still valid as true
int8_t s_ = -1;
bool s_ = false;
/// Set number of Yes or No
int yesNoSet_ = 0;
/// Table of Yes/No word pairs
inline static const word YesNo__[5][2] = { { "Y", "Y" },
{ "Yes", "No" },
inline static const word YesNo__[4][2] = { { "Yes", "No" },
{ "on", "off" },
{ "true", "false" },
{ "Ok", "No" } };
/// Construct from bool and set number
inline Logical(bool s, int yns)
inline explicit Logical(bool s, int yns)
: s_(s),
yesNoSet_(yns)
{
yns = std::max(1, std::min(4, yns));
s_ = s ? static_cast<int8_t>(yns) : static_cast<int8_t>(-yns);
}
inline explicit Logical(int8_t s)
{
s_ = s;
}
public:
/// Type info
@ -85,8 +79,7 @@ public:
/// Construct from bool
inline explicit Logical(bool s)
:
Logical(s, 1)
: s_(s)
{
}
@ -111,7 +104,8 @@ public:
/// Assignment with bool
inline Logical& operator=(const bool& b)
{
*this = Logical(b);
s_ = b;
yesNoSet_ = 0;
return *this;
}
@ -120,19 +114,19 @@ public:
/// () operator, return bool value
inline bool operator()() const
{
return s_ > 0;
return s_;
}
/// Return bool value
inline explicit operator bool() const
{
return s_ > 0;
return s_;
}
/// Not operator
inline Logical operator!() const
{
return Logical(static_cast<int8_t>(-s_));
return Logical(!s_, yesNoSet_);
}
//// IO operations

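The richer version of Logical above packs two facts into one int8_t: the sign carries the boolean (negative is false, positive is true) and the magnitude records which Yes/No word pair was parsed, so write() can echo the same spelling it read, and negation is just -s_. A compact, self-contained sketch of that encoding (packedBool and yesNo are illustrative names, not the class above):

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>

// Word pairs indexed by magnitude; index 0 is a "not set" placeholder.
static const std::string yesNo[5][2] = {
    { "Y",    "Y"     },
    { "Yes",  "No"    },
    { "on",   "off"   },
    { "true", "false" },
    { "Ok",   "No"    }
};

struct packedBool {
    // sign: + means true, - means false; |s| in 1..4 selects the word pair.
    std::int8_t s = -1;

    bool value() const { return s > 0; }
    packedBool operator!() const { return { static_cast<std::int8_t>(-s) }; }

    std::string word() const {
        int idx = std::abs(static_cast<int>(s));
        return yesNo[idx][value() ? 0 : 1];
    }
};

int main() {
    packedBool b{ +3 };               // parsed as "true" (pair 3)
    std::cout << b.word() << '\n';    // prints "true"
    std::cout << (!b).word() << '\n'; // prints "false": same pair, flipped sign
}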
View File

@ -21,6 +21,7 @@ Licence:
\brief Helper functions for built-in types.
*/
#ifndef __bTypesFunctions_hpp__
#define __bTypesFunctions_hpp__

View File

@ -61,26 +61,25 @@ using uint64 = unsigned long long int;
using word = std::string;
/// type for time values
using TimeValueType = double;
using timeValue = double;
/// type for indexing arrays and vectors
#ifdef pFlow_Build_Index64
#define Index64 1
using IndexType = std::size_t;
using index = std::size_t;
inline const char* IndexType__ = "std::size_t";
#else
#define Index64 0
using IndexType = uint32;
using index = uint32;
inline const char* IndexType__ = "uint32";
#endif
inline word
floatingPointDescription()
{
return word("In this build, ") + word(floatingPointType__) +
word(" is used for floating point operations and ") +
IndexType__ + " for indexing.";
IndexType__ + "for indexing.";
}
} // end of pFlow

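The renames above (timeValue to TimeValueType, index to IndexType) sit on top of a compile-time switch: a build flag picks a 64-bit or 32-bit index type and records the choice in a string for diagnostics. A self-contained sketch of the same pattern, with MYLIB_INDEX64 as a hypothetical flag standing in for pFlow_Build_Index64:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

// Select the indexing type at compile time; the string mirrors the choice
// so runtime banners can report how the binary was built.
#ifdef MYLIB_INDEX64
using IndexType = std::size_t;
inline const char* IndexTypeName = "std::size_t";
#else
using IndexType = std::uint32_t;
inline const char* IndexTypeName = "uint32";
#endif

std::string indexDescription() {
    // Note the space before "for": the hunk above fixes exactly this
    // kind of string-concatenation typo.
    return std::string("This build uses ") + IndexTypeName + " for indexing.";
}

int main() {
    std::cout << indexDescription() << '\n';
}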
11
thirdParty/README.md vendored
View File

@ -1,11 +0,0 @@
# Third-party packages used in PhasicFlow
This folder contains the third-party packages that are used by PhasicFlow. Since their source code sits under the main directory of the project, they are downloaded along with PhasicFlow's source code. But this does not mean that these packages are maintained by PhasicFlow or that they are part of the project.
## [Zoltan](./Zoltan/)
This package is used for space-decomposition and load balancing for MPI execution of the code.
## Thrust
This package is a parallel algorithms library developed by NVIDIA. It is built for either CUDA or OpenMP, depending on the type of build.

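Since the paragraph above only names Thrust, a minimal example may help place it: Thrust is a header-only, STL-like library whose algorithms dispatch to the GPU under CUDA or to host threads under the OpenMP backend. The same code is valid for either backend:

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <iostream>

int main() {
    thrust::host_vector<float> h(4);
    h[0] = 3; h[1] = 1; h[2] = 4; h[3] = 2;

    // "device" is the GPU under CUDA, or host threads under OpenMP.
    thrust::device_vector<float> d = h;
    thrust::sort(d.begin(), d.end());

    thrust::copy(d.begin(), d.end(), h.begin());
    for (float v : h) std::cout << v << ' ';  // 1 2 3 4
    std::cout << '\n';
}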
View File

@ -1,133 +0,0 @@
#
# A) Define the package
#
TRIBITS_PACKAGE(Zoltan)
#
# B) Set up package-specific options
#
TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_UINT_IDS
UNSIGNED_INT_GLOBAL_IDS
"typedef unsigned int ZOLTAN_ID_TYPE"
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_ULONG_IDS
UNSIGNED_LONG_GLOBAL_IDS
"typedef unsigned long ZOLTAN_ID_TYPE"
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_ULLONG_IDS
UNSIGNED_LONG_LONG_GLOBAL_IDS
"typedef unsigned long long ZOLTAN_ID_TYPE"
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_HUND
HAVE_ZOLTAN_HUND
"Enable support for HUND in ${PACKAGE_NAME}."
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_KNUTH_HASH
HAVE_ZOLTAN_KNUTH_HASH
"Enable support for Knuth's hash function in ${PACKAGE_NAME}."
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_F90INTERFACE
BUILD_ZOLTAN_F90_INTERFACE
"Enable build of F90 interface to ${PACKAGE_NAME}."
OFF )
TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_PURIFY
HAVE_PURIFY
"Enable work-arounds to purify bugs."
OFF )
OPTION(${PACKAGE_NAME}_ENABLE_CPPDRIVER
"Enable C++ driver for ${PACKAGE_NAME}."
${${PROJECT_NAME}_ENABLE_CXX} )
IF (${PROJECT_NAME}_ENABLE_CXX)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMPICH_IGNORE_CXX_SEEK")
ENDIF()
IF(${PROJECT_NAME}_ENABLE_Fortran AND BUILD_ZOLTAN_F90_INTERFACE)
# Special Fortran compiler-specific flags
IF (${CMAKE_Fortran_COMPILER_ID} MATCHES "PGI")
SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DPGI")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DPGI")
ENDIF()
IF (${CMAKE_Fortran_COMPILER_ID} MATCHES "LAHEY")
SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DFUJITSU")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFUJITSU")
ENDIF()
SET(ZOLTAN_BUILD_ZFDRIVE YES)
ENDIF()
#
# C) Add the libraries, tests, and examples
#
IF (NOT TPL_ENABLE_MPI)
ADD_SUBDIRECTORY(siMPI)
SET(ZOLTAN_BUILD_ZFDRIVE NO)
ENDIF()
ADD_SUBDIRECTORY(src)
TRIBITS_ADD_TEST_DIRECTORIES(src/driver)
IF(ZOLTAN_BUILD_ZFDRIVE)
# CMAKE version > 2.6.4 is needed; earlier versions use C++ compiler to link
# fdriver, while fdriver needs F90.
TRIBITS_ADD_TEST_DIRECTORIES(src/fdriver)
ENDIF()
TRIBITS_ADD_TEST_DIRECTORIES(test)
TRIBITS_ADD_EXAMPLE_DIRECTORIES(example)
TRIBITS_EXCLUDE_FILES(
test/ch_brack2_3
test/ch_bug
test/ch_degenerate
test/ch_degenerateAA
test/ch_drake
test/ch_grid20x19
test/ch_hammond
test/ch_hammond2
test/ch_hughes
test/ch_nograph
test/ch_onedbug
test/ch_random
test/ch_serial
test/ch_slac
test/hg_cage10
test/hg_diag500_4
test/hg_ewgt
test/hg_felix
test/hg_ibm03
test/hg_ml27
test/hg_nograph
test/hg_vwgt
test/nem_ti_20k
test/nem_ti_4k
test/misc_siefert
test/th
test/bin
doc/Zoltan_html/tu_html
src/ZoltanComponent
src/driver_old
src/fdriver_old
)
#
# D) Do standard postprocessing
#
TRIBITS_PACKAGE_POSTPROCESS()

View File

@ -1,45 +0,0 @@
/*
* @HEADER
*
* ***********************************************************************
*
* Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring
* Copyright 2012 Sandia Corporation
*
* Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
* the U.S. Government retains certain rights in this software.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Corporation nor the names of the
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Questions? Contact Karen Devine kddevin@sandia.gov
* Erik Boman egboman@sandia.gov
*
* ***********************************************************************
*
* @HEADER
*/

View File

@ -1,17 +0,0 @@
Zoltan Library for Parallel Applications
Neither Sandia, the government, the DOE, nor any of their employees, makes
any warranty, express or implied, or assumes any legal liability or
responsibility for the accuracy, completeness, or usefulness of any
information, apparatus, product, or process disclosed, or represents that
its use would not infringe privately owned rights. This information is made
available on an "AS-IS" basis.
ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
EXCLUDED HEREUNDER.
Neither Sandia nor the Government, nor their agents, officers and employees
shall be liable for any loss, damage (including, incidental, consequential
and special), injury or other casualty of whatsoever kind, or by whomsoever
caused, to the person or property of anyone arising out of or resulting from
this information, or the accuracy and validity of this information.

View File

@ -1,251 +0,0 @@
Problems existing in Zoltan.
This file was last updated on $Date$
-------------------------------------------------------------------------------
ERROR CONDITIONS IN ZOLTAN
When a processor returns from Zoltan to the application due to an error
condition, other processors do not necessarily return the same condition.
In fact, other processors may not know that the processor has quit Zoltan,
and may hang in a communication (waiting for a message that is not sent
due to the error condition). The parallel error-handling capabilities of
Zoltan will be improved in future releases.
-------------------------------------------------------------------------------
RCB/RIB ON ASCI RED
On ASCI Red, the number of context IDs (e.g., MPI Communicators) is limited
to 8192. The environment variable MPI_REUSE_CONTEXT_IDS must be set to
reuse the IDs; setting this variable, however, slows performance.
An alternative is to set Zoltan_Parameter TFLOPS_SPECIAL to "1". With
TFLOPS_SPECIAL set, communicators in RCB/RIB are not split and, thus, the
application is less likely to run out of context IDs. However, ASCI Red
also has a bug that is exposed by TFLOPS_SPECIAL; when messages that use
MPI_Send/MPI_Recv within RCB/RIB exceed the MPI_SHORT_MSG_SIZE, MPI_Recv
hangs. We do not expect these conditions to exist on future platforms and,
indeed, plan to make TFLOPS_SPECIAL obsolete in future versions of Zoltan
rather than re-work it with MPI_Irecv. -- KDD 10/5/2004
-------------------------------------------------------------------------------
ERROR CONDITIONS IN OCTREE, PARMETIS AND JOSTLE
On failure, OCTREE, ParMETIS and Jostle methods abort rather than return
error codes.
-------------------------------------------------------------------------------
ZOLTAN_INITIALIZE BUT NO ZOLTAN_FINALIZE
If Zoltan_Initialize calls MPI_Init, then MPI_Finalize
will never be called because there is no Zoltan_Finalize routine.
If the application uses MPI and calls MPI_Init and MPI_Finalize,
then there is no problem.
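The safe usage implied here is to let the application own the MPI lifecycle, so Zoltan_Initialize never has to call MPI_Init on its behalf. A sketch against Zoltan's C interface (error checking and the actual partitioning calls omitted):

#include <mpi.h>
#include <zoltan.h>

int main(int argc, char** argv) {
    float version = 0.0f;

    // The application calls MPI_Init itself, so Zoltan_Initialize will not,
    // and the missing Zoltan_Finalize cannot leave MPI un-finalized.
    MPI_Init(&argc, &argv);
    Zoltan_Initialize(argc, argv, &version);

    /* ... build a Zoltan_Struct, partition, etc. ... */

    MPI_Finalize();  // owned by the application, never by Zoltan
    return 0;
}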
-------------------------------------------------------------------------------
HETEROGENEOUS ENVIRONMENTS
Some parts of Zoltan currently assume that basic data types like
integers and real numbers (floats) have identical representation
on all processors. This may not be true in a heterogeneous
environment. Specifically, the unstructured (irregular) communication
library is unsafe in a heterogeneous environment. This problem
will be corrected in a future release of Zoltan for heterogeneous
systems.
-------------------------------------------------------------------------------
F90 ISSUES
Pacific Sierra Research (PSR) Vastf90 is not currently supported due to bugs
in the compiler with no known workarounds. It is not known when or if this
compiler will be supported.
N.A.Software FortranPlus is not currently supported due to problems with the
query functions. We anticipate that this problem can be overcome, and support
will be added soon.
-------------------------------------------------------------------------------
PROBLEMS EXISTING IN PARMETIS
(Reported to the ParMETIS development team at the University of Minnesota,
metis@cs.umn.edu)
Name: Free-memory write in PartGeomKway
Version: ParMETIS 3.1.1
Symptom: Free-memory write reported by Purify and Valgrind for graphs with
no edges.
Description:
For input graphs with no (or, perhaps, few) edges, Purify and Valgrind
report writes to already freed memory as shown below.
FMW: Free memory write:
* This is occurring while in thread 22199:
SetUp(void) [setup.c:80]
PartitionSmallGraph(void) [weird.c:39]
ParMETIS_V3_PartGeomKway [gkmetis.c:214]
Zoltan_ParMetis [parmetis_interface.c:280]
Zoltan_LB [lb_balance.c:384]
Zoltan_LB_Partition [lb_balance.c:91]
run_zoltan [dr_loadbal.c:581]
main [dr_main.c:386]
__libc_start_main [libc.so.6]
_start [crt1.o]
* Writing 4 bytes to 0xfcd298 in the heap.
* Address 0xfcd298 is at the beginning of a freed block of 4 bytes.
* This block was allocated from thread -1781075296:
malloc [rtlib.o]
GKmalloc(void) [util.c:151]
idxmalloc(void) [util.c:100]
AllocateWSpace [memory.c:28]
ParMETIS_V3_PartGeomKway [gkmetis.c:123]
Zoltan_ParMetis [parmetis_interface.c:280]
Zoltan_LB [lb_balance.c:384]
Zoltan_LB_Partition [lb_balance.c:91]
run_zoltan [dr_loadbal.c:581]
main [dr_main.c:386]
__libc_start_main [libc.so.6]
_start [crt1.o]
* There have been 10 frees since this block was freed from thread 22199:
GKfree(void) [util.c:168]
Mc_MoveGraph(void) [move.c:92]
ParMETIS_V3_PartGeomKway [gkmetis.c:149]
Zoltan_ParMetis [parmetis_interface.c:280]
Zoltan_LB [lb_balance.c:384]
Zoltan_LB_Partition [lb_balance.c:91]
run_zoltan [dr_loadbal.c:581]
main [dr_main.c:386]
__libc_start_main [libc.so.6]
_start [crt1.o]
Reported: Reported 8/31/09 http://glaros.dtc.umn.edu/flyspray/task/50
Status: Reported 8/31/09
Name: PartGeom limitation
Version: ParMETIS 3.0, 3.1
Symptom: inaccurate number of partitions when # partitions != # processors
Description:
ParMETIS method PartGeom produces decompositions with #-processor
partitions only. Zoltan parameters NUM_GLOBAL_PARTITIONS and
NUM_LOCAL_PARTITIONS will be ignored.
Reported: Not yet reported.
Status: Not yet reported.
Name: vsize array freed in ParMetis
Version: ParMETIS 3.0 and 3.1
Symptom: seg. fault, core dump at runtime
Description:
When calling ParMETIS_V3_AdaptiveRepart with the vsize parameter,
ParMetis will try to free the vsize array even if it was
allocated in Zoltan. Zoltan will then try to free vsize again
later, resulting in a fatal error. As a temporary fix,
Zoltan will never call ParMetis with the vsize parameter.
Reported: 11/25/2003.
Status: Acknowledged by George Karypis.
Name: ParMETIS_V3_AdaptiveRepart and ParMETIS_V3_PartKWay crash
for zero-sized partitions.
Version: ParMETIS 3.1
Symptom: run-time error "killed by signal 8" on DEC. FPE, divide-by-zero.
Description:
Metis divides by partition size; thus, zero-sized partitions
cause a floating-point exception.
Reported: 9/9/2003.
Status: ?
Name: ParMETIS_V3_AdaptiveRepart dies for zero-sized partitions.
Version: ParMETIS 3.0
Symptom: run-time error "killed by signal 8" on DEC. FPE, divide-by-zero.
Description:
ParMETIS_V3_AdaptiveRepart divides by partition size; thus, zero-sized
partitions cause a floating-point exception. This problem is exhibited in
adaptive-partlocal3 tests. The tests actually run on Sun and Linux machines
(which don't seem to care about the divide-by-zero), but cause an FPE
signal on DEC (Compaq) machines.
Reported: 1/23/2003.
Status: Fixed in ParMetis 3.1, but new problem appeared (see above).
Name: ParMETIS_V3_AdaptiveRepart crashes when no edges.
Version: ParMETIS 3.0
Symptom: Floating point exception, divide-by-zero.
Description:
Divide-by-zero in ParMETISLib/adrivers.c, function Adaptive_Partition,
line 40.
Reported: 1/23/2003.
Status: Fixed in ParMetis 3.1.
Name: Uninitialized memory read in akwayfm.c.
Version: ParMETIS 3.0
Symptom: UMR warning.
Description:
UMR in ParMETISLib/akwayfm.c, function Moc_KWayAdaptiveRefine, near line 520.
Reported: 1/23/2003.
Status: Fixed in ParMetis 3.1.
Name: Memory leak in wave.c
Version: ParMETIS 3.0
Symptom: Some memory not freed.
Description:
Memory leak in ParMETISLib/wave.c, function WavefrontDiffusion;
memory for the following variables is not always freed:
solution, perm, workspace, cand
We believe the early return near line 111 causes the problem.
Reported: 1/23/2003.
Status: Fixed in ParMetis 3.1.
Name: tpwgts ignored for small graphs.
Version: ParMETIS 3.0
Symptom: incorrect output (partitioning)
Description:
When using ParMETIS_V3_PartKway to partition into partitions
of unequal sizes, the input array tpwgts is ignored and
uniform-sized partitions are computed. This bug shows up when
(a) the number of vertices is < 10000 and (b) only one weight
per vertex is given (ncon=1).
Reported: Reported to George Karypis and metis@cs.umn.edu on 2002/10/30.
Status: Fixed in ParMetis 3.1.
Name: AdaptiveRepart crashes on partless test.
Version: ParMETIS 3.0
Symptom: run-time segmentation violation.
Description:
ParMETIS_V3_AdaptiveRepart crashes with a SIGSEGV if
the input array _part_ contains any value greater than
the desired number of partitions, nparts. This shows up
in Zoltan's "partless" test cases.
Reported: Reported to George Karypis and metis@cs.umn.edu on 2002/12/02.
Status: Fixed in ParMetis 3.1.
Name: load imbalance tolerance
Version: ParMETIS 2.0
Symptom: missing feature
Description:
The load imbalance parameter UNBALANCE_FRACTION can
only be set at compile-time. With Zoltan it is
necessary to be able to set this parameter at run-time.
Reported: Reported to metis@cs.umn.edu on 19 Aug 1999.
Status: Fixed in version 3.0.
Name: no edges
Version: ParMETIS 2.0
Symptom: segmentation fault at run time
Description:
ParMETIS crashes if the input graph has no edges and
ParMETIS_PartKway is called. We suspect all the graph based
methods crash. From the documentation it is unclear if
a NULL pointer is a valid input for the adjncy array.
Apparently, the bug occurs both with NULL as input or
a valid pointer to an array.
Reported: Reported to metis@cs.umn.edu on 5 Oct 1999.
Status: Fixed in version 3.0.
Name: no vertices
Version: ParMETIS 2.0, 3.0, 3.1
Symptom: segmentation fault at run time
Description:
ParMETIS may crash if a processor owns no vertices.
The extent of this bug is not known (which methods are affected).
Again, it is unclear if NULL pointers are valid input.
Reported: Reported to metis@cs.umn.edu on 6 Oct 1999.
Status: Fixed in 3.0 and 3.1 for the graph methods, but not the geometric methods.
New bug report sent on 2003/08/20.
Name: partgeom bug
Version: ParMETIS 2.0
Symptom: floating point exception
Description:
For domains where the global delta_x, delta_y, or delta_z (in 3D)
is zero (e.g., all nodes lie along the y-axis), a floating point
exception can occur when the partgeom algorithm is used.
Reported: kirk@cs.umn.edu in Jan 2001.
Status: Fixed in version 3.0.
-------------------------------------------------------------------------------

Some files were not shown because too many files have changed in this diff.