diff --git a/src/Interaction/CMakeLists.txt b/src/Interaction/CMakeLists.txt index 1427433f..c0265808 100644 --- a/src/Interaction/CMakeLists.txt +++ b/src/Interaction/CMakeLists.txt @@ -7,6 +7,8 @@ contactSearch/methods/cellBased/NBS/NBS.cpp contactSearch/methods/cellBased/NBS/cellsWallLevel0.cpp contactSearch/boundaries/boundaryContactSearch/boundaryContactSearch.cpp +contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp +contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearchKernels.cpp contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.cpp contactSearch/boundaries/periodicBoundaryContactSearch/wallBoundaryContactSearch.cpp @@ -22,6 +24,13 @@ sphereInteraction/sphereInteractionsNonLinearModels.cpp sphereInteraction/sphereInteractionsNonLinearModModels.cpp ) +if(pFlow_Build_MPI) + list(APPEND SourceFiles + contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp + sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp + ) +endif() + set(link_libs Kokkos::kokkos phasicFlow Property Particles Geometry) pFlow_add_library_install(Interaction SourceFiles link_libs) diff --git a/src/Interaction/contactSearch/ContactSearch/ContactSearch.hpp b/src/Interaction/contactSearch/ContactSearch/ContactSearch.hpp index 531ecad9..bb72657d 100644 --- a/src/Interaction/contactSearch/ContactSearch/ContactSearch.hpp +++ b/src/Interaction/contactSearch/ContactSearch/ContactSearch.hpp @@ -130,9 +130,10 @@ public: csPairContainerType& pwPairs, bool force = false) override { - ppTimer().start(); + Particles().boundingSphere().updateBoundaries(DataDirection::SlaveToMaster); + const auto& position = Particles().pointPosition().deviceViewAll(); const auto& flags = Particles().dynPointStruct().activePointsMaskDevice(); const auto& diam = Particles().boundingSphere().deviceViewAll(); diff --git a/src/Interaction/contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.hpp b/src/Interaction/contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.hpp index 15f5dc6a..0eda8983 100644 --- a/src/Interaction/contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.hpp +++ b/src/Interaction/contactSearch/boundaries/periodicBoundaryContactSearch/ppwBndryContactSearch.hpp @@ -36,15 +36,16 @@ public: using NextType = deviceViewType1D; private: - cells searchCells_; - HeadType head_{"periodic::head", 1, 1, 1}; + cells searchCells_; - NextType next_{"periodic::next", 1}; + HeadType head_{ "periodic::head", 1, 1, 1 }; - real sizeRatio_ = 1.0; + NextType next_{ "periodic::next", 1 }; - uint32 nextCapacity_ = 0; + real sizeRatio_ = 1.0; + + uint32 nextCapacity_ = 0; void checkAllocateNext(uint32 n); diff --git a/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp b/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp new file mode 100644 index 00000000..9f9384e9 --- /dev/null +++ b/src/Interaction/contactSearch/boundaries/processorBoundaryContactSearch/processorBoundaryContactSearch.cpp @@ -0,0 +1,108 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow 
+------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ + +#include "processorBoundaryContactSearch.hpp" +#include "contactSearch.hpp" +#include "particles.hpp" +//#include "pointStructure.hpp" +//#include "geometry.hpp" + + +void pFlow::processorBoundaryContactSearch::setSearchBox() +{ + + auto l = boundary().neighborLength(); + auto n = boundary().boundaryPlane().normal(); + auto pp1 = boundary().boundaryPlane().parallelPlane(l); + auto pp2 = boundary().boundaryPlane().parallelPlane(-l); + + realx3 minP1 = min(min(min(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4()); + realx3 maxP1 = max(max(max(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4()); + + realx3 minP2 = min(min(min(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4()); + realx3 maxP2 = max(max(max(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4()); + + auto minP = min(minP1, minP2) - l*(realx3(1.0)-abs(n)); + auto maxP = max(maxP1, maxP2) + l*(realx3(1.0)-abs(n)); + + searchBox_={minP, maxP}; +} + +pFlow::processorBoundaryContactSearch::processorBoundaryContactSearch( + const dictionary &dict, + const boundaryBase &boundary, + const contactSearch &cSearch) +: + boundaryContactSearch(dict, boundary, cSearch), + diameter_(cSearch.Particles().boundingSphere()), + masterSearch_(this->isBoundaryMaster()) +{ + + if(masterSearch_) + { + setSearchBox(); + + real minD; + real maxD; + cSearch.Particles().boundingSphereMinMax(minD, maxD); + + ppContactSearch_ = makeUnique( + searchBox_, + maxD); + } + else + { + searchBox_={{0,0,0},{0,0,0}}; + } +} + +bool pFlow::processorBoundaryContactSearch::broadSearch +( + uint32 iter, + real t, + real dt, + csPairContainerType &ppPairs, + csPairContainerType &pwPairs, + bool force +) +{ + if(masterSearch_) + { + /*const auto thisPoints = boundary().thisPoints(); + const auto& neighborProcPoints = boundary().neighborProcPoints(); + const auto& bDiams = diameter_.BoundaryField(thisBoundaryIndex()); + const auto thisDiams = bDiams.thisField(); + const auto& neighborProcDiams = bDiams.neighborProcField(); + + ppContactSearch_().broadSearchPP( + ppPairs, + thisPoints, + thisDiams, + neighborProcPoints, + neighborProcDiams); + + pOutput<<"ppPairs size in boundary"<< ppPairs.size()< ppContactSearch_ = nullptr; + + const realPointField_D& diameter_; + + bool masterSearch_; + + void setSearchBox(); + +public: + + TypeInfo("boundaryContactSearch") + + processorBoundaryContactSearch( + const dictionary& dict, + const boundaryBase& boundary, + const contactSearch& cSearch + ); + + ~processorBoundaryContactSearch() override = default; + + add_vCtor( + boundaryContactSearch, + processorBoundaryContactSearch, + boundaryBase + ); + + bool broadSearch( + uint32 iter, + real t, + real dt, + csPairContainerType& ppPairs, + csPairContainerType& pwPairs, + bool force = false + ) override; +}; + +} + +#endif 
//__processorBoundaryContactSearch_hpp__ \ No newline at end of file diff --git a/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp new file mode 100644 index 00000000..2f0e4089 --- /dev/null +++ b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearch.cpp @@ -0,0 +1,160 @@ + +#include "twoPartContactSearch.hpp" +#include "twoPartContactSearchKernels.hpp" +#include "phasicFlowKokkos.hpp" +#include "streams.hpp" + +void pFlow::twoPartContactSearch::checkAllocateNext(uint32 n) +{ + if( nextCapacity_ < n) + { + nextCapacity_ = n; + reallocNoInit(next_, n); + } +} + +void pFlow::twoPartContactSearch::nullifyHead() +{ + fill(head_, static_cast(-1)); +} + +void pFlow::twoPartContactSearch::nullifyNext(uint32 n) +{ + fill(next_, 0u, n, static_cast(-1)); +} + +void pFlow::twoPartContactSearch::buildList( + const deviceScatteredFieldAccess &points) +{ + if(points.empty())return; + uint32 n = points.size(); + checkAllocateNext(n); + nullifyNext(n); + nullifyHead(); + + pFlow::twoPartContactSearchKernels::buildNextHead( + points, + searchCells_, + head_, + next_ + ); +} + +pFlow::twoPartContactSearch::twoPartContactSearch +( + const box &domain, + real cellSize, + real sizeRatio +) +: + searchCells_(domain, cellSize), + head_("periodic:head",searchCells_.nx(), searchCells_.ny(), searchCells_.nz()), + sizeRatio_(sizeRatio) +{ + +} + +bool pFlow::twoPartContactSearch::broadSearchPP +( + csPairContainerType &ppPairs, + const deviceScatteredFieldAccess &points1, + const deviceScatteredFieldAccess& diams1, + const deviceScatteredFieldAccess &points2, + const deviceScatteredFieldAccess& diams2, + const realx3& transferVec +) +{ + + buildList(points1); + + uint32 nNotInserted = 1; + + // loop until the container size fits the numebr of contact pairs + while (nNotInserted > 0) + { + + nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP + ( + ppPairs, + points1, + diams1, + points2, + diams2, + transferVec, + head_, + next_, + searchCells_, + sizeRatio_ + ); + + + if(nNotInserted) + { + // - resize the container + // note that getFull now shows the number of failed insertions. + uint32 len = max(nNotInserted,100u) ; + + auto oldCap = ppPairs.capacity(); + + ppPairs.increaseCapacityBy(len); + + INFORMATION<< "Particle-particle contact pair container capacity increased from "<< + oldCap << " to "< &points1, + const deviceScatteredFieldAccess &diams1, + const realx3Vector_D& points2, + const realVector_D& diams2 +) +{ + buildList(points1); + + uint32 nNotInserted = 1; + + // loop until the container size fits the numebr of contact pairs + while (nNotInserted > 0) + { + + nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP + ( + ppPairs, + points1, + diams1, + points2, + diams2, + head_, + next_, + searchCells_, + sizeRatio_ + ); + + + if(nNotInserted) + { + // - resize the container + // note that getFull now shows the number of failed insertions. 
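        // Descriptive note on the retry strategy used here (same pattern as the
        // overload above): the kernel reports how many candidate pairs failed to
        // be inserted because the pair container was full; the capacity is then
        // grown by at least that many entries (with a floor of 100 to avoid
        // repeated small reallocations) and the broad search is repeated until
        // every candidate pair fits.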
+ uint32 len = max(nNotInserted,100u) ; + + auto oldCap = ppPairs.capacity(); + + ppPairs.increaseCapacityBy(len); + + INFORMATION<< "Particle-particle contact pair container capacity increased from "<< + oldCap << " to "<; + + using NextType = deviceViewType1D; + +private: + + cells searchCells_; + + HeadType head_{ "periodic::head", 1, 1, 1 }; + + NextType next_{ "periodic::next", 1 }; + + real sizeRatio_ = 1.0; + + uint32 nextCapacity_ = 0; + + void checkAllocateNext(uint32 n); + + void nullifyHead(); + + void nullifyNext(uint32 n); + + void buildList( + const deviceScatteredFieldAccess &points); + +public: + twoPartContactSearch( + const box &domain, + real cellSize, + real sizeRatio = 1.0); + + /// @brief Perform a broad-search for spheres in two adjacent regions. + /// Region 1 is considered as the master (primary) region and region 2 as slave + /// @param ppPairs pairs container which holds i and j + /// @param points1 point positions in region 1 + /// @param diams1 diameter of spheres in region 1 + /// @param points2 point positions in region 2 + /// @param diams2 diameter of spheres in region 2 + /// @param transferVec a vector to transfer points from region 2 to region 1 + /// @return true if it is successful + bool broadSearchPP( + csPairContainerType &ppPairs, + const deviceScatteredFieldAccess &points1, + const deviceScatteredFieldAccess &diams1, + const deviceScatteredFieldAccess &points2, + const deviceScatteredFieldAccess &diams2, + const realx3 &transferVec); + + bool broadSearchPP( + csPairContainerType &ppPairs, + const deviceScatteredFieldAccess &points1, + const deviceScatteredFieldAccess &diams1, + const realx3Vector_D& points2, + const realVector_D& diams2); + + const auto& searchCells()const + { + return searchCells_; + } + + real sizeRatio()const + { + return sizeRatio_; + } +}; + +} + +#endif //__twoPartContactSearch_hpp__ \ No newline at end of file diff --git a/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp new file mode 100644 index 00000000..56f1885d --- /dev/null +++ b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.cpp @@ -0,0 +1,188 @@ +#include "twoPartContactSearchKernels.hpp" + +INLINE_FUNCTION_HD +bool +sphereSphereCheckB( + const pFlow::realx3& p1, + const pFlow::realx3 p2, + pFlow::real d1, + pFlow::real d2 +) +{ + return pFlow::length(p2 - p1) < 0.5 * (d2 + d1); +} + +void +pFlow::twoPartContactSearchKernels::buildNextHead( + const deviceScatteredFieldAccess& points, + const cells& searchCells, + deviceViewType3D& head, + deviceViewType1D& next +) +{ + if (points.empty()) + return; + + uint32 n = points.size(); + + Kokkos::parallel_for( + "pFlow::ppwBndryContactSearch::buildList", + deviceRPolicyStatic(0, n), + LAMBDA_HD(uint32 i) { + int32x3 ind; + if (searchCells.pointIndexInDomain(points[i], ind)) + { + // discards points out of searchCell + uint32 old = + Kokkos::atomic_exchange(&head(ind.x(), ind.y(), ind.z()), i); + next[i] = old; + } + } + ); + Kokkos::fence(); +} + +pFlow::uint32 +pFlow::twoPartContactSearchKernels::broadSearchPP( + csPairContainerType& ppPairs, + const deviceScatteredFieldAccess& points, + const deviceScatteredFieldAccess& diams, + const deviceScatteredFieldAccess& mirrorPoints, + const deviceScatteredFieldAccess& mirrorDiams, + const realx3& transferVec, + const deviceViewType3D& head, + const deviceViewType1D& next, + const cells& 
searchCells, + const real sizeRatio +) +{ + if (points.empty()) + return 0; + if (mirrorPoints.empty()) + return 0; + + auto nMirror = mirrorPoints.size(); + + uint32 getFull = 0; + + Kokkos::parallel_reduce( + "pFlow::twoPartContactSearchKernels::broadSearchPP", + deviceRPolicyStatic(0, nMirror), + LAMBDA_HD(const uint32 mrrI, uint32& getFullUpdate) { + realx3 p_m = mirrorPoints(mrrI) + transferVec; + + int32x3 ind_m; + if (!searchCells.pointIndexInDomain(p_m, ind_m)) + return; + + real d_m = sizeRatio * mirrorDiams[mrrI]; + + for (int ii = -1; ii < 2; ii++) + { + for (int jj = -1; jj < 2; jj++) + { + for (int kk = -1; kk < 2; kk++) + { + auto ind = ind_m + int32x3{ ii, jj, kk }; + + if (!searchCells.inCellRange(ind)) + continue; + + uint32 thisI = head(ind.x(), ind.y(), ind.z()); + while (thisI != -1) + { + auto d_n = sizeRatio * diams[thisI]; + + // first item is for this boundary and second itme, + // for mirror + if(sphereSphereCheckB(p_m, points[thisI], d_m, d_n)&& + ppPairs.insert(thisI,mrrI) == -1) + { + getFullUpdate++; + } + + thisI = next(thisI); + } + } + } + } + }, + getFull + ); + + return getFull; +} + +pFlow::uint32 +pFlow::twoPartContactSearchKernels::broadSearchPP( + csPairContainerType& ppPairs, + const deviceScatteredFieldAccess& points1, + const deviceScatteredFieldAccess& diams1, + const realx3Vector_D& points2, + const realVector_D& diams2, + const deviceViewType3D& head, + const deviceViewType1D& next, + const cells& searchCells, + real sizeRatio +) +{ + if (points1.empty()) + return 0; + if (points2.empty()) + return 0; + + auto nP2 = points2.size(); + auto points2View = points2.deviceView(); + auto diams2View = diams2.deviceView(); + + uint32 getFull = 0; + + Kokkos::parallel_reduce( + "pFlow::twoPartContactSearchKernels::broadSearchPP", + deviceRPolicyStatic(0, nP2), + LAMBDA_HD(const uint32 i2, uint32& getFullUpdate) { + realx3 p_m = points2View(i2); + + int32x3 ind_m; + if (!searchCells.pointIndexInDomain(p_m, ind_m)) + return; + + real d_m = sizeRatio * diams2View[i2]; + + for (int ii = -1; ii < 2; ii++) + { + for (int jj = -1; jj < 2; jj++) + { + for (int kk = -1; kk < 2; kk++) + { + auto ind = ind_m + int32x3{ ii, jj, kk }; + + if (!searchCells.inCellRange(ind)) + { + continue; + } + + uint32 i1 = head(ind.x(), ind.y(), ind.z()); + while (i1 != -1) + { + auto d_n = sizeRatio * diams1[i1]; + + // first item is for this boundary and second itme, + // for mirror + if(sphereSphereCheckB(p_m, points1[i1], d_m, d_n)&& + ppPairs.insert(i1,i2) == -1) + { + getFullUpdate++; + } + + i1 = next(i1); + } + } + } + } + }, + getFull + ); + + return getFull; +} \ No newline at end of file diff --git a/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.hpp b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.hpp new file mode 100644 index 00000000..42f7cda1 --- /dev/null +++ b/src/Interaction/contactSearch/boundaries/twoPartContactSearch/twoPartContactSearchKernels.hpp @@ -0,0 +1,49 @@ +#ifndef __twoPartContactSearchKernels_hpp__ +#define __twoPartContactSearchKernels_hpp__ + +#include "contactSearchGlobals.hpp" +#include "cells.hpp" +#include "contactSearchFunctions.hpp" +#include "scatteredFieldAccess.hpp" +#include "VectorSingles.hpp" + +namespace pFlow::twoPartContactSearchKernels +{ + +void buildNextHead( + const deviceScatteredFieldAccess &points, + const cells &searchCells, + deviceViewType3D &head, + deviceViewType1D &next ); + + +uint32 broadSearchPP +( + csPairContainerType &ppPairs, + 
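    // pairs container to be filled; for each detected contact the first index
    // refers to a point on this side (points/diams) and the second to a mirror
    // point (mirrorPoints/mirrorDiams, shifted by transferVec), matching the
    // insert order used in the kernel body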
const deviceScatteredFieldAccess &points, + const deviceScatteredFieldAccess &diams, + const deviceScatteredFieldAccess &mirrorPoints, + const deviceScatteredFieldAccess &mirrorDiams, + const realx3 &transferVec, + const deviceViewType3D &head, + const deviceViewType1D &next, + const cells &searchCells, + real sizeRatio +); + +uint32 +broadSearchPP( + csPairContainerType& ppPairs, + const deviceScatteredFieldAccess& points1, + const deviceScatteredFieldAccess& diams1, + const realx3Vector_D& points2, + const realVector_D& diams2, + const deviceViewType3D& head, + const deviceViewType1D& next, + const cells& searchCells, + real sizeRatio +); +} + + +#endif //__twoPartContactSearchKernels_hpp__ \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/boundaries/boundarySphereInteraction/boundarySphereInteraction.hpp b/src/Interaction/sphereInteraction/boundaries/boundarySphereInteraction/boundarySphereInteraction.hpp index b511ac6c..56058b6d 100644 --- a/src/Interaction/sphereInteraction/boundaries/boundarySphereInteraction/boundarySphereInteraction.hpp +++ b/src/Interaction/sphereInteraction/boundaries/boundarySphereInteraction/boundarySphereInteraction.hpp @@ -143,7 +143,9 @@ public: ) override { - notImplementedFunction; + pOutput<<"Function (hearChanges in boundarySphereInteractions)is not implmented Message "<< + msg <name() <<" type "<< this->type()< +inline void sphereSphereInteraction ( real dt, @@ -46,14 +47,6 @@ void sphereSphereInteraction if( ovrlp >0.0 ) { - - /*auto Vi = thisVel[i]; - auto Vj = mirrorVel[j]; - auto wi = thisRVel[i]; - auto wj = mirrorRVel[j]; - auto Nij = (xj-xi)/dist; - auto Vr = Vi - Vj + cross((Ri*wi+Rj*wj), Nij);*/ - auto Nij = (xj-xi)/dist; auto wi = rVel[ind_i]; auto wj = rVel[ind_j]; diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySIKernels.hpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySIKernels.hpp new file mode 100644 index 00000000..a62f3166 --- /dev/null +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySIKernels.hpp @@ -0,0 +1,131 @@ + +#ifndef __processorBoundarySIKernels_hpp__ +#define __processorBoundarySIKernels_hpp__ + +namespace pFlow::MPI::processorBoundarySIKernels +{ + +template +inline +void sphereSphereInteraction +( + real dt, + const ContactListType& cntctList, + const ContactForceModel& forceModel, + const deviceScatteredFieldAccess& thisPoints, + const deviceViewType1D& thisDiam, + const deviceViewType1D& thisPropId, + const deviceViewType1D& thisVel, + const deviceViewType1D& thisRVel, + const deviceViewType1D& thisCForce, + const deviceViewType1D& thisCTorque, + const deviceViewType1D& neighborPoints, + const deviceViewType1D& neighborDiam, + const deviceViewType1D& neighborPropId, + const deviceViewType1D& neighborVel, + const deviceViewType1D& neighborRVel, + const deviceViewType1D& neighborCForce, + const deviceViewType1D& neighborCTorque +) +{ + + using ValueType = typename ContactListType::ValueType; + uint32 ss = cntctList.size(); + if(ss == 0u)return; + + uint32 lastItem = cntctList.loopCount(); + + Kokkos::parallel_for( + "pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction", + deviceRPolicyDynamic(0,lastItem), + LAMBDA_HD(uint32 n) + { + + if(!cntctList.isValid(n))return; + + auto [i,j] = cntctList.getPair(n); + uint32 ind_i = thisPoints.index(i); + uint32 ind_j = j; + + real Ri = 0.5*thisDiam[ind_i]; + real Rj = 
0.5*neighborDiam[ind_j]; + realx3 xi = thisPoints.field()[ind_i]; + realx3 xj = neighborPoints[ind_j]; + + real dist = length(xj-xi); + real ovrlp = (Ri+Rj) - dist; + + if( ovrlp >0.0 ) + { + auto Nij = (xj-xi)/max(dist,smallValue); + auto wi = thisRVel[ind_i]; + auto wj = neighborRVel[ind_j]; + auto Vr = thisVel[ind_i] - neighborVel[ind_j] + cross((Ri*wi+Rj*wj), Nij); + + auto history = cntctList.getValue(n); + + int32 propId_i = thisPropId[ind_i]; + int32 propId_j = neighborPropId[ind_j]; + + realx3 FCn, FCt, Mri, Mrj, Mij, Mji; + + // calculates contact force + forceModel.contactForce( + dt, i, j, + propId_i, propId_j, + Ri, Rj, + ovrlp, + Vr, Nij, + history, + FCn, FCt); + + forceModel.rollingFriction( + dt, i, j, + propId_i, propId_j, + Ri, Rj, + wi, wj, + Nij, + FCn, + Mri, Mrj); + + auto M = cross(Nij,FCt); + Mij = Ri*M+Mri; + Mji = Rj*M+Mrj; + + auto FC = FCn + FCt; + + + Kokkos::atomic_add(&thisCForce[ind_i].x_,FC.x_); + Kokkos::atomic_add(&thisCForce[ind_i].y_,FC.y_); + Kokkos::atomic_add(&thisCForce[ind_i].z_,FC.z_); + + Kokkos::atomic_add(&neighborCForce[ind_j].x_,-FC.x_); + Kokkos::atomic_add(&neighborCForce[ind_j].y_,-FC.y_); + Kokkos::atomic_add(&neighborCForce[ind_j].z_,-FC.z_); + + Kokkos::atomic_add(&thisCTorque[ind_i].x_, Mij.x_); + Kokkos::atomic_add(&thisCTorque[ind_i].y_, Mij.y_); + Kokkos::atomic_add(&thisCTorque[ind_i].z_, Mij.z_); + + Kokkos::atomic_add(&neighborCTorque[ind_j].x_, Mji.x_); + Kokkos::atomic_add(&neighborCTorque[ind_j].y_, Mji.y_); + Kokkos::atomic_add(&neighborCTorque[ind_j].z_, Mji.z_); + + + cntctList.setValue(n,history); + + } + else + { + cntctList.setValue(n, ValueType()); + } + + }); + Kokkos::fence(); +} + + +} //pFlow::MPI::processorBoundarySIKernels + + +#endif //__processorBoundarySIKernels_hpp__ \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp new file mode 100644 index 00000000..ef09f0b5 --- /dev/null +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.cpp @@ -0,0 +1,73 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ + +#include "processorBoundarySIKernels.hpp" + +template +pFlow::MPI::processorBoundarySphereInteraction::processorBoundarySphereInteraction( + const boundaryBase &boundary, + const sphereParticles &sphPrtcls, + const GeometryMotionModel &geomMotion) +: + boundarySphereInteraction( + boundary, + sphPrtcls, + geomMotion + ), + masterInteraction_(boundary.isBoundaryMaster()) +{ + pOutput<<"Processor boundayrCondition for "<< boundary.name()< +bool pFlow::MPI::processorBoundarySphereInteraction::sphereSphereInteraction +( + real dt, + const ContactForceModel &cfModel +) +{ + if(!masterInteraction_) return true; + + const auto & sphPar = this->sphParticles(); + uint32 thisIndex = this->boundary().thisBoundaryIndex(); + const auto& a = sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(); + + /*pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction( + dt, + this->ppPairs(), + cfModel, + this->boundary().thisPoints(), + sphPar.diameter().deviceViewAll(), + sphPar.propertyId().deviceViewAll(), + sphPar.velocity().deviceViewAll(), + sphPar.rVelocity().deviceViewAll(), + sphPar.contactForce().deviceViewAll(), + sphPar.contactTorque().deviceViewAll(), + this->boundary().neighborProcPoints().deviceViewAll(), + sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.contactForce().BoundaryField(thisIndex).neighborProcField().deviceViewAll(), + sphPar.contactTorque().BoundaryField(thisIndex).neighborProcField().deviceViewAll() + );*/ + + return true; +} \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp new file mode 100644 index 00000000..d3c56c04 --- /dev/null +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteraction.hpp @@ -0,0 +1,90 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ +#ifndef __processorBoundarySphereInteraction_hpp__ +#define __processorBoundarySphereInteraction_hpp__ + +#include "boundarySphereInteraction.hpp" + +namespace pFlow::MPI +{ + +template +class processorBoundarySphereInteraction +: + public boundarySphereInteraction +{ +public: + + using PBSInteractionType = + processorBoundarySphereInteraction; + + using BSInteractionType = + boundarySphereInteraction; + + using GeometryMotionModel = typename BSInteractionType::GeometryMotionModel; + + using ContactForceModel = typename BSInteractionType::ContactForceModel; + + using MotionModel = typename geometryMotionModel::MotionModel; + + using ModelStorage = typename ContactForceModel::contactForceStorage; + + using IdType = typename BSInteractionType::IdType; + + using IndexType = typename BSInteractionType::IndexType; + + using ContactListType = typename BSInteractionType::ContactListType; + +private: + + bool masterInteraction_; + +public: + + TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel); + + + processorBoundarySphereInteraction( + const boundaryBase& boundary, + const sphereParticles& sphPrtcls, + const GeometryMotionModel& geomMotion + ); + + add_vCtor + ( + BSInteractionType, + PBSInteractionType, + boundaryBase + ); + + ~processorBoundarySphereInteraction()override = default; + + bool sphereSphereInteraction( + real dt, + const ContactForceModel& cfModel)override; + +}; + +} + +#include "processorBoundarySphereInteraction.cpp" + + +#endif //__processorBoundarySphereInteraction_hpp__ \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp new file mode 100644 index 00000000..25347d61 --- /dev/null +++ b/src/Interaction/sphereInteraction/boundaries/processorBoundarySphereInteraction/processorBoundarySphereInteractions.cpp @@ -0,0 +1,17 @@ + +#include "processorBoundarySphereInteraction.hpp" +#include "geometryMotions.hpp" +#include "contactForceModels.hpp" + + +template class pFlow::MPI::processorBoundarySphereInteraction +< + pFlow::cfModels::limitedNonLinearNormalRolling, + pFlow::rotationAxisMotionGeometry +>; + +template class pFlow::MPI::processorBoundarySphereInteraction +< + pFlow::cfModels::nonLimitedNonLinearNormalRolling, + pFlow::rotationAxisMotionGeometry +>; \ No newline at end of file diff --git a/src/Interaction/sphereInteraction/sphereInteraction/sphereInteraction.cpp b/src/Interaction/sphereInteraction/sphereInteraction/sphereInteraction.cpp index cb298cd7..29451289 100644 --- a/src/Interaction/sphereInteraction/sphereInteraction/sphereInteraction.cpp +++ b/src/Interaction/sphereInteraction/sphereInteraction/sphereInteraction.cpp @@ -163,9 +163,16 @@ bool pFlow::sphereInteraction::iterate() auto t = this->currentTime(); auto dt = this->dt(); - //output<<"iter, t, dt "<< iter<<" "<< t << " "< + pFlow::MPI::MPIParticleIdHandler::getIdRange(uint32 nNewParticles) +{ + uint32 startId; + if(maxId_==-1) + { + startId = 0; + } + else + { + startId = maxId_+1; + } + uint32 endId = startId+nNewParticles-1; + maxId_ = endId; + return {startId, endId}; +} + +bool pFlow::MPI::MPIParticleIdHandler::initialIdCheck() +{ + /// empty point structure / no particles in simulation + uint32 maxId = -1; + if( !pStruct().empty() ) + { + maxId = max( 
*this ); + } + + auto maxIdAll = procVector(pFlowProcessors()); + auto numAll = procVector(pFlowProcessors()); + auto comm = procCommunication(pFlowProcessors()); + + comm.collectAllToAll(maxId, maxIdAll); + comm.collectAllToAll(size(),numAll); + + uint32 n = 0; + for(uint32 i=0; i"); + + explicit MPIParticleIdHandler(pointStructure& pStruct); + + ~MPIParticleIdHandler() override = default; + + add_vCtor( + particleIdHandler, + MPIParticleIdHandler, + pointStructure + ); + + Pair getIdRange(uint32 nNewParticles) override; + + uint32 maxId() const override + { + return maxId_; + } +}; + +} + +#endif //__MPIParticleIdHandler_hpp__ \ No newline at end of file diff --git a/src/Particles/particles/particleIdHandler/particleIdHandler.hpp b/src/Particles/particles/particleIdHandler/particleIdHandler.hpp index 11344336..836e0141 100644 --- a/src/Particles/particles/particleIdHandler/particleIdHandler.hpp +++ b/src/Particles/particles/particleIdHandler/particleIdHandler.hpp @@ -31,7 +31,12 @@ class particleIdHandler : public uint32PointField_D { - + +private: + + virtual + bool initialIdCheck()=0; + public: /// class info @@ -53,7 +58,9 @@ public: Pair getIdRange(uint32 nNewParticles)=0; virtual - bool initialIdCheck()=0; + uint32 maxId()const = 0; + + // heat change for possible insertion of particles // overrdie from internalField diff --git a/src/Particles/particles/particles.cpp b/src/Particles/particles/particles.cpp index b6a389ab..187f45da 100644 --- a/src/Particles/particles/particles.cpp +++ b/src/Particles/particles/particles.cpp @@ -78,7 +78,7 @@ pFlow::particles::particles(systemControl& control) { this->addToSubscriber(dynPointStruct_); - idHandler_().initialIdCheck(); + //idHandler_().initialIdCheck(); } bool @@ -87,7 +87,16 @@ pFlow::particles::beforeIteration() zeroForce(); zeroTorque(); - return dynPointStruct_.beforeIteration(); + if( !dynPointStruct_.beforeIteration()) + { + return false; + } + + shapeIndex_.updateBoundariesSlaveToMasterIfRequested(); + accelertion_.updateBoundariesSlaveToMasterIfRequested(); + idHandler_().updateBoundariesSlaveToMasterIfRequested(); + + return true; } bool diff --git a/src/Particles/particles/particles.hpp b/src/Particles/particles/particles.hpp index a06c5c3c..1f3c7ab1 100644 --- a/src/Particles/particles/particles.hpp +++ b/src/Particles/particles/particles.hpp @@ -181,6 +181,12 @@ public: return contactTorque_; } + inline + uint maxId()const + { + return idHandler_().maxId(); + } + bool beforeIteration() override; bool iterate() override; diff --git a/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.cpp b/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.cpp index 401bc281..1916f430 100644 --- a/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.cpp +++ b/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.cpp @@ -9,6 +9,7 @@ pFlow::regularParticleIdHandler::regularParticleIdHandler : particleIdHandler(pStruct) { + initialIdCheck(); } pFlow::Pair diff --git a/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.hpp b/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.hpp index 4d40261f..a237e0da 100644 --- a/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.hpp +++ b/src/Particles/particles/regularParticleIdHandler/regularParticleIdHandler.hpp @@ -33,6 +33,7 @@ private: uint32 maxId_ = -1; + bool initialIdCheck()override; public: ClassInfo("particleIdHandler"); @@ -50,7 +51,10 @@ 
public: Pair getIdRange(uint32 nNewParticles)override; - bool initialIdCheck()override; + uint32 maxId()const override + { + return maxId_; + } }; diff --git a/src/phasicFlow/CMakeLists.txt b/src/phasicFlow/CMakeLists.txt index 58117785..de526e20 100644 --- a/src/phasicFlow/CMakeLists.txt +++ b/src/phasicFlow/CMakeLists.txt @@ -25,6 +25,7 @@ streams/TStream/oTstream.cpp streams/Fstream/iFstream.cpp streams/Fstream/oFstream.cpp streams/Fstream/fileStream.cpp +streams/dataIO/dataIORegulars.cpp streams/streams.cpp fileSystem/fileSystem.cpp @@ -132,14 +133,14 @@ if(pFlow_Build_MPI) message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}") list(APPEND SourceFiles - MPIParallelization/partitioning.cpp - MPIParallelization/rcb1DPartitioning.cpp + MPIParallelization/domain/partitioning/partitioning.cpp + MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp MPIParallelization/domain/MPISimulationDomain.cpp - #MPIParallelization/dataIOMPI.cpp - MPIParallelization/procCommunication.cpp - MPIParallelization/boundaryProcessor.cpp - MPIParallelization/scatteredMasterDistributeChar.cpp - MPIParallelization/processorBoundaryFields.cpp + MPIParallelization/dataIOMPI/dataIOMPIs.cpp + MPIParallelization/MPI/procCommunication.cpp + MPIParallelization/MPI/scatteredMasterDistributeChar.cpp + MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp + MPIParallelization/pointField/processorBoundaryFields.cpp ) list(APPEND link_libs MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm ) diff --git a/src/phasicFlow/MPIParallelization/MPI/gatherMaster.hpp b/src/phasicFlow/MPIParallelization/MPI/gatherMaster.hpp new file mode 100644 index 00000000..dc87ec01 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/gatherMaster.hpp @@ -0,0 +1,106 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ +#ifndef __gatherMaster_hpp__ +#define __gatherMaster_hpp__ + +#include + +#include "procCommunication.hpp" +#include "stdVectorHelper.hpp" + +namespace pFlow::MPI +{ + +template +class gatherMaster +: + public procCommunication +{ +protected: + + std::vector buffer_; + +public: + + gatherMaster(const localProcessors& procs) + : + procCommunication(procs) + {} + + span getData() + { + if(this->localMaster()) + return span( buffer_.data(), buffer_.size()); + else + return span(nullptr, 0); + } + + std::vector moveData() + { + return std::move(buffer_); + } + + bool gatherData(span data) + { + int thisN = data.size(); + + bool succss; + + procVector numElems(this->processors(), true); + procVector displ(this->processors(), true); + + if( !this->collectAllToMaster(thisN, numElems) ) + { + fatalErrorInFunction<< + "error in collecting number of elements from processors"<(0)); + + buffer_.resize(totalN); + + std::exclusive_scan( + numElems.begin(), + numElems.end(), + displ.begin(), + 0); + + auto bufferSpan = span(this->buffer_.data(),this->buffer_.size() ); + + return CheckMPI( + Gatherv( + data, + bufferSpan, + numElems.getSpan(), + displ.getSpan(), + this->localMasterNo(), + this->localCommunicator()), + false); + + } + + +}; +} + +#endif \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp new file mode 100644 index 00000000..4fd5e260 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/mpiCommunication.hpp @@ -0,0 +1,427 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ + +#ifndef __mpiCommunication_H__ +#define __mpiCommunication_H__ + + +#include "mpiTypes.hpp" +#include "types.hpp" +#include "span.hpp" + + + +namespace pFlow::MPI +{ + +extern DataType realx3Type__; + +extern DataType realx4Type__; + +extern DataType int32x3Type__; + +template +auto constexpr Type() +{ + return MPI_BYTE; +} + +template +auto constexpr sFactor() +{ + return sizeof(T); +} + +template +auto constexpr Type() +{ + return MPI_CHAR; +} +template +auto constexpr sFactor() +{ + return 1; +} + +template +auto constexpr Type() +{ + return MPI_SHORT; +} +template +auto constexpr sFactor() +{ + return 1; +} + +template +auto constexpr Type() +{ + return MPI_UNSIGNED_SHORT; +} +template +auto constexpr sFactor() +{ + return 1; +} + +template +auto constexpr Type() +{ + return MPI_INT; +} +template +auto constexpr sFactor() +{ + return 1; +} + +template<> +auto constexpr Type() +{ + return MPI_UNSIGNED; +} +template<> +auto constexpr sFactor() +{ + return 1; +} + +template<> +auto constexpr Type() +{ + return MPI_LONG; +} +template<> +auto constexpr sFactor() +{ + return 1; +} + +template<> +auto constexpr Type() +{ + return MPI_UNSIGNED_LONG; +} +template<> +auto constexpr sFactor() +{ + return 1; +} + + +template<> +auto constexpr Type() +{ + return MPI_FLOAT; +} +template<> +auto constexpr sFactor() +{ + return 1; +} + +template<> +auto constexpr Type() +{ + return MPI_DOUBLE; +} +template<> +auto constexpr sFactor() +{ + return 1; +} + +template<> +inline +auto Type() +{ + return realx3Type__; +} + +template<> +auto constexpr sFactor() +{ + return 1; +} + +template<> +inline +auto Type() +{ + return realx4Type__; +} + +template<> +auto constexpr sFactor() +{ + return 1; +} + + +template<> +inline +auto Type() +{ + return int32x3Type__; +} + + +template<> +auto constexpr sFactor() +{ + return 1; +} + +/*inline +auto createByteSequence(int sizeOfElement) +{ + DataType newType; + MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType); + MPI_Type_commit(&newType); + return newType; +}*/ + +inline +auto TypeCommit(DataType* type) +{ + return MPI_Type_commit(type); +} + +inline +auto TypeFree(DataType* type) +{ + return MPI_Type_free(type); + +} +template +inline auto getCount(Status* status, int& count) +{ + int lCount; + auto res = MPI_Get_count(status, Type(), &lCount); + count = lCount/sFactor(); + return res; +} + +template +inline int convertIndex(const int& ind) +{ + return ind*sFactor(); +} + +template +inline auto send(span data, int dest, int tag, Comm comm) +{ + return MPI_Send( + data.data(), + sFactor()*data().size(), + Type(), + dest, + tag, + comm); +} + +template +inline auto Isend(span data, int dest, int tag, Comm comm, Request* req) +{ + return MPI_Isend( + data.data(), + sFactor()*data.size(), + Type(), + dest, + tag, + comm, + req); +} + +template +inline auto Isend(const T& data, int dest, int tag, Comm comm, Request* req) +{ + return MPI_Isend( + &data, + sFactor(), + Type(), + dest, + tag, + comm, + req); +} + +template +inline auto recv(span data, int source, int tag, Comm comm, Status *status) +{ + return MPI_Recv( + data.data(), + sFactor()*data.size(), + Type(), + source, + tag, + comm, + status); +} + +template +inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req) +{ + return MPI_Irecv( + &data, + sFactor(), + Type(), + source, + tag, + comm, + req); +} + +template +inline auto Irecv(span data, int source, int tag, Comm comm, Request* req) +{ + return 
MPI_Irecv( + data.data(), + sFactor()*data.size(), + Type(), + source, + tag, + comm, + req); +} + +template +inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp) +{ + return MPI_Scan(&sData, &rData, sFactor()*1, Type(), op , comm ); +} + +// gathering one scalar data to root processor +template +inline auto gather(T sendData, span& recvData, int root, Comm comm) +{ + return MPI_Gather( + &sendData, + sFactor()*1, + Type(), + recvData.data(), + sFactor()*1, + Type(), + root, + comm); +} + +template +inline auto allGather(T sendData, span& recvData, Comm comm) +{ + return MPI_Allgather( + &sendData, + sFactor()*1, + Type(), + recvData.data(), + sFactor()*1, + Type(), + comm); +} + +template +inline auto scatter(span sendData, T& recvData, int root, Comm comm) +{ + return MPI_Scatter( + sendData.data(), + sFactor()*1, + Type(), + &recvData, + sFactor()*1, + Type(), + root, + comm); +} + +template +inline auto Bcast(T& sendData, int root, Comm comm) +{ + return MPI_Bcast( + &sendData, sFactor()*1, Type(), root, comm); + +} + + +template +bool typeCreateIndexedBlock( + span index, + DataType &newType) +{ + auto res = MPI_Type_create_indexed_block( + index.size(), + sFactor(), + index.data(), + Type(), + &newType); + + if(res == Success) + { + TypeCommit(&newType); + } + else + { + return false; + } + + return true; +} + + +template +inline auto Gatherv +( + span sendData, + span& recvData, + span recvCounts, + span displs, + int root, + Comm comm) +{ + + return MPI_Gatherv( + sendData.data(), + sendData.size()*sFactor(), + Type(), + recvData.data(), + recvCounts.data(), + displs.data(), + Type(), + root, + comm + ); + +} + +inline auto Wait(Request* request, Status* status) +{ + return MPI_Wait(request, status); +} + +inline auto typeFree(DataType& type) +{ + return MPI_Type_free(&type); +} + + +} + + +#endif //__mpiCommunication_H__ diff --git a/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp b/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp new file mode 100644 index 00000000..873dd7eb --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/mpiTypes.hpp @@ -0,0 +1,69 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ + +#ifndef __mpiTypes_H__ +#define __mpiTypes_H__ + + + +#include + +namespace pFlow::MPI +{ + // types + using Comm = MPI_Comm; + using Group = MPI_Group; + using Status = MPI_Status; + using Offset = MPI_Offset; + using Request = MPI_Request; + using Operation = MPI_Op; + using Information = MPI_Info; + using DataType = MPI_Datatype; + + inline Comm CommWorld = MPI_COMM_WORLD; + + // all nulls + + inline auto ProcNull = MPI_PROC_NULL; + inline auto InfoNull = MPI_INFO_NULL; + inline auto RequestNull = MPI_REQUEST_NULL; + inline auto StatusIgnore = MPI_STATUS_IGNORE; + inline auto StatusesIgnore = MPI_STATUSES_IGNORE; + inline auto FileNull = MPI_FILE_NULL; + inline Comm CommNull = MPI_COMM_NULL; + inline auto TypeNull = MPI_DATATYPE_NULL; + + // errors + inline const auto Success = MPI_SUCCESS; + inline const auto ErrOp = MPI_ERR_OP; + + inline const auto SumOp = MPI_SUM; + + inline const size_t MaxNoProcessors = 2048; + +} + + + + + + + +#endif //__mpiTypes_H__ diff --git a/src/phasicFlow/MPIParallelization/MPI/procCommunication.cpp b/src/phasicFlow/MPIParallelization/MPI/procCommunication.cpp new file mode 100644 index 00000000..81869453 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/procCommunication.cpp @@ -0,0 +1,30 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ + +#include "procCommunication.hpp" + + +pFlow::MPI::procCommunication::procCommunication +( + const localProcessors& proc +) +: + processors_(proc) +{} diff --git a/src/phasicFlow/MPIParallelization/MPI/procCommunication.hpp b/src/phasicFlow/MPIParallelization/MPI/procCommunication.hpp new file mode 100644 index 00000000..80c0f513 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/procCommunication.hpp @@ -0,0 +1,178 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. 
+ + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ +#ifndef __procCommunication_hpp__ +#define __procCommunication_hpp__ + + +#include "procVector.hpp" +#include "localProcessors.hpp" +#include "mpiCommunication.hpp" + +namespace pFlow::MPI +{ + + +class procCommunication +{ +protected: + + const localProcessors& processors_; + +public: + + procCommunication(const localProcessors& proc); + + ~procCommunication()=default; + + /// @brief Tell if this processor is master processor in the local + /// communicator + /// @return true if this processor is master + + inline + const auto& processors()const + { + return processors_; + } + + inline + bool localMaster()const + { + return processors_.localMaster();; + } + + inline + auto localSize()const + { + return processors_.localSize(); + } + + inline + auto localRank()const + { + return processors_.localRank(); + } + + inline + auto localCommunicator()const + { + return processors_.localCommunicator(); + } + + /// @brief return the master number in the local communicator + auto localMasterNo()const + { + return processors_.localMasterNo(); + } + + /// Send a single val to all processors including itself (local communicator) + template + std::pair distributeMasterToAll(const T& val) + { + + T retVal = val; + auto res = CheckMPI( + Bcast(retVal, localMasterNo(),localCommunicator() ), + false); + + return {retVal, res}; + } + + /// @brief Send a single value to all processor including master (in local communicator) + /// @param val value to be sent + /// @param recvVal recieved value + /// @return true if successful and false if fail + template + bool distributeMasterToAll(const T& val, T& recvVal) + { + recvVal = val; + return CheckMPI( + Bcast(recvVal, localMasterNo(), localCommunicator()), + false); + } + + /// @brief values in the vector (size is equal to number of + // processors in local communicator) to each processor + template + std::pair distributeMasterToAll(const procVector& vals) + { + T val; + auto vec = vals.getSpan(); + auto res = CheckMPI( + scatter(vec, val, localMasterNo(), localCommunicator()), + false); + + return {val, res}; + } + + /// @brief Each processor in the local communicator calls this funtion with a value + /// and the values are distributed among all processors + template + std::pair, bool> collectAllToAll(const T& val) + { + procVector allVec(processors_); + auto vec = allVec.getSpan(); + auto res = CheckMPI( + allGather(val, vec, localCommunicator()), + false); + return {allVec, res}; + } + + /// @brief Each processor in the local communicator calls this funtion with a value + /// and the values are distributed among all processors + template + bool collectAllToAll(const T& val, procVector& allVec) + { + auto vec = allVec.getSpan(); + return CheckMPI( + allGather(val, vec, localCommunicator()), + false); + } + + /// @brief Each processor in the local communicator calls this function with a value + /// and all values are collected in the master processor + template + std::pair,bool> collectAllToMaster(const T& val) + { + // only on master processor + procVector masterVec(processors_, true); + + auto masterSpan = masterVec.getSpan(); + auto res = CheckMPI( + gather(val,masterSpan, localMasterNo(), localCommunicator()), + false); + + return {masterVec, res}; + 
+ } + + template + bool collectAllToMaster(const T& val, procVector& masterVec) + { + // only on master processor + auto [vec, res] = collectAllToMaster(val); + masterVec = vec; + return res; + } + +}; //procCommunication + +} // pFlow::MPI + +#endif //__procCommunication_hpp__ diff --git a/src/phasicFlow/MPIParallelization/MPI/procVector.hpp b/src/phasicFlow/MPIParallelization/MPI/procVector.hpp new file mode 100644 index 00000000..f9a80037 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/procVector.hpp @@ -0,0 +1,199 @@ +#ifndef __procVector_hpp__ +#define __procVector_hpp__ + +// from PhasicFlow + +#include "localProcessors.hpp" +#include "span.hpp" +#include "streams.hpp" +#include "IOPattern.hpp" + +#include "mpiTypes.hpp" + +namespace pFlow::MPI +{ + +template +class procVector +: + public std::vector +{ +public: + + using ProcVectorType = procVector; + + using VectorType = std::vector; + +protected: + + int rank_ = 0; + + bool isMaster_ = false; + + using VectorType::reserve; + + using VectorType::resize; + + using VectorType::assign; + + using VectorType::clear; + + using VectorType::erase; + +public: + + procVector( + const localProcessors& procs, + bool onlyMaster = false) + : + rank_(procs.localRank()), + isMaster_(procs.localMaster()) + { + + if( onlyMaster && !isMaster_ ) return; + this->reserve(procs.localSize()); + this->resize(procs.localSize()); + } + + procVector( + const T& val, + const localProcessors& procs, + bool onlyMaster = false) + : + procVector(procs, onlyMaster) + { + std::fill(this->begin(), this->end(), val); + } + + procVector(const T& val, const procVector& src) + { + this->reserve(src.size()); + this->resize(src.size()); + std::fill(this->begin(), this->end(), val); + } + + procVector(const localProcessors& procs, const VectorType& src) + : + procVector(procs) + { + if(src.size()!= this->size()) + { + fatalErrorInFunction<< + "Size of std::vector and procVector does not match in construction"<assign(src.begin(), src.end()); + } + + procVector(const procVector&) = default; + + procVector(procVector&&) = default; + + procVector& operator=(const procVector&) = default; + + procVector& operator=(procVector&&) = default; + + procVector& operator=(const VectorType& src) + { + if(src.size() != this->size()) + { + fatalErrorInFunction<< + "Size of std::vector and procVector does not match in copy assignment"<(*this).operator=(src); + return *this; + } + + procVector& operator=(VectorType&& src) + { + if(src.size() != this->size()) + { + fatalErrorInFunction<< + "Size of std::vector and procVector does not match in move assignment" + <(*this).operator=(std::move(src)); + return *this; + } + + procVector(const localProcessors& procs, VectorType&& src) + : + VectorType(std::move(src)) + { + if(this->size()!= static_cast(procs.localSize())) + { + fatalErrorInFunction<< + "Size of std::vector and procVector does not match in move"<(this->data(), this->size()); + } + + inline + auto getSpan()const + { + return span(const_cast(this->data()), this->size()); + } + + bool write( + iOstream& os, + const IOPattern& iop ) const + { + return writeStdVector(os, *this, iop); + } + +}; + +template +inline iOstream& operator << (iOstream& os, const procVector& ovec ) +{ + if( !ovec.write(os, IOPattern::AllProcessorsDifferent) ) + { + ioErrorInFile(os.name(), os.lineNumber()); + fatalExit; + } + return os; +} + +} + + +#endif diff --git a/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.cpp b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.cpp 
new file mode 100644 index 00000000..a771dc54 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.cpp @@ -0,0 +1,158 @@ + + +template +pFlow::MPI::scatteredMasterDistribute::scatteredMasterDistribute +( + const localProcessors& procs +) +: + procCommunication(procs), + indexedMap_(TypeNull, procs, true) +{ + +} + +template +bool pFlow::MPI::scatteredMasterDistribute::setDataMaps +( + procVector>& maps +) +{ + if(this->localMaster()) + { + if(maps.size() != this->localSize() ) + { + fatalErrorInFunction<<"size mismatch"; + return false; + } + + std::vector index; + + freeIndexedMap(); + + for(auto proc = 0; proc< maps.size(); proc++) + { + auto m = maps[proc]; + index.resize(m.size()); + for(auto i=0; i( makeSpan(index), dt)) + { + fatalErrorInFunction; + return false; + } + else + { + indexedMap_[proc] = dt; + } + } + } + return true; +} + + +template +bool pFlow::MPI::scatteredMasterDistribute::setDataMaps +( + procVector>& maps +) +{ + if(this->localMaster()) + { + if(maps.size() != this->localSize() ) + { + fatalErrorInFunction<<"size mismatch"; + return false; + } + + freeIndexedMap(); + + + for(auto proc = 0; proc< maps.size(); proc++) + { + DataType dt; + if( !typeCreateIndexedBlock(maps[proc], dt) ) + { + fatalErrorInFunction; + return false; + } + else + { + indexedMap_[proc] = dt; + } + } + } + return true; +} + +template +void pFlow::MPI::scatteredMasterDistribute::freeIndexedMap() +{ + for(auto i=0; i +bool pFlow::MPI::scatteredMasterDistribute::distribute +( + span& sendBuff, + span& recvb +) +{ + procVector requests(processors(), true); + procVector statuses(processors(), true); + + if(this->localMaster()) + { + bool res = true; + for(int32 i = indexedMap_.size()-1; i>=0; i--) + { + res = res&&CheckMPI( + MPI_Issend( + sendBuff.data(), + 1, + indexedMap_[i], + i, + 0, + localCommunicator(), + &requests[i]), + false); + } + + if(!res)return false; + } + + Status stat; + bool sucss = CheckMPI( + MPI_Recv( + recvb.data(), + recvb.size()*sFactor(), + Type(), + 0, + 0, + localCommunicator(), + &stat), + false); + + if(this->localMaster()) + { + CheckMPI( + MPI_Waitall(requests.size(), requests.data(), statuses.data()), + false + ); + } + + return sucss; +} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.hpp b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.hpp new file mode 100644 index 00000000..146ce56c --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistribute.hpp @@ -0,0 +1,67 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +-----------------------------------------------------------------------------*/ + +#ifndef __scatteredMasterDistribute_hpp__ +#define __scatteredMasterDistribute_hpp__ + +#include "mpiCommunication.hpp" +#include "procCommunication.hpp" +#include "procVector.hpp" +#include "stdVectorHelper.hpp" +#include "streams.hpp" + +namespace pFlow::MPI +{ + +template +class scatteredMasterDistribute : public procCommunication +{ +protected: + + procVector indexedMap_; + + void freeIndexedMap(); + +public: + + scatteredMasterDistribute(const localProcessors& procs); + + ~scatteredMasterDistribute() + { + freeIndexedMap(); + } + + scatteredMasterDistribute(const scatteredMasterDistribute&) = delete; + + scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = + delete; + + bool setDataMaps(procVector>& maps); + + bool setDataMaps(procVector>& maps); + + bool distribute(span& sendBuff, span& recvb); +}; + +} // pFlow::MPI + +#include "scatteredMasterDistribute.cpp" + +#endif //__scatteredMasterDistribute_hpp__ diff --git a/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.cpp b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.cpp new file mode 100644 index 00000000..7579e8d5 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.cpp @@ -0,0 +1,166 @@ + +#include "scatteredMasterDistributeChar.hpp" + +pFlow::MPI::scatteredMasterDistribute::scatteredMasterDistribute +( + size_t sizeOfElement, + const localProcessors& procs +) +: + procCommunication(procs), + indexedMap_(TypeNull, procs, true), + sizeOfElement_(sizeOfElement) +{} + + +bool pFlow::MPI::scatteredMasterDistribute::setDataMaps +( + procVector>& maps +) +{ + if(this->localMaster()) + { + if(maps.size() != this->localSize() ) + { + fatalErrorInFunction<<"size mismatch"; + return false; + } + + freeIndexedMap(); + + std::vector index; + + for(auto proc = 0; proc< maps.size(); proc++) + { + auto m = maps[proc]; + index.resize(m.size()); + for(auto i=0; i::setDataMaps +( + procVector>& maps +) +{ + if(this->localMaster()) + { + if(maps.size() != this->localSize() ) + { + fatalErrorInFunction<<"size mismatch"; + return false; + } + + std::vector index; + freeIndexedMap(); + + for(auto proc = 0; proc< maps.size(); proc++) + { + + auto m = maps[proc]; + index.resize(m.size()); + for(auto i=0; i::freeIndexedMap() +{ + for(auto i=0; i::distribute +( + span& sendBuff, + span& recvb +) +{ + procVector requests(processors(), true); + procVector statuses(processors(), true); + + + if(this->localMaster()) + { + bool res = true; + for(int32 i = indexedMap_.size()-1; i>=0; i--) + { + res = res&&CheckMPI( + MPI_Issend( + sendBuff.data(), + 1, + indexedMap_[i], + i, + 0, + localCommunicator(), + &requests[i]), + false); + } + + if(!res)return false; + } + + Status stat; + bool sucss = CheckMPI( + MPI_Recv( + recvb.data(), + recvb.size(), + MPI_CHAR, + 0, + 0, + localCommunicator(), + &stat), + true); + + if(this->localMaster()) + { + CheckMPI( + MPI_Waitall(requests.size(), requests.data(), statuses.data()), + false + ); + } + + return sucss; +} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.hpp b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.hpp new file mode 100644 index 00000000..0ea1a770 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/MPI/scatteredMasterDistributeChar.hpp @@ -0,0 +1,66 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of 
+ O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ + +#ifndef __scatteredMasterDistributeChar_hpp__ +#define __scatteredMasterDistributeChar_hpp__ + +#include "scatteredMasterDistribute.hpp" + +namespace pFlow::MPI +{ + +template<> +class scatteredMasterDistribute : public procCommunication +{ +protected: + + procVector indexedMap_; + + size_t sizeOfElement_; + + void freeIndexedMap(); + +public: + + scatteredMasterDistribute( + size_t sizeOfElement, + const localProcessors& procs + ); + + ~scatteredMasterDistribute() + { + freeIndexedMap(); + } + + scatteredMasterDistribute(const scatteredMasterDistribute&) = delete; + + scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = + delete; + + bool setDataMaps(procVector>& maps); + + bool setDataMaps(procVector>& maps); + + bool distribute(span& sendBuff, span& recvb); +}; + +} // pFlow::MPI + +#endif //__scatteredMasterDistributeChar_hpp__ diff --git a/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.cpp b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.cpp new file mode 100644 index 00000000..eb5e074c --- /dev/null +++ b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.cpp @@ -0,0 +1,52 @@ + +template +bool pFlow::MPI::dataIOMPI::gatherData(span data ) +{ + + if(this->ioPattern_.isAllProcessorsDifferent()) + { + this->bufferSpan_ = data; + return true; + } + + if( this->ioPattern_.isMasterProcessorDistribute()) + { + + auto gatherT = pFlow::MPI::gatherMaster(pFlowProcessors()); + + if(!gatherT.gatherData(data)) + { + fatalErrorInFunction<<"Error in gathering data to master"<buffer_ = gatherT.moveData(); + this->bufferSpan_ = span(this->buffer_.data(),this->buffer_.size() ); + + return true; + + } + + if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() ) + { + if( this->ioPattern_.isMaster() ) + { + this->bufferSpan_ = data; + return true; + } + else + { + this->bufferSpan_ = span(nullptr, 0); + return true; + } + } + + return false; + +} + +template +pFlow::MPI::dataIOMPI::dataIOMPI(const IOPattern& iop) +: + dataIO(iop) +{} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.hpp b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.hpp new file mode 100644 index 00000000..1bfeb94d --- /dev/null +++ b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPI.hpp @@ -0,0 +1,58 @@ +#ifndef __datIOMPI_hpp__ +#define __datIOMPI_hpp__ + +#include "dataIO.hpp" +#include "pFlowProcessors.hpp" +#include "gatherMaster.hpp" + + +namespace pFlow::MPI +{ + +template +class dataIOMPI +: + public dataIO +{ +public: + + using DataIOType = dataIO; + + using DataIOMPIType = dataIOMPI; + +protected: + + bool gatherData(span data ) override; + +public: + + 
TypeInfoTemplate111("dataIO",T,"MPI"); + + explicit dataIOMPI(const IOPattern& iop); + + dataIOMPI(const dataIOMPI&) = default; + + dataIOMPI(dataIOMPI&&) = default; + + + dataIOMPI& operator=(const dataIOMPI&) = default; + + dataIOMPI& operator=(dataIOMPI&&) = default; + + ~dataIOMPI() = default; + + add_vCtor + ( + DataIOType, + DataIOMPIType, + IOPattern + ); + +}; //dataIOMPI + + +} //namespace pFlow::MPI + +#include "dataIOMPI.cpp" + +#endif //__datIOMPI_hpp__ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPIs.cpp b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPIs.cpp new file mode 100644 index 00000000..73d307f2 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/dataIOMPI/dataIOMPIs.cpp @@ -0,0 +1,27 @@ + +#include "types.hpp" +#include "dataIOMPI.hpp" + + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; + +template class pFlow::MPI::dataIOMPI; \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp new file mode 100644 index 00000000..3e23d15f --- /dev/null +++ b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.cpp @@ -0,0 +1,450 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
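For the MasterProcessorDistribute pattern, gatherData above funnels every rank's span to the master through gatherMaster and then points the IO buffer at the gathered copy. A condensed sketch of that step follows; it is illustrative only, the container type returned by moveData() is not shown in this patch and is therefore held with auto, and the helper name is made up.

    // Sketch of the gather step used by dataIOMPI::gatherData.
    #include "gatherMaster.hpp"
    #include "pFlowProcessors.hpp"
    #include "span.hpp"

    template<typename T>
    bool collectAndWrite(pFlow::span<T> localData)
    {
        auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());

        if (!gatherT.gatherData(localData))
        {
            return false;                // gather failed, error already reported
        }

        // keep the gathered storage alive while a span refers to it
        auto buffer = gatherT.moveData();
        auto all    = pFlow::span<T>(buffer.data(), buffer.size());

        // on the master, 'all' now covers the data of every rank and can be
        // handed to the write path; other ranks see an empty buffer
        return true;
    }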
+ +-----------------------------------------------------------------------------*/ + +#include "MPISimulationDomain.hpp" +#include "systemControl.hpp" +#include "rcb1DPartitioning.hpp" +#include "scatteredMasterDistribute.hpp" +#include "scatteredMasterDistributeChar.hpp" + +pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control) +: + simulationDomain(control), + communication_(pFlowProcessors()), + subDomainsAll_(pFlowProcessors()), + numPointsAll_(pFlowProcessors()), + domainPartitioning_( makeUnique(subDict("decomposition"), globalBox())) +{} + +bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts() +{ + auto& boundaries = this->subDict("boundaries"); + + this->addDict("MPIBoundaries", boundaries); + auto& mpiBoundaries = this->subDict("MPIBoundaries"); + + real neighborLength = boundaries.getVal("neighborLength"); + + auto neighbors = findPlaneNeighbors(); + + for(uint32 i=0; i("type") == "periodic") + { + fatalErrorInFunction<< + "periodic is not implemented "<localBox()); + uint32 thisNumPoints = initialNumberInThis(); + + if(!communication_.collectAllToAll(thisNumPoints, numPointsAll_)) + { + fatalErrorInFunction<< + "Failed to distribute number of points."< pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const +{ + + std::vector neighbors(sizeOfBoundaries(), -2); + domain gDomain(globalBox()); + + // left + if( thisDomain_.left().parallelTouch( gDomain.left() ) ) + { + neighbors[0] = -1; + } + + for(int i=0; isubDict("MPIBoundaries"); +} + +bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span pointPos) +{ + pFlagTypeHost flags(pointPos.size(), 0 , pointPos.size()); + initialNumPoints_ = pointPos.size(); + if( !domainPartitioning_->partition(pointPos, flags) ) + { + fatalErrorInFunction<< + "Point partitioning failed."<numberImportThisProc(); + uint32 numExport = domainPartitioning_->numberExportThisProc(); + return max(initialNumPoints_+ numImport - numExport, 0u); +} + +bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData +( + span src, + span dst, + size_t sizeOfElement +)const +{ + MPI::scatteredMasterDistribute dataDist(sizeOfElement, pFlowProcessors()); + + auto lists = domainPartitioning_->allExportLists(); + + if(!dataDist.setDataMaps( lists )) + { + fatalErrorInFunction; + return false; + } + + if(!dataDist.distribute(src, dst)) + { + fatalErrorInFunction<< + "Error in distribute"< src, + span dst +)const +{ + + MPI::scatteredMasterDistribute + dataDist(pFlowProcessors()); + auto lists = domainPartitioning_->allExportLists(); + + if(!dataDist.setDataMaps( lists )) + { + fatalErrorInFunction; + return false; + } + + if(!dataDist.distribute(src, dst)) + { + fatalErrorInFunction<< + "Error in distribute"< src, + span dst +)const +{ + MPI::scatteredMasterDistribute + dataDist(pFlowProcessors()); + + auto lists = domainPartitioning_->allExportLists(); + + if(!dataDist.setDataMaps( lists )) + { + fatalErrorInFunction; + return false; + } + + if(!dataDist.distribute(src, dst)) + { + fatalErrorInFunction<< + "Error in distribute"< src, + span dst +)const +{ + MPI::scatteredMasterDistribute + dataDist(pFlowProcessors()); + + auto lists = domainPartitioning_->allExportLists(); + + if(!dataDist.setDataMaps( lists )) + { + fatalErrorInFunction; + return false; + } + + if(!dataDist.distribute(src, dst)) + { + fatalErrorInFunction<< + "Error in distribute"< src, + span dst +)const +{ + MPI::scatteredMasterDistribute + dataDist(pFlowProcessors()); + + auto lists = domainPartitioning_->allExportLists(); + + if(!dataDist.setDataMaps( 
lists )) + { + fatalErrorInFunction; + return false; + } + + if(!dataDist.distribute(src, dst)) + { + fatalErrorInFunction<< + "Error in distribute"<numberImportThisProc(); +} + +pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeExported() const +{ + return domainPartitioning_->numberExportThisProc(); +} + +bool +pFlow::MPI::MPISimulationDomain::domainActive() const +{ + return thisDomainActive_; +} + +const pFlow::domain& +pFlow::MPI::MPISimulationDomain::thisDomain() const +{ + return thisDomain_; +} diff --git a/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp new file mode 100644 index 00000000..bab83611 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/domain/MPISimulationDomain.hpp @@ -0,0 +1,118 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ +#ifndef __MPISimulationDomain_hpp__ +#define __MPISimulationDomain_hpp__ + +#include "partitioning.hpp" +#include "procCommunication.hpp" +#include "procVector.hpp" +#include "simulationDomain.hpp" + +namespace pFlow::MPI +{ + +class MPISimulationDomain : public simulationDomain +{ +private: + + /// a processor communcator for simulation domain + procCommunication communication_; + + /// sub-domain (thisDomain_ for all processors) + procVector subDomainsAll_; + + /// number of points in all processors + procVector numPointsAll_; + + /// partitioning object + uniquePtr domainPartitioning_ = nullptr; + + /// the acutal limits of the simulation domain in this processor + domain thisDomain_; + + uint32 initialNumPoints_ = 0; + + bool thisDomainActive_ = false; + + bool createBoundaryDicts() final; + + bool setThisDomain() final; + + std::vector findPlaneNeighbors() const; + +public: + + TypeInfo("simulationDomain"); + + explicit MPISimulationDomain(systemControl& control); + + ~MPISimulationDomain() final = default; + + add_vCtor + ( + simulationDomain, + MPISimulationDomain, + systemControl + ); + + const dictionary& thisBoundaryDict() const final; + + /// @brief + /// @param pointPos + /// @return + bool initialUpdateDomains(span pointPos) final; + + /// @brief + /// @return + uint32 initialNumberInThis() const final; + + bool initialTransferBlockData( + span src, + span dst, + size_t sizeOfElement + ) const final; + + bool initialTransferBlockData(span src, span dst) + const final; + + bool initialTransferBlockData(span src, span dst) + const final; + + bool initialTransferBlockData(span src, span dst) + const final; + + bool initialTransferBlockData(span src, span dst) + const final; + + uint32 numberToBeImported() const final; + + uint32 numberToBeExported() 
const final; + + /// @brief Is this domain active? + /// Active mean, there is particle in it and + /// boundaries and other entities of simulation domains are valid + bool domainActive() const final; + + const domain& thisDomain()const final; +}; + +} // namespace pFlow::MPI + +#endif // \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/domain/partitioning/partitioning.cpp b/src/phasicFlow/MPIParallelization/domain/partitioning/partitioning.cpp new file mode 100644 index 00000000..0ae5cf82 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/domain/partitioning/partitioning.cpp @@ -0,0 +1,113 @@ + + +#include "partitioning.hpp" +#include "error.hpp" +#include "streams.hpp" + +void pFlow::partitioning::freeZoltan() +{ + if(validPointers_) + { + Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_, + &importProcs_, &importToPart_); + + Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_, + &exportProcs_, &exportToPart_); + validPointers_ = false; + } + + zoltan_.release(); +} + + +pFlow::partitioning::partitioning +( + const dictionary& dict, + const box& globalBox +) +: + globalBox_(globalBox) +{ + if(!zoltanInitialized__) + { + auto rc = Zoltan_Initialize + ( + processors::argc(), + processors::argv(), + &version_ + ); + + if (rc != ZOLTAN_OK) + { + fatalErrorInFunction<<"Cannot initialize zoltan"<(pFlowProcessors().localCommunicator()); + + zoltan_->Set_Param("DEBUG_LEVEL", "0"); + zoltan_->Set_Param("LB_METHOD", "RCB"); + zoltan_->Set_Param("NUM_GID_ENTRIES", "1"); + zoltan_->Set_Param("NUM_LID_ENTRIES", "1"); + zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0"); + zoltan_->Set_Param("RETURN_LISTS", "ALL"); + +} + +bool pFlow::partitioning::partition(span points, pFlagTypeHost flags) +{ + pointCollection pointCollctn{points, flags}; + + return partition(pointCollctn); +} +int GetObjectSize +( + void *data, + int num_gid_entries, + int num_lid_entries, + ZOLTAN_ID_PTR global_id, + ZOLTAN_ID_PTR local_id, + int *ierr +) +{ + *ierr = ZOLTAN_OK; + pFlow::uint32 s = *(static_cast(data)); + return static_cast(s); +} + +void PackObject +( + void *data, + int num_gid_entries, + int num_lid_entries, + ZOLTAN_ID_PTR global_id, + ZOLTAN_ID_PTR local_id, + int dest, + int size, + char *buf, + int *ierr +) +{ + +} + +bool pFlow::partitioning::migrateData(span src, span dst, uint32 elementSize) +{ + dataCollection data{src, dst, elementSize}; + + zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize); + return false; +} + +pFlow::partitioning::~partitioning() +{ + freeZoltan(); +} + +void pFlow::partitioning::printBox()const +{ + pOutput<< "localBox:" << localBox_< points_; + pFlagTypeHost pFlag_; + + uint32 numActivePoints()const + { + return pFlag_.numActive(); + } +}; + +struct dataCollection +{ + span srcData_; + span dstData_; + uint32 elementSize_; +}; + +class partitioning +{ +protected: + + float version_ = 0.0; + + std::unique_ptr zoltan_ = nullptr; + + bool validPointers_ = false; + + box globalBox_; + + box localBox_; + + int32 changes_, numImport_, numExport_; + + id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_; + + int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_; + + uint32 numBeforePartition_ = 0 ; + + static inline bool zoltanInitialized__ = false; + + void freeZoltan(); + + virtual + bool partition(pointCollection& points) = 0; + +public: + + partitioning( + const dictionary& dict, + const box& globalBox); + + virtual + ~partitioning(); + + create_vCtor( + partitioning, + dictionary, + ( + const 
dictionary& dict, + const box& globalBox + ), + (dict, globalBox)); + + bool partition( + span points, + pFlagTypeHost flags); + + + bool migrateData(span src, span dst, uint32 elementSize); + + inline + auto localBox()const + { + return localBox_; + } + + inline + const auto& globalBox()const + { + return globalBox_; + } + + inline + bool partitionsChanged()const + { + return changes_ == 1; + } + + + uint32 numberImportThisProc()const + { + return numImport_; + } + + uint32 numberExportThisProc()const + { + return numExport_; + } + + virtual + span exportList(int procNo)const = 0; + + virtual + pFlow::MPI::procVector> allExportLists()const=0; + + void printBox()const; + + +}; + + +} + + +#endif //__partitioning_hpp__ + + + +/*static + int getNumberOfPoints(void *data, int32 *ierr); + + static + void getPointList( + void *data, + int32 sizeGID, + int32 sizeLID, + id_t* globalID, + id_t* localID, + int32 wgt_dim, + float *obj_wgts, + int32 *ierr);*/ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp b/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp new file mode 100644 index 00000000..ba147512 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp @@ -0,0 +1,330 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
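The partitioning base class above is driven in three moves: construct it from the "decomposition" dictionary and the global box (Zoltan is initialized once and the load-balancing parameters are set), call partition() on the point positions, then query the import/export counts, the local box and the per-processor export lists. The sketch below mirrors what MPISimulationDomain::initialUpdateDomains does earlier in this patch; the local variable names are illustrative and error handling is trimmed.

    // Usage sketch for the partitioning interface.
    #include "rcb1DPartitioning.hpp"
    #include "streams.hpp"

    void decomposeOnce(const pFlow::dictionary&   decompositionDict,
                       const pFlow::box&          globalBox,
                       pFlow::span<pFlow::realx3> points)
    {
        using namespace pFlow;

        rcb1DPartitioning partitioner(decompositionDict, globalBox);

        // here all points are flagged active, as in the initial decomposition
        pFlagTypeHost flags(points.size(), 0, points.size());

        if (!partitioner.partition(points, flags))
        {
            return;
        }

        uint32 nImport = partitioner.numberImportThisProc();
        uint32 nExport = partitioner.numberExportThisProc();
        pOutput << "to import: " << nImport << " to export: " << nExport << endl;

        partitioner.printBox();                      // reports the local RCB slab

        auto lists = partitioner.allExportLists();   // feeds setDataMaps(), see below
    }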
+ +-----------------------------------------------------------------------------*/ + +#include "zoltan_cpp.h" + + +#include "error.hpp" +#include "processors.hpp" +#include "rcb1DPartitioning.hpp" + +bool pFlow::rcb1DPartitioning::partition(pointCollection &points) +{ + + zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0"); + zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1"); + zoltan_->Set_Param("KEEP_CUTS", "1"); + zoltan_->Set_Param("REDUCE_DIMENSIONS", "1"); + zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1"); + zoltan_->Set_Param("AVERAGE_CUTS", "0"); + zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0"); + + zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points); + zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points); + zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points); + switch (direction_) + { + case Direction::X: + zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points); + break; + case Direction::Y: + zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points); + break; + case Direction::Z: + zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points); + break; + } + + int numGidEntries_, numLidEntries_; + int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_, + numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_, + numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_); + + + if (rc != ZOLTAN_OK) + { + fatalErrorInFunction<< "Zoltan faild to perform partitioning."< thisProc(points.numActivePoints(),-1); + + for(auto i =0; iRCB_Box + ( + processors::globalRank(), + nDim, + x0, y0, z0, + x1, y1, z1 + ); + + localBox_ = globalBox_; + + if(equal(x0, x1)) + { + x0 = x0 - 0.00001; + x1 = x1 + 0.00001; + } + + switch (direction_) + { + case Direction::X : + localBox_.minPoint().x_ = x0; + localBox_.maxPoint().x_ = x1; + break; + + case Direction::Y : + localBox_.minPoint().y_ = x0; + localBox_.maxPoint().y_ = x1; + break; + + case Direction::Z : + localBox_.minPoint().z_ = x0; + localBox_.maxPoint().z_ = x1; + break; + } + + + localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint()); + localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint()); + + + return true; +} + +pFlow::rcb1DPartitioning::rcb1DPartitioning +( + const dictionary &dict, + const box &globalBox +) +: + partitioning(dict, globalBox), + exportIds_(pFlowProcessors()) +{ + + word directionName = dict.getVal("direction"); + + if(toUpper(directionName)== "X") + { + direction_ = Direction::X; + dirVector_ ={1.0, 0.0, 0.0}; + } + else if( toUpper(directionName) == "Y") + { + direction_ = Direction::Y; + dirVector_ ={0.0, 1.0, 0.0}; + } + else if( toUpper(directionName) == "Z") + { + direction_ = Direction::Z; + dirVector_ ={0.0, 0.0, 1.0}; + } + else + { + fatalErrorInFunction<< "wrong direction in dictionary "<< + dict.globalName()<<". 
Directions should be one of x, y, or z."<(data); + + *ierr = ZOLTAN_OK; + + return obj->numActivePoints(); +} + +void pFlow::rcb1DPartitioning::getPointList +( + void *data, + int sizeGID, + int sizeLID, + ZOLTAN_ID_PTR globalID, + ZOLTAN_ID_PTR localID, + int wgt_dim, + float *obj_wgts, + int *ierr +) +{ + auto* obj = static_cast(data); + *ierr = ZOLTAN_OK; + + auto activeRange = obj->pFlag_.activeRange(); + uint32 n = 0; + for (auto i=activeRange.start(); ipFlag_.isActive(i) ) + { + globalID[n] = i; + localID[n] = n; + n++; + } + } + +} + +void pFlow::rcb1DPartitioning::getGeometryList_x +( + void *data, + int sizeGID, + int sizeLID, + int num_obj, + ZOLTAN_ID_PTR globalID, + ZOLTAN_ID_PTR localID, + int num_dim, + double *geom_vec, + int *ierr +) +{ + + auto* obj = static_cast(data); + + if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) + { + *ierr = ZOLTAN_FATAL; + return; + } + + auto activeRange = obj->pFlag_.activeRange(); + uint32 n = 0; + for (auto i=activeRange.start(); ipFlag_.isActive(i) ) + { + geom_vec[n] = obj->points_[i].x_; + n++; + } + } + + *ierr = ZOLTAN_OK; + + return; +} + +void pFlow::rcb1DPartitioning::getGeometryList_y +( + void *data, + int sizeGID, + int sizeLID, + int num_obj, + ZOLTAN_ID_PTR globalID, + ZOLTAN_ID_PTR localID, + int num_dim, + double *geom_vec, + int *ierr +) +{ + + auto* obj = static_cast(data); + + if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) + { + *ierr = ZOLTAN_FATAL; + return; + } + + auto activeRange = obj->pFlag_.activeRange(); + uint32 n = 0; + for (auto i=activeRange.start(); ipFlag_.isActive(i) ) + { + geom_vec[n] = obj->points_[i].y_; + n++; + } + } + + *ierr = ZOLTAN_OK; + + return; +} + +void pFlow::rcb1DPartitioning::getGeometryList_z +( + void *data, + int sizeGID, + int sizeLID, + int num_obj, + ZOLTAN_ID_PTR globalID, + ZOLTAN_ID_PTR localID, + int num_dim, + double *geom_vec, + int *ierr +) +{ + + auto* obj = static_cast(data); + + if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) + { + *ierr = ZOLTAN_FATAL; + return; + } + + auto activeRange = obj->pFlag_.activeRange(); + uint32 n = 0; + for (auto i=activeRange.start(); ipFlag_.isActive(i) ) + { + geom_vec[n] = obj->points_[i].z_; + n++; + } + } + + *ierr = ZOLTAN_OK; + + return; +} + diff --git a/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.hpp b/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.hpp new file mode 100644 index 00000000..b58532e3 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/domain/partitioning/rcb1DPartitioning.hpp @@ -0,0 +1,240 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
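After LB_Partition returns, each exported point id is appended to exportIds_ under its destination processor, and allExportLists() exposes those lists as spans. Those spans are exactly what scatteredMasterDistribute::setDataMaps consumes to build one indexed datatype per rank before distribute() ships the master's data. The sketch below ties the two pieces together; it mirrors MPISimulationDomain::initialTransferBlockData from earlier in this patch, with realx3 chosen for illustration and error handling trimmed.

    // From export lists to a master-to-processors transfer.
    #include "partitioning.hpp"
    #include "scatteredMasterDistribute.hpp"
    #include "pFlowProcessors.hpp"

    void transferPositions(pFlow::partitioning&       partitioner,
                           pFlow::span<pFlow::realx3> src,   // master's data
                           pFlow::span<pFlow::realx3> dst)   // this rank's slice
    {
        using namespace pFlow;

        // index lists of the points destined for each processor;
        // setDataMaps() only consumes them on the master
        auto lists = partitioner.allExportLists();

        MPI::scatteredMasterDistribute<realx3> dataDist(pFlowProcessors());

        if (!dataDist.setDataMaps(lists))   return;   // datatype creation failed
        if (!dataDist.distribute(src, dst)) return;   // MPI transfer failed
    }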
+ +-----------------------------------------------------------------------------*/ +#ifndef __rcb1DPartitioning_hpp__ +#define __rcb1DPartitioning_hpp__ + +#include "partitioning.hpp" +#include "procVector.hpp" + +namespace pFlow +{ + + +class rcb1DPartitioning +: +public partitioning +{ +public: + + enum Direction + { + X = 0, + Y = 1, + Z = 2 + }; + +protected: + + /// Direction of partitioning + Direction direction_ = Direction::X; + + realx3 dirVector_ = {1.0, 0.0, 0.0}; + + word directionName_ = "x"; + + MPI::procVector> exportIds_; + + bool partition(pointCollection& points) override; + +public: + + + rcb1DPartitioning( + const dictionary& dict, + const box& globalBox); + + + ~rcb1DPartitioning() override=default; + + span exportList(int procNo)const override + { + return span( + const_cast(exportIds_[procNo].data()), + exportIds_[procNo].size()); + } + + + pFlow::MPI::procVector> allExportLists()const override + { + pFlow::MPI::procVector> allList(pFlowProcessors()); + + for(int i=0; i(data); + + if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) + { + *ierr = ZOLTAN_FATAL; + return; + } + + *ierr = ZOLTAN_OK; + + for (int i=0; i < num_obj ; i++) + { + geom_vec[i] = obj->pointList()[i].y_; + } + + return; + } + + + static + int getNumGeometry(void *data, int *ierr) + { + *ierr = ZOLTAN_OK; + return 1; + } + +}; + + +class RCB_x_partitioning +: +public partitioning +{ +public: + + + RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox) + : + partitioning(argc, argv, collection, gBox) + {} + + virtual + ~RCB_x_partitioning()=default; + + + bool partition() override; + + + static + void getGeometryList( + void *data, + int sizeGID, + int sizeLID, + int num_obj, + ZOLTAN_ID_PTR globalID, + ZOLTAN_ID_PTR localID, + int num_dim, + double *geom_vec, + int *ierr); + + static + int getNumGeometry(void *data, int *ierr); + + +};*/ + +} // pFlow +#endif //__rcb1DPartitioning_hpp__ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp new file mode 100644 index 00000000..2595ebaa --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.cpp @@ -0,0 +1,110 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
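On the input side, rcb1DPartitioning reads the cut direction from the "decomposition" sub-dictionary that MPISimulationDomain passes to it, and the boundary setup reads "neighborLength" from the "boundaries" sub-dictionary. The fragment below is only an illustration of those keywords; the surrounding dictionary layout and the numeric value are assumptions, not part of the patch.

    // Illustrative dictionary fragment (layout assumed).
    decomposition
    {
        direction      x;     // RCB cut along x; y and z are also accepted
    }

    boundaries
    {
        neighborLength 0.004; // used when building the MPIBoundaries dictionary
    }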
+ +-----------------------------------------------------------------------------*/ + +template +void +pFlow::MPI::processorBoundaryField::checkDataRecieved() const +{ + if (!dataRecieved_) + { + //uint32 nRecv = reciever_.waitComplete(); + dataRecieved_ = true; + /*if (nRecv != this->neighborProcSize()) + { + fatalErrorInFunction; + fatalExit; + }*/ + } +} + +template +bool +pFlow::MPI::processorBoundaryField::updateBoundary( + int step, + DataDirection direction +) +{ + /*if (step == 1) + { + // Isend + if (direction == DataDirection::TwoWay || + ( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) || + (!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster)) + { + sender_.sendData(pFlowProcessors(), this->thisField()); + dataRecieved_ = false; + } + } + else if (step == 2) + { + // Irecv + if (direction == DataDirection::TwoWay || + (!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) || + ( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster)) + { + reciever_.recieveData(pFlowProcessors(), this->neighborProcSize()); + dataRecieved_ = false; + } + } + else + { + fatalErrorInFunction << "Invalid step number " << step << endl; + return false; + }*/ + + return true; +} + +template +pFlow::MPI::processorBoundaryField::processorBoundaryField( + const boundaryBase& boundary, + const pointStructure& pStruct, + InternalFieldType& internal +) + : BoundaryFieldType(boundary, pStruct, internal), + sender_( + groupNames("sendBufferField", boundary.name()), + boundary.neighborProcessorNo(), + boundary.thisBoundaryIndex() + ), + reciever_( + groupNames("neighborProcField", boundary.name()), + boundary.neighborProcessorNo(), + boundary.mirrorBoundaryIndex() + ) +{ +} + +template +typename pFlow::MPI::processorBoundaryField::ProcVectorType& +pFlow::MPI::processorBoundaryField::neighborProcField() +{ + checkDataRecieved(); + return reciever_.buffer(); +} + +template +const typename pFlow::MPI::processorBoundaryField:: + ProcVectorType& + pFlow::MPI::processorBoundaryField::neighborProcField() const +{ + checkDataRecieved(); + return reciever_.buffer(); +} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp new file mode 100644 index 00000000..5fb0780a --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryField.hpp @@ -0,0 +1,113 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
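The processor boundary field is meant to take part in the two-step protocol driven by boundaryFieldList::updateBoundaries: step 1 posts the send of this side's values, step 2 posts the receive of the neighbor's, and the data direction together with the master/slave role of the boundary decides which side participates. The body is still commented out in the patch; the sketch below only restates that gating to make the intended protocol explicit, using the DataDirection enum added elsewhere in this patch, with made-up helper names.

    // Gating of the two-step exchange, mirroring the commented-out logic above.
    #include "boundaryField.hpp"   // for pFlow::DataDirection

    bool shouldSend(bool isBoundaryMaster, pFlow::DataDirection direction)
    {
        using pFlow::DataDirection;
        return direction == DataDirection::TwoWay
            || ( isBoundaryMaster && direction == DataDirection::MasterToSlave)
            || (!isBoundaryMaster && direction == DataDirection::SlaveToMaster);
    }

    bool shouldRecv(bool isBoundaryMaster, pFlow::DataDirection direction)
    {
        using pFlow::DataDirection;
        return direction == DataDirection::TwoWay
            || (!isBoundaryMaster && direction == DataDirection::MasterToSlave)
            || ( isBoundaryMaster && direction == DataDirection::SlaveToMaster);
    }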
+ +-----------------------------------------------------------------------------*/ +#ifndef __processorBoundaryField_hpp__ +#define __processorBoundaryField_hpp__ + +#include "boundaryField.hpp" +#include "dataSender.hpp" +#include "dataReciever.hpp" + +namespace pFlow::MPI +{ + +template< class T, class MemorySpace = void> +class processorBoundaryField +: + public boundaryField +{ +public: + + using processorBoundaryFieldType = processorBoundaryField; + + using BoundaryFieldType = boundaryField; + + using InternalFieldType = typename BoundaryFieldType::InternalFieldType; + + using memory_space = typename BoundaryFieldType::memory_space; + + using execution_space = typename BoundaryFieldType::execution_space; + + using FieldAccessType = typename BoundaryFieldType::FieldAccessType; + + using ProcVectorType = typename BoundaryFieldType::ProcVectorType; + +private: + + dataSender sender_; + + mutable dataReciever reciever_; + + mutable bool dataRecieved_ = true; + + void checkDataRecieved()const; + + bool updateBoundary(int step, DataDirection direction); + + +public: + + TypeInfoTemplate211("boundaryField","processor", T, memory_space::name()); + + processorBoundaryField( + const boundaryBase& boundary, + const pointStructure& pStruct, + InternalFieldType& internal); + + + ~processorBoundaryField()override = default; + + add_vCtor + ( + BoundaryFieldType, + processorBoundaryFieldType, + boundaryBase + ); + + ProcVectorType& neighborProcField() override; + + + const ProcVectorType& neighborProcField()const override; + + bool hearChanges + ( + real t, + real dt, + uint32 iter, + const message& msg, + const anyList& varList + ) override + { + BoundaryFieldType::hearChanges(t,dt,iter, msg,varList); + + if(msg.equivalentTo(message::BNDR_DELETE)) + { + // do nothing; + } + + return true; + } + +}; + +} + +#include "processorBoundaryField.cpp" + +#endif //__processorBoundaryField_hpp__ diff --git a/src/phasicFlow/MPIParallelization/pointField/processorBoundaryFields.cpp b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryFields.cpp new file mode 100644 index 00000000..f07f20d9 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointField/processorBoundaryFields.cpp @@ -0,0 +1,24 @@ + +//#include "Field.hpp" + +#include "processorBoundaryField.hpp" + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + +template class pFlow::MPI::processorBoundaryField; +template class pFlow::MPI::processorBoundaryField; + + diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp new file mode 100644 index 00000000..50098e0a --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp @@ -0,0 +1,148 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): 
www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ + +#include "boundaryProcessor.hpp" +#include "dictionary.hpp" +#include "mpiCommunication.hpp" + +void +pFlow::MPI::boundaryProcessor::checkSize() const +{ + if (!sizeObtained_) + { + //MPI_Wait(&sizeRequest_, StatusIgnore); + sizeObtained_ = true; + } +} + +void +pFlow::MPI::boundaryProcessor::checkDataRecieved() const +{ + if (!dataRecieved_) + { + //uint32 nRecv = reciever_.waitComplete(); + dataRecieved_ = true; + /*if (nRecv != neighborProcSize()) + { + fatalErrorInFunction; + fatalExit; + }*/ + } +} + +pFlow::MPI::boundaryProcessor::boundaryProcessor( + const dictionary& dict, + const plane& bplane, + internalPoints& internal, + boundaryList& bndrs, + uint32 thisIndex +) + : boundaryBase(dict, bplane, internal, bndrs, thisIndex), + sender_( + groupNames("sendBuffer", name()), + neighborProcessorNo(), + thisBoundaryIndex() + ), + reciever_( + groupNames("neighborProcPoints", name()), + neighborProcessorNo(), + mirrorBoundaryIndex() + ) +{ +} + +bool +pFlow::MPI::boundaryProcessor::beforeIteration(uint32 iterNum, real t, real dt) +{ + thisNumPoints_ = size(); + + auto req = MPI_REQUEST_NULL; + MPI_Isend( + &thisNumPoints_, + 1, + MPI_UNSIGNED, + neighborProcessorNo(), + thisBoundaryIndex(), + pFlowProcessors().localCommunicator(), + &req); + + MPI_Recv( + &neighborProcNumPoints_, + 1, + MPI_UNSIGNED, + neighborProcessorNo(), + mirrorBoundaryIndex(), + pFlowProcessors().localCommunicator(), + MPI_STATUS_IGNORE + ); + + sizeObtained_ = false; + + return true; +} + +pFlow::uint32 +pFlow::MPI::boundaryProcessor::neighborProcSize() const +{ + checkSize(); + return neighborProcNumPoints_; +} + +pFlow::realx3Vector_D& +pFlow::MPI::boundaryProcessor::neighborProcPoints() +{ + checkDataRecieved(); + return reciever_.buffer(); +} + +const pFlow::realx3Vector_D& +pFlow::MPI::boundaryProcessor::neighborProcPoints() const +{ + checkDataRecieved(); + return reciever_.buffer(); +} + +bool +pFlow::MPI::boundaryProcessor::updataBoundary(int step) +{ + if (step == 1) + { + sender_.sendData(pFlowProcessors(), thisPoints()); + dataRecieved_ = false; + } + else if (step == 2) + { + reciever_.recieveData(pFlowProcessors(), neighborProcSize()); + dataRecieved_ = false; + } + return true; +} + +bool +pFlow::MPI::boundaryProcessor::iterate(uint32 iterNum, real t, real dt) +{ + return true; +} + +bool +pFlow::MPI::boundaryProcessor::afterIteration(uint32 iterNum, real t, real dt) +{ + return true; +} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp new file mode 100644 index 00000000..cb278461 --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/boundaryProcessor.hpp @@ -0,0 +1,116 @@ +/*------------------------------- phasicFlow 
--------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +-----------------------------------------------------------------------------*/ + +#ifndef __boundaryProcessor_hpp__ +#define __boundaryProcessor_hpp__ + + +#include "boundaryBase.hpp" +#include "mpiTypes.hpp" +#include "dataSender.hpp" +#include "dataReciever.hpp" + +namespace pFlow::MPI +{ + +class boundaryProcessor +: + public boundaryBase +{ +private: + + uint32 neighborProcNumPoints_ = 0; + + uint32 thisNumPoints_; + + realx3Vector_D neighborProcPoints_; + + mutable Request sizeRequest_; + + mutable Request sSizeRequest_; + + int req_=0; + + mutable bool sizeObtained_ = true; + + mutable dataSender sender_; + + mutable dataReciever reciever_; + + mutable bool dataRecieved_ = true; + + void checkSize()const; + + void checkDataRecieved()const; + + /// @brief Update processor boundary data for this processor + /// @param step It is either 1 or 2 in the input to indicate + /// the update step + /// @return true if successful + /// @details This method is called by boundaryList two times to + /// allow processor boundaries to exchange data in two steps. + /// The first step is a buffered non-blocking send and the second + /// step is non-blocking recieve to get data. + bool updataBoundary(int step)override; + +public: + + TypeInfo("boundary"); + + boundaryProcessor( + const dictionary& dict, + const plane& bplane, + internalPoints& internal, + boundaryList& bndrs, + uint32 thisIndex + ); + + ~boundaryProcessor() override = default; + + add_vCtor + ( + boundaryBase, + boundaryProcessor, + dictionary + ); + + bool beforeIteration(uint32 iterNum, real t, real dt) override; + + bool iterate(uint32 iterNum, real t, real dt) override; + + bool afterIteration(uint32 iterNum, real t, real dt) override; + + /// @brief Return number of points in the neighbor processor boundary. + /// This is overriden from boundaryBase. + uint32 neighborProcSize() const override; + + /// @brief Return a reference to point positions in the neighbor + /// processor boundary. + realx3Vector_D& neighborProcPoints() override; + + /// @brief Return a const reference to point positions in the + /// neighbor processor boundary. 
+ const realx3Vector_D& neighborProcPoints() const override; + +}; + +} // namespace pFlow::MPI + +#endif //__boundaryProcessor_hpp__ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp new file mode 100644 index 00000000..13069b2a --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataReciever.hpp @@ -0,0 +1,108 @@ + +#ifndef __dataReciever_hpp__ +#define __dataReciever_hpp__ + + +#include "span.hpp" +#include "localProcessors.hpp" +#include "mpiCommunication.hpp" + +namespace pFlow::MPI +{ + +template +class dataReciever +{ +public: + + using BufferVectorType = VectorSingle; + + using BufferVectorTypeHost = VectorSingle; + + using memory_space = typename BufferVectorType::memory_space; + + using execution_space = typename BufferVectorType::execution_space; + +private: + + BufferVectorType buffer_; + + std::vector buffer0_; + + int fromProc_; + + int tag_; + + Request recvRequest_; + +public: + + dataReciever(const word& name, int from, int tag) + : + buffer_(name), + fromProc_(from), + tag_(tag) + {} + + ~dataReciever()=default; + + void recieveData( + const localProcessors& processors, + uint32 numToRecv + ) + { + + buffer0_.clear(); + buffer0_.resize(numToRecv); + MPI_Status status; + + /*CheckMPI(recv( + buffer_.getSpan(), + fromProc_, + tag_, + processors.localCommunicator(), + &status), true);*/ + MPI_Recv( + buffer0_.data(), + buffer0_.size(), + realx3Type__, + fromProc_, + tag_, + processors.localCommunicator(), + &status + ); + int c; + getCount(&status, c); + pOutput<<"Number of data recieved "<(&status, count), true); + + return static_cast(count);*/ + return buffer_.size(); + } + +}; + +} + + +#endif //__dataReciever_hpp__ diff --git a/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp new file mode 100644 index 00000000..11c1782f --- /dev/null +++ b/src/phasicFlow/MPIParallelization/pointStructure/boundaries/dataSender.hpp @@ -0,0 +1,120 @@ +#ifndef __dataSender_hpp__ +#define __dataSender_hpp__ + +#include "VectorSingles.hpp" +#include "localProcessors.hpp" +#include "mpiCommunication.hpp" + +namespace pFlow::MPI +{ + +template +class dataSender +{ +public: + + using BufferVectorType = VectorSingle; + + using BufferVectorTypeHost = VectorSingle; + + using memory_space = typename BufferVectorType::memory_space; + + using execution_space = typename BufferVectorType::execution_space; + + // This is device vector + + +private: + + //BufferVectorType buffer_; + + std::vector buffer_; + + int toProc_; + + int tag_; + + Request sendRequest_ = RequestNull; + +public: + + dataSender(const word& name, int toProc, int tag) + : + toProc_(toProc), + tag_(tag) + {} + + ~dataSender()=default; + + void sendData( + const localProcessors& processors, + const scatteredFieldAccess& scatterField + ) + { + using RPolicy = Kokkos::RangePolicy< + DefaultExecutionSpace, + Kokkos::Schedule, + Kokkos::IndexType>; + + uint32 n = scatterField.size(); + + // clear the buffer to prevent data copy if capacity increases + buffer_.clear(); + buffer_.resize(n); + + auto* buffView = buffer_.data(); + + Kokkos::parallel_for( + "dataSender::sendData", + RPolicy(0,n), + LAMBDA_HD(uint32 i) + { + buffView[i] = scatterField[i]; + } + ); + Kokkos::fence(); + auto req = MPI_REQUEST_NULL; + + MPI_Isend( + buffer_.data(), + buffer_.size(), + 
realx3Type__, + toProc_, + tag_, + processors.localCommunicator(), + &req); + + /*CheckMPI(send( + buffer_.getSpan(), + toProc_, + tag_, + processors.localCommunicator(), + MPI_STATUS_IGNORE), true);*/ + } + + /*auto& buffer() + { + return buffer_; + } + + const auto& buffer()const + { + return buffer_; + }*/ + + bool sendComplete() + { + return true; + /*int test; + MPI_Test(&sendRequest_, &test, StatusIgnore); + if(test) + return true; + else + return false;*/ + } + +}; + +} + +#endif //__dataSender_hpp__ \ No newline at end of file diff --git a/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.cpp b/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.cpp index 2d2cc3bf..b2674920 100644 --- a/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.cpp +++ b/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.cpp @@ -36,6 +36,25 @@ pFlow::boundaryField::boundaryField internal_(internal) {} +template +typename pFlow::boundaryField::ProcVectorType& +pFlow::boundaryField::neighborProcField() +{ + static ProcVectorType dummyVector{"dummyVector"}; + notImplementedFunction; + fatalExit; + return dummyVector; +} + +template +const typename pFlow::boundaryField::ProcVectorType& +pFlow::boundaryField::neighborProcField() const +{ + static ProcVectorType dummyVector{"dummyVector"}; + notImplementedFunction; + fatalExit; + return dummyVector; +} template pFlow::uniquePtr> diff --git a/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.hpp b/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.hpp index 7e1935a5..d30b3f18 100644 --- a/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.hpp +++ b/src/phasicFlow/containers/pointField/boundaryField/boundaryField/boundaryField.hpp @@ -26,6 +26,18 @@ Licence: namespace pFlow { + +// forward +template< class T, class MemorySpace> + class boundaryFieldList; + +enum class DataDirection +{ + MasterToSlave, + SlaveToMaster, + TwoWay +}; + template< class T, class MemorySpace = void> class boundaryField : @@ -43,12 +55,22 @@ public: using FieldAccessType = scatteredFieldAccess; -protected: + using ProcVectorType = VectorSingle; +private: + + /// friend et al. 
+ friend class boundaryFieldList; + /// @brief a ref to the internal field InternalFieldType& internal_; - + virtual + bool updateBoundary(int step, DataDirection direction) + { + return true; + } + public: TypeInfoTemplate211("boundaryField","none" ,T, memory_space::name()); @@ -78,7 +100,6 @@ public: boundaryBase ); - bool hearChanges ( real t, @@ -116,6 +137,12 @@ public: this->mirrorBoundary().indexList().deviceViewAll(), internal_.deviceViewAll()); } + + virtual + ProcVectorType& neighborProcField(); + + virtual + const ProcVectorType& neighborProcField()const; void fill(const std::any& val)override { diff --git a/src/phasicFlow/containers/pointField/boundaryField/boundaryFieldList.hpp b/src/phasicFlow/containers/pointField/boundaryField/boundaryFieldList.hpp index e4179bc2..31a13661 100644 --- a/src/phasicFlow/containers/pointField/boundaryField/boundaryFieldList.hpp +++ b/src/phasicFlow/containers/pointField/boundaryField/boundaryFieldList.hpp @@ -43,6 +43,8 @@ protected: const boundaryList& boundaries_; + uint32 slaveToMasterUpdateIter_ = -1; + public: boundaryFieldList(const boundaryList& boundaries, InternalFieldType& internal) @@ -60,6 +62,29 @@ public: } } + void updateBoundaries(uint32 iter, DataDirection direction) + { + if( direction == DataDirection::SlaveToMaster + && slaveToMasterUpdateIter_ == iter) return; + + // first step + for(auto b:*this) + { + b->updateBoundary(1, direction); + } + + // second step + for(auto b:*this) + { + b->updateBoundary(2, direction); + } + + if(direction == DataDirection::SlaveToMaster) + { + slaveToMasterUpdateIter_ = iter; + } + } + void fill(const T& val) { for(auto& bf: *this) @@ -68,6 +93,12 @@ public: } } + bool slaveToMasterUpdateRequested()const + { + return slaveToMasterUpdateIter_ != -1; + } + + }; } diff --git a/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.cpp b/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.cpp index 3e2f0141..c8e3c2c1 100644 --- a/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.cpp +++ b/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.cpp @@ -31,7 +31,8 @@ pFlow::generalBoundary::generalBoundary : observer(&boundary, defaultMessage_), boundary_(boundary), - pStruct_(pStruct) + pStruct_(pStruct), + isBoundaryMaster_(boundary.isBoundaryMaster()) {} diff --git a/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.hpp b/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.hpp index 18105542..c55a7f71 100644 --- a/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.hpp +++ b/src/phasicFlow/containers/pointField/boundaryField/generalBoundary/generalBoundary.hpp @@ -45,16 +45,10 @@ private: const pointStructure& pStruct_; + const bool isBoundaryMaster_; static inline const message defaultMessage_{message::BNDR_RESET}; - - template - inline - bool checkForType()const - { - return typeName() == BoundaryFieldType::TYPENAME(); - } public: @@ -68,7 +62,6 @@ public: ~generalBoundary()override = default; - inline uint32 thisBoundaryIndex()const { @@ -93,6 +86,24 @@ public: return boundary_.capacity(); } + inline + auto neighborProcSize()const + { + return boundary_.neighborProcSize(); + } + + inline + int neighborProcessorNo()const + { + return boundary_.neighborProcessorNo(); + } + + inline + int thisProcessorNo()const + { + return boundary_.thisProcessorNo(); + } + inline const auto& 
boundary()const { @@ -129,22 +140,17 @@ public: return pStruct_; } + inline + bool isBoundaryMaster()const + { + return isBoundaryMaster_; + } + const Time& time()const; virtual void fill(const std::any& val)=0; - /*template - BoundaryFieldType& thisField() - { - return static_cast(*this); - } - - template - const BoundaryFieldType& thisField()const - { - return static_cast(*this); - }*/ }; diff --git a/src/phasicFlow/containers/pointField/internalField/internalField.hpp b/src/phasicFlow/containers/pointField/internalField/internalField.hpp index 779b0c61..423f1ef7 100644 --- a/src/phasicFlow/containers/pointField/internalField/internalField.hpp +++ b/src/phasicFlow/containers/pointField/internalField/internalField.hpp @@ -181,6 +181,11 @@ public: return field_.insertSetElement(indices, val); } + inline const Time& time()const + { + return internalPoints_.time(); + } + bool hearChanges ( real t, diff --git a/src/phasicFlow/containers/pointField/pointField/pointField.hpp b/src/phasicFlow/containers/pointField/pointField/pointField.hpp index d42870d5..345dfdcc 100644 --- a/src/phasicFlow/containers/pointField/pointField/pointField.hpp +++ b/src/phasicFlow/containers/pointField/pointField/pointField.hpp @@ -24,7 +24,7 @@ Licence: #include "pointStructure.hpp" #include "internalField.hpp" #include "boundaryFieldList.hpp" - +#include "Time.hpp" namespace pFlow { @@ -103,6 +103,23 @@ public: return static_cast(*this); } + void updateBoundaries(DataDirection direction)const + { + uint32 iter = this->time().currentIter(); + auto& bList = const_cast(boundaryFieldList_); + bList.updateBoundaries(iter, direction); + } + + /// @brief update boundaries if it is requested previousely (slave to master). + /// It only checks for SlaveToMaster direction. + void updateBoundariesSlaveToMasterIfRequested() + { + if(boundaryFieldList_.slaveToMasterUpdateRequested()) + { + updateBoundaries(DataDirection::SlaveToMaster); + } + } + const auto& boundaryFields()const { return boundaryFieldList_; diff --git a/src/phasicFlow/processors/localProcessors.cpp b/src/phasicFlow/processors/localProcessors.cpp index 0dc7e658..7b1067c4 100644 --- a/src/phasicFlow/processors/localProcessors.cpp +++ b/src/phasicFlow/processors/localProcessors.cpp @@ -1,3 +1,22 @@ +/*------------------------------- phasicFlow --------------------------------- + O C enter of + O O E ngineering and + O O M ultiscale modeling of + OOOOOOO F luid flow +------------------------------------------------------------------------------ + Copyright (C): www.cemf.ir + email: hamid.r.norouzi AT gmail.com +------------------------------------------------------------------------------ +Licence: + This file is part of phasicFlow code. It is a free software for simulating + granular and multiphase flows. You can redistribute it and/or modify it under + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of + granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
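The pointField additions above wire field-level synchronization into the time loop: updateBoundaries(direction) takes the current iteration from Time and forwards it to boundaryFieldList::updateBoundaries, which runs the two passes and, for SlaveToMaster, remembers the iteration so the same exchange is not repeated within one step; updateBoundariesSlaveToMasterIfRequested() re-issues it only if such an update was requested before. A small usage sketch follows; the field type is kept generic because the concrete pointField aliases are not shown in this patch, and the function names are illustrative.

    // How a caller drives the boundary update (sketch).
    #include "boundaryField.hpp"   // for pFlow::DataDirection

    template<class FieldType>
    void syncBeforeContactSearch(FieldType& field)
    {
        // runs step 1 (send) then step 2 (receive) on every boundary field;
        // a second call within the same iteration is skipped
        field.updateBoundaries(pFlow::DataDirection::SlaveToMaster);
    }

    template<class FieldType>
    void syncLaterInSameIteration(FieldType& field)
    {
        // no-op unless a SlaveToMaster update was requested earlier
        field.updateBoundariesSlaveToMasterIfRequested();
    }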
+ +-----------------------------------------------------------------------------*/ #include "localProcessors.hpp" pFlow::localProcessors::localProcessors(const word &name) @@ -8,8 +27,8 @@ pFlow::localProcessors::localProcessors(const word &name) name_(name) { #ifdef pFlow_Build_MPI - parrentCommunicator_ = pFlow::MPI::CommWorld; - localCommunicator_ = pFlow::MPI::CommWorld; + parrentCommunicator_ = MPI_COMM_WORLD; + localCommunicator_ = MPI_COMM_WORLD; #endif } @@ -24,7 +43,7 @@ pFlow::localProcessors::localProcessors name_(name) { #ifdef pFlow_Build_MPI - parrentCommunicator_ = pFlow::MPI::CommWorld; + parrentCommunicator_ = MPI_COMM_WORLD; if(ranks.size()> this->globalSize() ) { @@ -33,17 +52,17 @@ pFlow::localProcessors::localProcessors fatalExit; } - pFlow::MPI::Group globalGroup; + MPI_Group globalGroup; MPI_Comm_group(parrentCommunicator_, &globalGroup); - pFlow::MPI::Group localGroup; + MPI_Group localGroup; MPI_Group_incl(globalGroup, ranks.size(), ranks.data(), &localGroup); // Create the new communicator from that group of processes. CheckMPI( MPI_Comm_create(parrentCommunicator_, localGroup, &localCommunicator_), true); - isPartOfLocal_ = localCommunicator_ != MPI::CommNull; + isPartOfLocal_ = localCommunicator_ != MPI_COMM_NULL; if(isPartOfLocal_) { @@ -71,7 +90,7 @@ pFlow::localProcessors::localProcessors pFlow::localProcessors::localProcessors ( - pFlow::MPI::Comm parrentComm, + MPI_Comm parrentComm, const word &name, const std::vector &ranks ) @@ -92,17 +111,17 @@ pFlow::localProcessors::localProcessors fatalExit; } - pFlow::MPI::Group parGroup; + MPI_Group parGroup; MPI_Comm_group(parrentCommunicator_, &parGroup); - pFlow::MPI::Group localGroup; + MPI_Group localGroup; MPI_Group_incl(parGroup, ranks.size(), ranks.data(), &localGroup); // Create the new communicator from that group of processes. 
CheckMPI( MPI_Comm_create(parrentCommunicator_, localGroup, &localCommunicator_), true); - isPartOfLocal_ = localCommunicator_ != MPI::CommNull; + isPartOfLocal_ = localCommunicator_ != MPI_COMM_NULL; if(isPartOfLocal_) { diff --git a/src/phasicFlow/processors/localProcessors.hpp b/src/phasicFlow/processors/localProcessors.hpp index 41e07f9e..eab18c08 100644 --- a/src/phasicFlow/processors/localProcessors.hpp +++ b/src/phasicFlow/processors/localProcessors.hpp @@ -25,7 +25,7 @@ Licence: #include "types.hpp" #ifdef pFlow_Build_MPI - #include "mpiTypes.hpp" + #include "mpi.h" #endif @@ -39,9 +39,9 @@ class localProcessors private: #ifdef pFlow_Build_MPI - pFlow::MPI::Comm parrentCommunicator_; + MPI_Comm parrentCommunicator_; - pFlow::MPI::Comm localCommunicator_; + MPI_Comm localCommunicator_; #endif int localSize_ = 1 ; @@ -70,7 +70,7 @@ public: #ifdef pFlow_Build_MPI localProcessors( - pFlow::MPI::Comm parrentComm, + MPI_Comm parrentComm, const word& name, const std::vector& ranks); #endif diff --git a/src/phasicFlow/processors/processors.cpp b/src/phasicFlow/processors/processors.cpp index 67f71bb2..f5eb066a 100644 --- a/src/phasicFlow/processors/processors.cpp +++ b/src/phasicFlow/processors/processors.cpp @@ -58,11 +58,13 @@ void pFlow::processors::initProcessors(int argc, char *argv[]) if(processors::globalParallel()) { pFlow::pOutput.activatePrefix(); - pFlow::pOutput.setPrefixNum(processors::globalRank_); + pFlow::pOutput.setPrefixNum(processors::globalRank_); + pFlow::errReport.activatePrefix(); + pFlow::errReport.setPrefixNum(processors::globalRank_); } pFlow::mOutput.setMasterSlave(processors::globalMaster()); - pFlow::errReport.setMasterSlave(processors::globalMaster()); + MPI_Type_contiguous(3, pFlow::MPI::Type(), &pFlow::MPI::realx3Type__); MPI_Type_commit(&pFlow::MPI::realx3Type__); diff --git a/src/phasicFlow/repository/IOobject/IOPattern.hpp b/src/phasicFlow/repository/IOobject/IOPattern.hpp index 9bd46e09..9c1e3a13 100644 --- a/src/phasicFlow/repository/IOobject/IOPattern.hpp +++ b/src/phasicFlow/repository/IOobject/IOPattern.hpp @@ -41,7 +41,7 @@ public: * * MasterProcessorDistribute: Read is done on master processor, but * the data should be distributed between processors based on an externally - * specified pattern. Write is done on master processors and the data is + * specified pattern. Write is done on master processor and the data is * collected from all processors (collection is done based on the externally * specified pattern). 
* @@ -146,7 +146,6 @@ public: bool thisProcWriteData()const { if(isAllProcessorsDifferent()) return true; - if(isMasterProcessorDistribute())return true; return isMaster(); } diff --git a/src/phasicFlow/repository/IOobject/IOobject.cpp b/src/phasicFlow/repository/IOobject/IOobject.cpp index c3956d1e..b5857ea1 100644 --- a/src/phasicFlow/repository/IOobject/IOobject.cpp +++ b/src/phasicFlow/repository/IOobject/IOobject.cpp @@ -118,8 +118,9 @@ bool pFlow::IOobject::readObject(bool rdHdr) bool pFlow::IOobject::writeObject() const { - - if(implyWrite()&& ioPattern().thisCallWrite()) + if(!implyWrite()) return true; + + if(ioPattern().thisCallWrite()) { if(auto ptrOS = outStream(); ptrOS ) @@ -160,7 +161,9 @@ bool pFlow::IOobject::readObject(iIstream& is, bool rdHdr) bool pFlow::IOobject::writeObject(iOstream& os) const { if(this->writeHeader() && ioPattern().thisProcWriteHeader()) - writeHeader(os, typeName()); + { + writeHeader(os, typeName()); + } if(ioPattern().thisCallWrite()) { diff --git a/src/phasicFlow/repository/systemControl/systemControl.cpp b/src/phasicFlow/repository/systemControl/systemControl.cpp index eef9fea3..ab9a52b8 100644 --- a/src/phasicFlow/repository/systemControl/systemControl.cpp +++ b/src/phasicFlow/repository/systemControl/systemControl.cpp @@ -199,7 +199,7 @@ bool pFlow::systemControl::operator++(int) if (time().timersReportTime() && timersReport()) { - timers_.write(output, true); + timers_.write(mOutput, true); } } else if (time().finalTime()) @@ -212,7 +212,7 @@ bool pFlow::systemControl::operator++(int) } writeToFileTimer_.end(); - timers_.write(output, true); + timers_.write(mOutput, true); } return toContinue; diff --git a/src/phasicFlow/streams/dataIO/dataIO.cpp b/src/phasicFlow/streams/dataIO/dataIO.cpp index f1b0974d..39fbd9e0 100644 --- a/src/phasicFlow/streams/dataIO/dataIO.cpp +++ b/src/phasicFlow/streams/dataIO/dataIO.cpp @@ -202,7 +202,7 @@ bool pFlow::dataIO::writeData(iOstream& os, span data) if( ioPattern_.thisProcWriteData()) { - return writeDataAsciiBinary(os, data); + return writeDataAsciiBinary(os, bufferSpan_); } else { @@ -215,7 +215,7 @@ inline bool pFlow::dataIO::writeData(iOstream& os, span data) { - if( ioPattern_.isParallel() ) + if( ioPattern_.isParallel() && ioPattern_.isMasterProcessorDistribute()) { notImplementedFunction<< "data transfer for type word is not supported in parallel mode!"<::writeData(iOstream& os, span data) if( ioPattern_.thisProcWriteData()) { - return writeDataASCII(os, data); + return writeDataASCII(os, bufferSpan_); } else { diff --git a/src/phasicFlow/streams/dataIO/dataIO.hpp b/src/phasicFlow/streams/dataIO/dataIO.hpp index 04f28c79..281fe832 100644 --- a/src/phasicFlow/streams/dataIO/dataIO.hpp +++ b/src/phasicFlow/streams/dataIO/dataIO.hpp @@ -31,8 +31,6 @@ Licence: #include "virtualConstructor.hpp" #include "pFlowProcessors.hpp" - - namespace pFlow { diff --git a/src/phasicFlow/streams/dataIO/dataIOMPI.hpp b/src/phasicFlow/streams/dataIO/dataIOMPI.hpp deleted file mode 100644 index 850cf69b..00000000 --- a/src/phasicFlow/streams/dataIO/dataIOMPI.hpp +++ /dev/null @@ -1,97 +0,0 @@ -#ifndef __datIOMPI_hpp__ -#define __datIOMPI_hpp__ - -#include "dataIO.hpp" -#include "pFlowProcessors.hpp" - -#ifdef pFlow_Build_MPI - #include "gatherMaster.hpp" -#endif - -namespace pFlow -{ - -template -class dataIOMPI -: - public dataIO -{ -protected: - - bool gatherData(span data ) override - { - - if(this->ioPattern_.isAllProcessorsDifferent()) - { - this->bufferSpan_ = data; - return true; - } - - if( 
this->ioPattern_.isMasterProcessorDistribute()) - { - -#ifdef pFlow_Build_MPI - - auto gatherT = pFlow::MPI::gatherMaster(pFlowProcessors()); - - if(!gatherT.gatherData(data)) - { - fatalErrorInFunction<<"Error in gathering data to master"<buffer_ = gatherT.moveData(); - - this->bufferSpan_ = makeSpan(this->buffer_); - - return true; -#else - notImplementedFunction; - fatalExit; - return false; -#endif //pFlow_Build_MPI - - } - - if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() ) - { - if( this->ioPattern_.isMaster() ) - { - this->bufferSpan_ = data; - } - else - { - this->bufferSpan_ = span(nullptr, 0); - return true; - } - } - - return false; - } -public: - - TypeInfo("dataIO"); - - dataIOMPI(const IOPattern& iop) - : - dataIO(iop) - {} - - dataIOMPI(const dataIOMPI&) = default; - - dataIOMPI(dataIOMPI&&) = default; - - - dataIOMPI& operator=(const dataIOMPI&) = default; - - dataIOMPI& operator=(dataIOMPI&&) = default; - - ~dataIOMPI() = default; - -}; - - -} - - -#endif \ No newline at end of file diff --git a/src/phasicFlow/streams/dataIO/dataIORegulars.cpp b/src/phasicFlow/streams/dataIO/dataIORegulars.cpp index 740c0401..d1067504 100644 --- a/src/phasicFlow/streams/dataIO/dataIORegulars.cpp +++ b/src/phasicFlow/streams/dataIO/dataIORegulars.cpp @@ -19,9 +19,15 @@ template class pFlow::dataIORegular; template class pFlow::dataIO; template class pFlow::dataIORegular; +template class pFlow::dataIO; +template class pFlow::dataIORegular; + template class pFlow::dataIO; template class pFlow::dataIORegular; +template class pFlow::dataIO; +template class pFlow::dataIORegular; + template class pFlow::dataIO; template class pFlow::dataIORegular; diff --git a/src/phasicFlow/streams/iStream/iIstream.hpp b/src/phasicFlow/streams/iStream/iIstream.hpp index 2dbf0fa9..817d42e3 100755 --- a/src/phasicFlow/streams/iStream/iIstream.hpp +++ b/src/phasicFlow/streams/iStream/iIstream.hpp @@ -260,6 +260,8 @@ inline iIstream& operator>>( iIstream& is, float& val); inline iIstream& operator>>( iIstream& is, double& val); +inline iIstream& operator>>( iIstream& is, size_t& val); + } // pFlow diff --git a/src/phasicFlow/streams/iStream/iIstreamI.hpp b/src/phasicFlow/streams/iStream/iIstreamI.hpp index 9ddb739a..1c73f87b 100755 --- a/src/phasicFlow/streams/iStream/iIstreamI.hpp +++ b/src/phasicFlow/streams/iStream/iIstreamI.hpp @@ -295,3 +295,35 @@ inline pFlow::iIstream& pFlow::operator>>( iIstream& is, double& val) return is; } +inline pFlow::iIstream& pFlow::operator>>( iIstream& is, size_t& val) +{ + token t(is); + if (!t.good()) + { + ioErrorInFile(is.name(), is.lineNumber()) + << "Bad token - could not get size_t value"; + fatalExit; + is.setBad(); + return is; + } + + if (t.isNumber()) + { + val = static_cast<size_t>(t.number()); + } + else + { + ioErrorInFile(is.name(), is.lineNumber()) + << "Wrong token type - expected size_t value, found " + << t; + fatalExit; + is.setBad(); + return is; + } + + is.check(FUNCTION_NAME); + return is; +} + + + diff --git a/src/phasicFlow/streams/streams.cpp b/src/phasicFlow/streams/streams.cpp index 12ad3690..9b49dbf8 100755 --- a/src/phasicFlow/streams/streams.cpp +++ b/src/phasicFlow/streams/streams.cpp @@ -9,5 +9,5 @@ pFlow::processorOstream pFlow::pOutput(cout, "pFlow processorOstream"); pFlow::Istream pFlow::input( std::cin, "sFlow Istream"); -pFlow::masterOstream pFlow::errReport( std::cerr, "pFlow error report stream"); +pFlow::processorOstream pFlow::errReport( std::cerr, "pFlow error report stream"); diff --git 
a/src/phasicFlow/streams/streams.hpp b/src/phasicFlow/streams/streams.hpp index 683bba58..56e2f2b6 100755 --- a/src/phasicFlow/streams/streams.hpp +++ b/src/phasicFlow/streams/streams.hpp @@ -28,12 +28,12 @@ namespace pFlow extern Istream input; - extern masterOstream errReport; + extern processorOstream errReport; } -#define INFORMATION pFlow::mOutput< INFO: "< INFO: "<("mirrorProcessorNo")), + neighborProcessorNo_(dict.getVal("neighborProcessorNo")), + isBoundaryMaster_(thisProcessorNo()>=neighborProcessorNo()), name_(dict.name()), type_(dict.getVal("type")) { @@ -231,13 +232,22 @@ pFlow::boundaryBase::thisPoints()const } -typename pFlow::boundaryBase::pointFieldAccessType - pFlow::boundaryBase::neighborPoints()const +pFlow::realx3Vector_D& pFlow::boundaryBase::neighborProcPoints() { - notImplementedFunction; - return pointFieldAccessType(); + static realx3Vector_D dummyVector{"dummyVector"}; + notImplementedFunction; + fatalExit; + return dummyVector; } +const pFlow::realx3Vector_D& +pFlow::boundaryBase::neighborProcPoints()const +{ + static realx3Vector_D dummyVector{"dummyVector"}; + notImplementedFunction; + fatalExit; + return dummyVector; +} pFlow::realx3 pFlow::boundaryBase::displacementVectroToMirror() const { diff --git a/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp b/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp index 15b8284f..54fb94d0 100644 --- a/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp +++ b/src/phasicFlow/structuredData/boundaries/boundaryBase/boundaryBase.hpp @@ -51,42 +51,48 @@ public: private: - // friend et al. + // friend et al. friend boundaryList; - const plane& boundaryPlane_; + const plane& boundaryPlane_; - /// list of particles indices on device - uint32Vector_D indexList_; + /// list of particles indices on device + uint32Vector_D indexList_; /// list of particles indieces on host - mutable uint32Vector_H indexListHost_; + mutable uint32Vector_H indexListHost_; /// device and host list are sync - mutable bool listsSync_ = false; + mutable bool listsSync_ = false; - /// The length defined for creating neighbor list - real neighborLength_; + /// The length defined for creating neighbor list + real neighborLength_; - /// a reference to internal points - internalPoints& internal_; + /// a reference to internal points + internalPoints& internal_; - /// a reference to the list of boundaries + /// a reference to the list of boundaries /// (never use this in the constructor). - boundaryList& boundaries_; + boundaryList& boundaries_; - uint32 thisBoundaryIndex_; + uint32 thisBoundaryIndex_; - uint32 mirrorProcessoNo_; + int neighborProcessorNo_; - word name_; + bool isBoundaryMaster_; - word type_; + word name_; + + word type_; protected: - /// @brief set the size of indexList - void setSize(uint32 newSize); + /// @brief Set the size of indexList. + /// It is virtual to let derived classes be aware of + /// the fact that the size of boundary points has been changed. + /// So, any derived class that overrides this method should call + /// boundaryBase::setSize(newSize) too. + virtual void setSize(uint32 newSize); void setNewIndices(const uint32Vector_D& newIndices); @@ -116,6 +122,17 @@ protected: } } + /// Update this boundary data in two steps (1 and 2). + /// This is called after calling beforeIteration for + /// all boundaries, so any particle addition, deletion, + /// and transfer have been completed up to this point. 
+ /// This two-step update helps to provide a flexible mechanism + /// for data transfer, mostly for MPI-related jobs. + virtual + bool updataBoundary(int step) + { + return true; + } public: @@ -213,6 +230,24 @@ public: return indexList_.capacity(); } + inline + int neighborProcessorNo()const + { + return neighborProcessorNo_; + } + + inline + int thisProcessorNo()const + { + return pFlowProcessors().localRank(); + } + + inline + bool isBoundaryMaster()const + { + return isBoundaryMaster_; + } + inline uint32 thisBoundaryIndex()const { @@ -283,8 +318,22 @@ public: pointFieldAccessType thisPoints()const; + /// @brief Return number of points in the neighbor processor boundary + virtual + uint32 neighborProcSize()const + { + return 0; + } + + /// @brief Return a reference to point positions in the neighbor + /// processor boundary. virtual - pointFieldAccessType neighborPoints()const; + realx3Vector_D& neighborProcPoints(); + + /// @brief Return a const reference to point positions in the + /// neighbor processor boundary. + virtual + const realx3Vector_D& neighborProcPoints()const; /// - static create static diff --git a/src/phasicFlow/structuredData/boundaries/boundaryList.cpp b/src/phasicFlow/structuredData/boundaries/boundaryList.cpp index 9073d9e7..a00476d6 100644 --- a/src/phasicFlow/structuredData/boundaries/boundaryList.cpp +++ b/src/phasicFlow/structuredData/boundaries/boundaryList.cpp @@ -2,17 +2,17 @@ O C enter of O O E ngineering and O O M ultiscale modeling of - OOOOOOO F luid flow + OOOOOOO F luid flow ------------------------------------------------------------------------------ Copyright (C): www.cemf.ir email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- +------------------------------------------------------------------------------ Licence: - This file is part of phasicFlow code. It is a free software for simulating + This file is part of phasicFlow code. It is a free software for simulating granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
@@ -21,49 +21,44 @@ Licence: #include "boundaryList.hpp" #include "pointStructure.hpp" -void pFlow::boundaryList::setExtendedDomain() +void +pFlow::boundaryList::setExtendedDomain() { - if(!listSet_) + if (!listSet_) { - fatalErrorInFunction<< - "boundary list is not set yet and you used the objects."< dist; + std::array dist; dist[0] = boundary(0).neighborLength(); dist[1] = boundary(1).neighborLength(); dist[2] = boundary(2).neighborLength(); @@ -71,71 +66,66 @@ bool pFlow::boundaryList::updateLists() dist[4] = boundary(4).neighborLength(); dist[5] = boundary(5).neighborLength(); - pStruct_.updateFlag( - extendedDomain_, - dist); + pStruct_.updateFlag(extendedDomain_, dist); const auto& maskD = pStruct_.activePointsMaskDevice(); - boundary(0).setSize( maskD.leftSize() ); - boundary(1).setSize( maskD.rightSize() ); - boundary(2).setSize( maskD.bottomSize() ); - boundary(3).setSize( maskD.topSize() ); - boundary(4).setSize( maskD.rearSize() ); - boundary(5).setSize( maskD.frontSize() ); + boundary(0).setSize(maskD.leftSize()); + boundary(1).setSize(maskD.rightSize()); + boundary(2).setSize(maskD.bottomSize()); + boundary(3).setSize(maskD.topSize()); + boundary(4).setSize(maskD.rearSize()); + boundary(5).setSize(maskD.frontSize()); pStruct_.fillNeighborsLists( - boundary(0).indexList().deviceViewAll(), - boundary(1).indexList().deviceViewAll(), - boundary(2).indexList().deviceViewAll(), - boundary(3).indexList().deviceViewAll(), - boundary(4).indexList().deviceViewAll(), - boundary(5).indexList().deviceViewAll()); - - - return true; + boundary(0).indexList().deviceViewAll(), + boundary(1).indexList().deviceViewAll(), + boundary(2).indexList().deviceViewAll(), + boundary(3).indexList().deviceViewAll(), + boundary(4).indexList().deviceViewAll(), + boundary(5).indexList().deviceViewAll() + ); + + return true; } -pFlow::boundaryList::boundaryList -( - pointStructure& pStruct -) -: - ListPtr(pStruct.simDomain().sizeOfBoundaries()), - pStruct_(pStruct), - timeControl_ - ( - pStruct.simDomain().subDict("boundaries"), - "update", - pStruct.currentTime() - ) -{} - -bool pFlow::boundaryList::updateLists(uint32 iter, real t, real dt) +pFlow::boundaryList::boundaryList(pointStructure& pStruct) + : ListPtr(pStruct.simDomain().sizeOfBoundaries()), + pStruct_(pStruct), + timeControl_( + pStruct.simDomain().subDict("boundaries"), + "update", + pStruct.currentTime() + ) { - if( timeControl_.timeEvent(iter, t, dt)) +} + +bool +pFlow::boundaryList::updateNeighborLists(uint32 iter, real t, real dt) +{ + if (timeControl_.timeEvent(iter, t, dt)) { - return updateLists(); + return updateNeighborLists(); } return true; } -bool pFlow::boundaryList::setLists() +bool +pFlow::boundaryList::createBoundaries() { - if(listSet_)return true; + if (listSet_) + return true; - for(auto i=0; iset - ( - i, - boundaryBase::create - ( - pStruct_.simDomain().boundaryDict(i), - pStruct_.simDomain().boundaryPlane(i), - pStruct_, - *this, - i - ) + this->set( + i, + boundaryBase::create( + pStruct_.simDomain().boundaryDict(i), + pStruct_.simDomain().boundaryPlane(i), + pStruct_, + *this, + i + ) ); } listSet_ = true; @@ -146,81 +136,81 @@ bool pFlow::boundaryList::setLists() pFlow::box pFlow::boundaryList::internalDomainBox() const { - const auto& thisBox = pStruct_.thisDomain().domainBox(); - + const auto& thisBox = pStruct_.thisDomain().domainBox(); + const realx3 lowerPointDisplacement = { - boundary(0).neighborLengthIntoInternal(), - boundary(2).neighborLengthIntoInternal(), - boundary(4).neighborLengthIntoInternal()}; + 
boundary(0).neighborLengthIntoInternal(), + boundary(2).neighborLengthIntoInternal(), + boundary(4).neighborLengthIntoInternal() + }; const realx3 upperPointDisplacement = { - boundary(1).neighborLengthIntoInternal(), - boundary(3).neighborLengthIntoInternal(), - boundary(5).neighborLengthIntoInternal()}; - - return {thisBox.minPoint() + lowerPointDisplacement, - thisBox.maxPoint() - upperPointDisplacement}; + boundary(1).neighborLengthIntoInternal(), + boundary(3).neighborLengthIntoInternal(), + boundary(5).neighborLengthIntoInternal() + }; + + return { thisBox.minPoint() + lowerPointDisplacement, + thisBox.maxPoint() - upperPointDisplacement }; } bool pFlow::boundaryList::beforeIteration(uint32 iter, real t, real dt) { - // it is time to update lists - if(timeControl_.timeEvent(iter, t, dt)) + // it is time to update lists + if (timeControl_.timeEvent(iter, t, dt) && !updateNeighborLists()) { - if(!updateLists()) + fatalErrorInFunction; + return false; + } + + for (auto bdry : *this) + { + if (!bdry->beforeIteration(iter, t, dt)) { - fatalErrorInFunction; + fatalErrorInFunction << "Error in beforeIteration in boundary " + << bdry->name() << endl; return false; } } - for(auto& bdry:*this) + for (auto bdry : *this) { - if( !bdry->beforeIteration(iter, t, dt)) - { - fatalErrorInFunction<< - "Error in beforeIteration in boundary "<name()<updataBoundary(1); } - return true; + for (auto bdry : *this) + { + bdry->updataBoundary(2); + } + + return true; } -bool pFlow::boundaryList::iterate -( - uint32 iter, - real t, - real dt -) +bool +pFlow::boundaryList::iterate(uint32 iter, real t, real dt) { - for(auto& bdry:*this) + for (auto& bdry : *this) { - if( !bdry->iterate(iter, t, dt)) + if (!bdry->iterate(iter, t, dt)) { - fatalErrorInFunction<< - "Error in iterate in boundary "<name()<afterIteration(iter, t, dt)) - { - fatalErrorInFunction<< - "Error in afterIteration in boundary "<name()<name() << endl; + return false; + } + } + return true; +} + +bool +pFlow::boundaryList::afterIteration(uint32 iter, real t, real dt) +{ + for (auto& bdry : *this) + { + if (!bdry->afterIteration(iter, t, dt)) + { + fatalErrorInFunction << "Error in afterIteration in boundary " + << bdry->name() << endl; return false; } } diff --git a/src/phasicFlow/structuredData/boundaries/boundaryList.hpp b/src/phasicFlow/structuredData/boundaries/boundaryList.hpp index c5e3b25e..5eba6399 100644 --- a/src/phasicFlow/structuredData/boundaries/boundaryList.hpp +++ b/src/phasicFlow/structuredData/boundaries/boundaryList.hpp @@ -40,24 +40,23 @@ class boundaryList private: //// - data members - pointStructure& pStruct_; + pointStructure& pStruct_; - baseTimeControl timeControl_; + baseTimeControl timeControl_; - domain extendedDomain_; + domain extendedDomain_; - box internalDomainBox_; + box internalDomainBox_; - bool listSet_ = false; + bool listSet_ = false; - void setExtendedDomain(); + void setExtendedDomain(); - bool resetLists(); - - /// @brief update neighbor list of boundaries regardless - /// of the time intervals - bool updateLists(); + bool resetLists(); + /// @brief update neighbor list of boundaries regardless + /// of the time intervals + bool updateNeighborLists(); public: @@ -66,16 +65,16 @@ public: //// - Constructors - boundaryList(pointStructure& pStruct); + explicit boundaryList(pointStructure& pStruct); - ~boundaryList() = default; + virtual ~boundaryList() = default; /// @brief update neighbor list of boundaries based on /// the time intervals - bool updateLists(uint32 iter, real t, real dt); + bool 
updateNeighborLists(uint32 iter, real t, real dt); - bool setLists(); + bool createBoundaries(); inline const pointStructure& pStruct()const @@ -115,8 +114,6 @@ public: box internalDomainBox()const; - - bool beforeIteration(uint32 iter, real t, real dt); bool iterate(uint32 iter, real t, real dt); diff --git a/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp b/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp index 7f999541..e94d111e 100644 --- a/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp +++ b/src/phasicFlow/structuredData/domain/regularSimulationDomain.cpp @@ -51,9 +51,9 @@ bool pFlow::regularSimulationDomain::createBoundaryDicts() return false; } - if(!bDict.add("mirrorProcessorNo",(uint32) processors::globalRank())) + if(!bDict.add("neighborProcessorNo",(uint32) processors::globalRank())) { - fatalErrorInFunction<<"error in adding mirrorProcessorNo to "<< bName << + fatalErrorInFunction<<"error in adding neighborProcessorNo to "<< bName << " in dictionary "<< boundaries.globalName()< src, - span dst, - size_t sizeOfElement +bool +pFlow::regularSimulationDomain::domainActive() const +{ + return true; +} + +const pFlow::domain& +pFlow::regularSimulationDomain::thisDomain() const +{ + return thisDomain_; +} + +bool +pFlow::regularSimulationDomain::initialTransferBlockData( + span src, + span dst, + size_t sizeOfElement ) const { size_t requiredSize = sizeOfElement*initialNumberInThis(); @@ -209,7 +221,3 @@ const pFlow::dictionary &pFlow::regularSimulationDomain::thisBoundaryDict() cons return this->subDict("regularBoundaries"); } -bool pFlow::regularSimulationDomain::requiresDataTransfer()const -{ - return false; -} diff --git a/src/phasicFlow/structuredData/domain/regularSimulationDomain.hpp b/src/phasicFlow/structuredData/domain/regularSimulationDomain.hpp index 39491387..74bea6bc 100644 --- a/src/phasicFlow/structuredData/domain/regularSimulationDomain.hpp +++ b/src/phasicFlow/structuredData/domain/regularSimulationDomain.hpp @@ -2,17 +2,17 @@ O C enter of O O E ngineering and O O M ultiscale modeling of - OOOOOOO F luid flow + OOOOOOO F luid flow ------------------------------------------------------------------------------ Copyright (C): www.cemf.ir email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- +------------------------------------------------------------------------------ Licence: - This file is part of phasicFlow code. It is a free software for simulating + This file is part of phasicFlow code. It is a free software for simulating granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of + the terms of GNU General Public License v3 or any other later versions. + + phasicFlow is distributed to help others in their research in the field of granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
@@ -26,81 +26,70 @@ Licence: namespace pFlow { -class regularSimulationDomain -: +class regularSimulationDomain +: public simulationDomain { -protected: +private: - uint32 initialNumPoints_=0; + domain thisDomain_; - bool createBoundaryDicts() override; + uint32 initialNumPoints_ = 0; - bool setThisDomain()override; + bool createBoundaryDicts() final; + + bool setThisDomain() final; public: TypeInfo("simulationDomain"); - regularSimulationDomain(systemControl& control); + explicit regularSimulationDomain(systemControl& control); - virtual - ~regularSimulationDomain()=default; - - add_vCtor - ( - simulationDomain, - regularSimulationDomain, - systemControl - ); + ~regularSimulationDomain() final = default; - bool initialUpdateDomains(span pointPos) override; + add_vCtor(simulationDomain, regularSimulationDomain, systemControl); - uint32 initialNumberInThis()const override; + bool initialUpdateDomains(span pointPos) final; - bool initialThisDomainActive()const override - { - return true; - } + uint32 initialNumberInThis() const final; - /*bool updateDomains( - span pointPos, - pFlagTypeHost flags) override;*/ - - uint32 numberToBeImported()const override; + uint32 numberToBeImported() const final; - uint32 numberToBeExported()const override; + uint32 numberToBeExported() const final; + + /// @brief Is this domain active? + /// Active means there are particles in it, and + /// the boundaries and other entities of the simulation domain are valid + bool domainActive() const final; + + /// @brief return the simulation domain of this processor + const domain& thisDomain() const final; bool initialTransferBlockData( - span src, - span dst, - size_t sizeOfElement) const override; + span src, + span dst, + size_t sizeOfElement + ) const final; + /// @brief + /// @param src + /// @param dst + /// @return + bool initialTransferBlockData(span src, span dst) + const final; - /// @brief - /// @param src - /// @param dst - /// @return - bool initialTransferBlockData( - span src, - span dst) const override; + bool initialTransferBlockData(span src, span dst) + const final; - bool initialTransferBlockData( - span src, - span dst) const override; + bool initialTransferBlockData(span src, span dst) + const final; - bool initialTransferBlockData( - span src, - span dst) const override; + bool initialTransferBlockData(span src, span dst) + const final; - bool initialTransferBlockData( - span src, - span dst) const override; - - - const dictionary& thisBoundaryDict()const override; + const dictionary& thisBoundaryDict() const final; - bool requiresDataTransfer() const override; }; } diff --git a/src/phasicFlow/structuredData/domain/simulationDomain.cpp b/src/phasicFlow/structuredData/domain/simulationDomain.cpp index dde24ae9..07a3a479 100644 --- a/src/phasicFlow/structuredData/domain/simulationDomain.cpp +++ b/src/phasicFlow/structuredData/domain/simulationDomain.cpp @@ -52,8 +52,14 @@ pFlow::domain pFlow::simulationDomain::extendThisDomain return domain({minP, maxP}); } +const pFlow::plane& +pFlow::simulationDomain::boundaryPlane(uint32 i) const +{ + return thisDomain().boundaryPlane(i); +} + pFlow::uniquePtr - pFlow::simulationDomain::create(systemControl &control) +pFlow::simulationDomain::create(systemControl& control) { word sType = angleBracketsNames( "simulationDomain", diff --git a/src/phasicFlow/structuredData/domain/simulationDomain.hpp b/src/phasicFlow/structuredData/domain/simulationDomain.hpp index af49ee21..071e1326 100644 --- a/src/phasicFlow/structuredData/domain/simulationDomain.hpp +++ 
b/src/phasicFlow/structuredData/domain/simulationDomain.hpp @@ -39,33 +39,27 @@ class simulationDomain : public fileDictionary { -private: +private: -static inline constexpr uint32 sizeOfBoundaries_ = 6; - -static -inline const std::array boundaryNames_ = -{ - "left", "right", - "bottom", "top", - "rear", "front" -}; - -protected: - - //fileDictionary globalDomainDict_ ; - - /// @brief acutal limits of the simulation domain + /// @brief actual limits of the global box of simulation box globalBox_; - /// @brief the actual limits of this processor domain - domain thisDomain_; - bool boundariesSet_ = false; + static constexpr uint32 sizeOfBoundaries_ = 6; + + static + inline const std::array boundaryNames_ = + { + "left", "right", + "bottom", "top", + "rear", "front" + }; - virtual bool createBoundaryDicts() = 0; + virtual + bool createBoundaryDicts() = 0; - virtual bool setThisDomain() = 0; + virtual + bool setThisDomain() = 0; public: @@ -73,11 +67,10 @@ public: TypeInfo("simulationDomain"); /// Constrcut from components - simulationDomain(systemControl& control); + explicit simulationDomain(systemControl& control); /// Destructor - virtual - ~simulationDomain() = default; + ~simulationDomain() override = default; create_vCtor ( simulationDomain, systemControl, (control) ); + const auto& globalBox()const + { + return globalBox_; + } + virtual const dictionary& thisBoundaryDict()const = 0; @@ -97,9 +95,6 @@ public: virtual uint32 initialNumberInThis()const = 0; - virtual - bool initialThisDomainActive()const = 0; - virtual bool initialTransferBlockData( span src, @@ -139,9 +134,7 @@ public: charSpan(dst), el ); - } - - + } /// @brief Number of points to be imported after updating domains /// @return number of points @@ -151,40 +144,46 @@ public: virtual uint32 numberToBeExported()const = 0; - - + /// @brief Is this domain active? 
+ /// Active means there are particles in it, and + /// the boundaries and other entities of the simulation domain are valid virtual - bool requiresDataTransfer() const = 0; - - inline - const auto& thisDomain()const - { - return thisDomain_; - } + bool domainActive()const = 0; + /// @brief return the simulation domain of this processor + virtual + const domain& thisDomain()const = 0; + + domain extendThisDomain( const realx3& lowerPointExtension, const realx3& upperPointExtension)const; + /// @brief The original dictionary supplied by the user as input inline const auto& globalBoundaryDict()const { return static_cast(*this); } + /// @brief return a const ref to the dictionary of boundary i of this processor inline const dictionary& boundaryDict(uint32 i)const { return thisBoundaryDict().subDict(bundaryName(i)); } - inline - const auto& boundaryPlane(uint32 i)const - { - return thisDomain_.boundaryPlane(i); - } + /// @brief return a const ref to the plane of boundary i of this processor + const plane& boundaryPlane(uint32 i)const; + + static + uniquePtr create(systemControl& control); + + /// @brief Boundary name based on boundary index + /// @param i boundary index (range from 0 to 5) + /// @return const reference to name of the boundary static inline const word& bundaryName(uint32 i) { @@ -197,9 +196,6 @@ public: return boundaryNames_.size(); } - static - uniquePtr create(systemControl& control); - }; // simulationDomain diff --git a/src/phasicFlow/structuredData/pointStructure/pointStructure/pointStructure.cpp b/src/phasicFlow/structuredData/pointStructure/pointStructure/pointStructure.cpp index 30b81c10..9a20395c 100644 --- a/src/phasicFlow/structuredData/pointStructure/pointStructure/pointStructure.cpp +++ b/src/phasicFlow/structuredData/pointStructure/pointStructure/pointStructure.cpp @@ -55,7 +55,7 @@ bool pFlow::pointStructure::setupPointStructure(const PointsTypeHost &points) thisN, RESERVE() ); - + auto pSpan = points.getSpan(); if(auto iSpan = internal.getSpan(); @@ -72,7 +72,7 @@ bool pFlow::pointStructure::setupPointStructure(const PointsTypeHost &points) return false; } - boundaries_.setLists(); + boundaries_.createBoundaries(); return true; }
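Note on the two-step boundary update introduced in boundaryBase, boundaryList and boundaryFieldList above: step 1 is run on every boundary before step 2 is run on any of them, which is what allows a boundary to start a (possibly non-blocking) data exchange in the first pass and finish it in the second. Below is a minimal standalone sketch of that calling pattern; the names StepwiseBoundary, EchoBoundary and runTwoStepUpdate are purely illustrative and are not part of phasicFlow.

#include <iostream>
#include <memory>
#include <vector>

struct StepwiseBoundary
{
    virtual ~StepwiseBoundary() = default;

    // step is either 1 (pack data / post a non-blocking exchange)
    // or 2 (wait for completion / unpack the received data)
    virtual bool update(int step) = 0;
};

struct EchoBoundary : StepwiseBoundary
{
    int id;
    explicit EchoBoundary(int i) : id(i) {}

    bool update(int step) override
    {
        // a real boundary would post MPI_Isend/MPI_Irecv in step 1
        // and MPI_Wait + unpack in step 2
        std::cout << "boundary " << id << " step " << step << '\n';
        return true;
    }
};

bool runTwoStepUpdate(std::vector<std::unique_ptr<StepwiseBoundary>>& boundaries)
{
    for (auto& b : boundaries)          // first pass: every boundary starts its exchange
        if (!b->update(1)) return false;

    for (auto& b : boundaries)          // second pass: every boundary finishes its exchange
        if (!b->update(2)) return false;

    return true;
}

int main()
{
    std::vector<std::unique_ptr<StepwiseBoundary>> boundaries;
    for (int i = 0; i < 6; ++i)
        boundaries.push_back(std::make_unique<EchoBoundary>(i));
    return runTwoStepUpdate(boundaries) ? 0 : 1;
}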
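The slaveToMasterUpdateIter_ member added to boundaryFieldList coalesces repeated slave-to-master updates requested within one iteration: the first call does the work and records the iteration number, later calls in the same iteration return immediately. A small self-contained sketch of that guard, with hypothetical names (GuardedUpdater is not a phasicFlow class):

#include <cstdint>
#include <iostream>

class GuardedUpdater
{
    // -1 (wrapped to the maximum uint32_t value) means "never updated"
    uint32_t lastUpdateIter_ = static_cast<uint32_t>(-1);

public:

    bool update(uint32_t iter)
    {
        if (lastUpdateIter_ == iter)
            return false;              // already updated in this iteration: skip

        // ... perform the actual (possibly expensive) boundary data exchange ...
        lastUpdateIter_ = iter;
        return true;
    }

    // analogous to slaveToMasterUpdateRequested(): has an update ever been done?
    bool updateRequested() const
    {
        return lastUpdateIter_ != static_cast<uint32_t>(-1);
    }
};

int main()
{
    GuardedUpdater u;
    std::cout << u.update(10) << '\n';       // 1: first request in iteration 10 does the work
    std::cout << u.update(10) << '\n';       // 0: second request in the same iteration is skipped
    std::cout << u.update(11) << '\n';       // 1: a new iteration triggers a new update
    std::cout << u.updateRequested() << '\n';// 1: at least one update has been performed
    return 0;
}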
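localProcessors now works with raw MPI_Comm and MPI_Group handles instead of the pFlow::MPI wrappers. For readers less familiar with this corner of the MPI API, here is a self-contained sketch (not phasicFlow code) of carving a local communicator out of MPI_COMM_WORLD from an explicit rank list, using the same MPI_Comm_group / MPI_Group_incl / MPI_Comm_create sequence; error handling is kept to a bare minimum.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int worldRank = 0, worldSize = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
    MPI_Comm_size(MPI_COMM_WORLD, &worldSize);

    // the ranks that should belong to the local communicator (here: up to the first two)
    std::vector<int> ranks;
    for (int r = 0; r < worldSize && r < 2; ++r)
        ranks.push_back(r);

    MPI_Group worldGroup = MPI_GROUP_NULL;
    MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);

    MPI_Group localGroup = MPI_GROUP_NULL;
    MPI_Group_incl(worldGroup, static_cast<int>(ranks.size()), ranks.data(), &localGroup);

    // collective over MPI_COMM_WORLD; ranks not in localGroup receive MPI_COMM_NULL
    MPI_Comm localComm = MPI_COMM_NULL;
    MPI_Comm_create(MPI_COMM_WORLD, localGroup, &localComm);

    if (localComm != MPI_COMM_NULL)
    {
        int localRank = 0, localSize = 0;
        MPI_Comm_rank(localComm, &localRank);
        MPI_Comm_size(localComm, &localSize);
        std::printf("world rank %d -> local rank %d of %d\n", worldRank, localRank, localSize);
        MPI_Comm_free(&localComm);
    }

    MPI_Group_free(&localGroup);
    MPI_Group_free(&worldGroup);
    MPI_Finalize();
    return 0;
}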