From d0798dfc0b09605537c7df585d70bc3934e9cf14 Mon Sep 17 00:00:00 2001
From: Hamidreza Norouzi
Date: Sat, 27 Apr 2024 09:11:09 -0700
Subject: [PATCH] Clean up unused code

---
 .../MPIParallelization/CMakeLists.txt         |  36 --
 .../MPIParallelization/boundaryProcessor.cpp  |  61 ---
 .../MPIParallelization/boundaryProcessor.hpp  |  67 ----
 .../MPIParallelization/dataIOMPI.cpp          |   5 -
 .../MPIParallelization/dataIOMPI.hpp          |  97 -----
 .../MPIParallelization/gatherMaster.hpp       | 105 -----
 .../MPIParallelization/mpiCommunication.hpp   | 377 ------------------
 .../MPIParallelization/mpiTypes.hpp           |  75 ----
 .../MPIParallelization/partitioning.cpp       | 113 ------
 .../MPIParallelization/partitioning.hpp       | 168 --------
 .../MPIParallelization/procCommunication.cpp  |  30 --
 .../MPIParallelization/procCommunication.hpp  | 178 ---------
 .../MPIParallelization/procVector.hpp         | 199 ---------
 .../processorBoundaryField.cpp                |  29 --
 .../processorBoundaryField.hpp                |  80 ----
 .../processorBoundaryFields.cpp               |  10 -
 .../MPIParallelization/rcb1DPartitioning.cpp  | 325 ---------------
 .../MPIParallelization/rcb1DPartitioning.hpp  | 240 -----------
 .../scatteredMasterDistribute.cpp             | 158 --------
 .../scatteredMasterDistribute.hpp             |  69 ----
 .../scatteredMasterDistributeChar.cpp         | 166 --------
 .../scatteredMasterDistributeChar.hpp         |  67 ----
 22 files changed, 2655 deletions(-)
 delete mode 100644 src/phasicFlow/MPIParallelization/CMakeLists.txt
 delete mode 100644 src/phasicFlow/MPIParallelization/boundaryProcessor.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/boundaryProcessor.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/dataIOMPI.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/dataIOMPI.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/gatherMaster.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/mpiCommunication.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/mpiTypes.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/partitioning.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/partitioning.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/procCommunication.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/procCommunication.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/procVector.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/processorBoundaryField.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/processorBoundaryField.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/processorBoundaryFields.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/rcb1DPartitioning.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/rcb1DPartitioning.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/scatteredMasterDistribute.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/scatteredMasterDistribute.hpp
 delete mode 100644 src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.cpp
 delete mode 100644 src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.hpp

diff --git a/src/phasicFlow/MPIParallelization/CMakeLists.txt b/src/phasicFlow/MPIParallelization/CMakeLists.txt
deleted file mode 100644
index 32ab1c6b..00000000
--- a/src/phasicFlow/MPIParallelization/CMakeLists.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-#add Zoltan
-set(Zoltan_Install_DIR)
-if(DEFINED ENV{Zoltan_DIR})
-	set(Zoltan_Install_DIR $ENV{Zoltan_DIR})
-else()
-	set(Zoltan_Install_DIR $ENV{HOME}/PhasicFlow/Zoltan)
-endif()
-message(STATUS "Zoltan install directory is ${Zoltan_Install_DIR}")
-
-set(ZOLTAN_PREFIX "${Zoltan_Install_DIR}" CACHE STRING "Zoltan install directory")
STRING "Zoltan install directory") - -find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include") - -message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}") - -find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib") -message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}") - - -set(SourceFiles - partitioning.cpp - rcb1DPartitioning.cpp - domain/MPISimulationDomain.cpp) - -set(link_libs Kokkos::kokkos phasicFlow PRIVATE MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm ) - -pFlow_add_library_install(MPIParallelization SourceFiles link_libs) -target_include_directories(MPIParallelization PUBLIC ${ZOLTAN_INCLUDE_DIR}) - - - - - - - - diff --git a/src/phasicFlow/MPIParallelization/boundaryProcessor.cpp b/src/phasicFlow/MPIParallelization/boundaryProcessor.cpp deleted file mode 100644 index a5622691..00000000 --- a/src/phasicFlow/MPIParallelization/boundaryProcessor.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - ------------------------------------------------------------------------------*/ - -#include "boundaryProcessor.hpp" -#include "dictionary.hpp" - -pFlow::boundaryProcessor::boundaryProcessor -( - const dictionary& dict, - const plane& bplane, - internalPoints& internal -) -: - boundaryBase(dict, bplane, internal) -{ - -} - -bool pFlow::boundaryProcessor::beforeIteratoin -( - uint32 iterNum, - real t -) -{ - return true; -} - -bool pFlow::boundaryProcessor::iterate -( - uint32 iterNum, - real t -) -{ - return true; -} - -bool pFlow::boundaryProcessor::afterIteration -( - uint32 iterNum, - real t -) -{ - return true; -} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/boundaryProcessor.hpp b/src/phasicFlow/MPIParallelization/boundaryProcessor.hpp deleted file mode 100644 index 66b3b468..00000000 --- a/src/phasicFlow/MPIParallelization/boundaryProcessor.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. 
-
-  phasicFlow is distributed to help others in their research in the field of
-  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
-  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
------------------------------------------------------------------------------*/
-
-#ifndef __boundaryProcessor_hpp__
-#define __boundaryProcessor_hpp__
-
-
-#include "boundaryBase.hpp"
-
-namespace pFlow
-{
-
-class boundaryProcessor
-:
-	public boundaryBase
-{
-protected:
-
-
-public:
-
-	TypeInfo("boundary<processor>");
-
-	boundaryProcessor(
-		const dictionary& dict,
-		const plane& bplane,
-		internalPoints& internal);
-
-	virtual
-	~boundaryProcessor() = default;
-
-	add_vCtor
-	(
-		boundaryBase,
-		boundaryProcessor,
-		dictionary
-	);
-
-	bool beforeIteration(uint32 iterNum, real t) override;
-
-	bool iterate(uint32 iterNum, real t) override;
-
-	bool afterIteration(uint32 iterNum, real t) override;
-
-
-};
-
-}
-
-#endif //__boundaryProcessor_hpp__
\ No newline at end of file
diff --git a/src/phasicFlow/MPIParallelization/dataIOMPI.cpp b/src/phasicFlow/MPIParallelization/dataIOMPI.cpp
deleted file mode 100644
index 30fd93cf..00000000
--- a/src/phasicFlow/MPIParallelization/dataIOMPI.cpp
+++ /dev/null
@@ -1,5 +0,0 @@
-
-#include "gatherMaster.hpp"
-
-
-
diff --git a/src/phasicFlow/MPIParallelization/dataIOMPI.hpp b/src/phasicFlow/MPIParallelization/dataIOMPI.hpp
deleted file mode 100644
index 850cf69b..00000000
--- a/src/phasicFlow/MPIParallelization/dataIOMPI.hpp
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef __dataIOMPI_hpp__
-#define __dataIOMPI_hpp__
-
-#include "dataIO.hpp"
-#include "pFlowProcessors.hpp"
-
-#ifdef pFlow_Build_MPI
-	#include "gatherMaster.hpp"
-#endif
-
-namespace pFlow
-{
-
-template<typename T>
-class dataIOMPI
-:
-	public dataIO<T>
-{
-protected:
-
-	bool gatherData(span<T> data) override
-	{
-
-		if(this->ioPattern_.isAllProcessorsDifferent())
-		{
-			this->bufferSpan_ = data;
-			return true;
-		}
-
-		if( this->ioPattern_.isMasterProcessorDistribute())
-		{
-
-#ifdef pFlow_Build_MPI
-
-			auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());
-
-			if(!gatherT.gatherData(data))
-			{
-				fatalErrorInFunction<<"Error in gathering data to master"<<endl;
-				return false;
-			}
-
-			this->buffer_ = gatherT.moveData();
-
-			this->bufferSpan_ = makeSpan(this->buffer_);
-
-			return true;
-#else
-			notImplementedFunction;
-			fatalExit;
-			return false;
-#endif //pFlow_Build_MPI
-
-		}
-
-		if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
-		{
-			if( this->ioPattern_.isMaster() )
-			{
-				this->bufferSpan_ = data;
-				return true;
-			}
-			else
-			{
-				this->bufferSpan_ = span<T>(nullptr, 0);
-				return true;
-			}
-		}
-
-		return false;
-	}
-public:
-
-	TypeInfo("dataIO<MPI>");
-
-	dataIOMPI(const IOPattern& iop)
-	:
-		dataIO<T>(iop)
-	{}
-
-	dataIOMPI(const dataIOMPI&) = default;
-
-	dataIOMPI(dataIOMPI&&) = default;
-
-
-	dataIOMPI& operator=(const dataIOMPI&) = default;
-
-	dataIOMPI& operator=(dataIOMPI&&) = default;
-
-	~dataIOMPI() = default;
-
-};
-
-
-}
-
-
-#endif
\ No newline at end of file
diff --git a/src/phasicFlow/MPIParallelization/gatherMaster.hpp b/src/phasicFlow/MPIParallelization/gatherMaster.hpp
deleted file mode 100644
index ca1ecc77..00000000
--- a/src/phasicFlow/MPIParallelization/gatherMaster.hpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/*------------------------------- phasicFlow ---------------------------------
-      O        C enter of
-     O O       E ngineering and
-    O   O      M ultiscale modeling of
-   OOOOOOO     F luid flow
-------------------------------------------------------------------------------
-  Copyright (C): www.cemf.ir
-  email: hamid.r.norouzi AT gmail.com
-------------------------------------------------------------------------------
-Licence:
-  This file is part of phasicFlow code. It is a free software for simulating
-  granular and multiphase flows. You can redistribute it and/or modify it under
-  the terms of GNU General Public License v3 or any other later versions.
-
-  phasicFlow is distributed to help others in their research in the field of
-  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
-  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
------------------------------------------------------------------------------*/
-#ifndef __gatherMaster_hpp__
-#define __gatherMaster_hpp__
-
-#include <numeric>
-
-#include "procCommunication.hpp"
-
-namespace pFlow::MPI
-{
-
-template<typename T>
-class gatherMaster
-:
-	public procCommunication
-{
-protected:
-
-	std::vector<T> buffer_;
-
-public:
-
-	gatherMaster(const localProcessors& procs)
-	:
-		procCommunication(procs)
-	{}
-
-	span<T> getData()
-	{
-		if(this->localMaster())
-			return span<T>( buffer_.data(), buffer_.size());
-		else
-			return span<T>(nullptr, 0);
-	}
-
-	std::vector<T> moveData()
-	{
-		return std::move(buffer_);
-	}
-
-	bool gatherData(span<T> data)
-	{
-		int thisN = data.size();
-
-		procVector<int> numElems(this->processors(), true);
-		procVector<int> displ(this->processors(), true);
-
-		if( !this->collectAllToMaster(thisN, numElems) )
-		{
-			fatalErrorInFunction<<
-			"error in collecting number of elements from processors"<<endl;
-			return false;
-		}
-		auto totalN = std::accumulate(
-			numElems.begin(),
-			numElems.end(),
-			static_cast<int>(0));
-
-		buffer_.resize(totalN);
-
-		std::exclusive_scan(
-			numElems.begin(),
-			numElems.end(),
-			displ.begin(),
-			0);
-
-		auto bufferSpan = makeSpan(buffer_);
-
-		return CheckMPI(
-			Gatherv(
-				data,
-				bufferSpan,
-				makeSpan(numElems),
-				makeSpan(displ),
-				this->localMasterNo(),
-				this->localCommunicator()),
-			false);
-
-	}
-
-
-};
-}
-
-#endif
\ No newline at end of file
diff --git a/src/phasicFlow/MPIParallelization/mpiCommunication.hpp b/src/phasicFlow/MPIParallelization/mpiCommunication.hpp
deleted file mode 100644
index 05a41fd5..00000000
--- a/src/phasicFlow/MPIParallelization/mpiCommunication.hpp
+++ /dev/null
@@ -1,377 +0,0 @@
-/*------------------------------- phasicFlow ---------------------------------
-      O        C enter of
-     O O       E ngineering and
-    O   O      M ultiscale modeling of
-   OOOOOOO     F luid flow
-------------------------------------------------------------------------------
-  Copyright (C): www.cemf.ir
-  email: hamid.r.norouzi AT gmail.com
-------------------------------------------------------------------------------
-Licence:
-  This file is part of phasicFlow code. It is a free software for simulating
-  granular and multiphase flows. You can redistribute it and/or modify it under
-  the terms of GNU General Public License v3 or any other later versions.
-
-  phasicFlow is distributed to help others in their research in the field of
-  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
-  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- ------------------------------------------------------------------------------*/ - -#ifndef __mpiCommunication_H__ -#define __mpiCommunication_H__ - - -#include "mpiTypes.hpp" -#include "types.hpp" -#include "span.hpp" - - -namespace pFlow::MPI -{ - -extern DataType realx3Type__; - -extern DataType realx4Type__; - -extern DataType int32x3Type__; - -template -auto constexpr Type() -{ - return MPI_BYTE; -} - -template -auto constexpr sFactor() -{ - return sizeof(T); -} - -template -auto constexpr Type() -{ - return MPI_CHAR; -} -template -auto constexpr sFactor() -{ - return 1; -} - -template -auto constexpr Type() -{ - return MPI_SHORT; -} -template -auto constexpr sFactor() -{ - return 1; -} - -template -auto constexpr Type() -{ - return MPI_UNSIGNED_SHORT; -} -template -auto constexpr sFactor() -{ - return 1; -} - -template -auto constexpr Type() -{ - return MPI_INT; -} -template -auto constexpr sFactor() -{ - return 1; -} - -template<> -auto constexpr Type() -{ - return MPI_UNSIGNED; -} -template<> -auto constexpr sFactor() -{ - return 1; -} - -template<> -auto constexpr Type() -{ - return MPI_LONG; -} -template<> -auto constexpr sFactor() -{ - return 1; -} - -template<> -auto constexpr Type() -{ - return MPI_UNSIGNED_LONG; -} -template<> -auto constexpr sFactor() -{ - return 1; -} - - -template<> -auto constexpr Type() -{ - return MPI_FLOAT; -} -template<> -auto constexpr sFactor() -{ - return 1; -} - -template<> -auto constexpr Type() -{ - return MPI_DOUBLE; -} -template<> -auto constexpr sFactor() -{ - return 1; -} - -template<> -inline -auto Type() -{ - return realx3Type__; -} - -template<> -auto constexpr sFactor() -{ - return 1; -} - -template<> -inline -auto Type() -{ - return realx4Type__; -} - -template<> -auto constexpr sFactor() -{ - return 1; -} - - -template<> -inline -auto Type() -{ - return int32x3Type__; -} - - -template<> -auto constexpr sFactor() -{ - return 1; -} - -/*inline -auto createByteSequence(int sizeOfElement) -{ - DataType newType; - MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType); - MPI_Type_commit(&newType); - return newType; -}*/ - -inline -auto TypeCommit(DataType* type) -{ - return MPI_Type_commit(type); -} - -inline -auto TypeFree(DataType* type) -{ - return MPI_Type_free(type); - -} -template -inline auto getCount(Status* status, int& count) -{ - int lCount; - auto res = MPI_Get_count(status, Type(), &lCount); - count = lCount/sFactor(); - return res; -} - -template -inline int convertIndex(const int& ind) -{ - return ind*sFactor(); -} - -template -inline auto send(span data, int dest, int tag, Comm comm) -{ - return MPI_Send( - data.data(), - sFactor()*data().size(), - Type(), - dest, - tag, - comm); -} - - - -template -inline auto recv(span data, int source, int tag, Comm comm, Status *status) -{ - return MPI_Recv( - data.data(), - sFactor()*data.size(), - Type(), - source, - tag, - comm, - status); -} - - -template -inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp) -{ - return MPI_Scan(&sData, &rData, sFactor()*1, Type(), op , comm ); -} - -// gathering one scalar data to root processor -template -inline auto gather(T sendData, span& recvData, int root, Comm comm) -{ - return MPI_Gather( - &sendData, - sFactor()*1, - Type(), - recvData.data(), - sFactor()*1, - Type(), - root, - comm); -} - -template -inline auto allGather(T sendData, span& recvData, Comm comm) -{ - return MPI_Allgather( - &sendData, - sFactor()*1, - Type(), - recvData.data(), - sFactor()*1, - Type(), - comm); -} - -template -inline auto scatter(span 
sendData, T& recvData, int root, Comm comm) -{ - return MPI_Scatter( - sendData.data(), - sFactor()*1, - Type(), - &recvData, - sFactor()*1, - Type(), - root, - comm); -} - -template -inline auto Bcast(T& sendData, int root, Comm comm) -{ - return MPI_Bcast( - &sendData, sFactor()*1, Type(), root, comm); - -} - -template -bool typeCreateIndexedBlock( - span index, - DataType &newType) -{ - auto res = MPI_Type_create_indexed_block( - index.size(), - sFactor(), - index.data(), - Type(), - &newType); - - if(res == Success) - { - TypeCommit(&newType); - } - else - { - return false; - } - - return true; -} - - -template -inline auto Gatherv -( - span sendData, - span& recvData, - span recvCounts, - span displs, - int root, - Comm comm) -{ - - return MPI_Gatherv( - sendData.data(), - sendData.size()*sFactor(), - Type(), - recvData.data(), - recvCounts.data(), - displs.data(), - Type(), - root, - comm - ); - -} - -inline auto Wait(Request* request, Status* status) -{ - return MPI_Wait(request, status); -} - -inline auto typeFree(DataType& type) -{ - return MPI_Type_free(&type); -} - - -} - - - -#endif //__mpiCommunication_H__ diff --git a/src/phasicFlow/MPIParallelization/mpiTypes.hpp b/src/phasicFlow/MPIParallelization/mpiTypes.hpp deleted file mode 100644 index c1721290..00000000 --- a/src/phasicFlow/MPIParallelization/mpiTypes.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- ------------------------------------------------------------------------------*/ - -#ifndef __mpiTypes_H__ -#define __mpiTypes_H__ - - -#ifdef pFlow_Build_MPI - -#include - -namespace pFlow::MPI -{ - // types - using Comm = MPI_Comm; - using Group = MPI_Group; - using Status = MPI_Status; - using Offset = MPI_Offset; - using Request = MPI_Request; - using Operation = MPI_Op; - using Information = MPI_Info; - using DataType = MPI_Datatype; - - inline Comm CommWorld = MPI_COMM_WORLD; - - // all nulls - - inline auto ProcNull = MPI_PROC_NULL; - inline auto InfoNull = MPI_INFO_NULL; - inline auto RequestNull = MPI_REQUEST_NULL; - inline auto StatusIgnore = MPI_STATUS_IGNORE; - inline auto StatusesIgnore = MPI_STATUSES_IGNORE; - inline auto FileNull = MPI_FILE_NULL; - inline Comm CommNull = MPI_COMM_NULL; - inline auto TypeNull = MPI_DATATYPE_NULL; - - // errors - inline const auto Success = MPI_SUCCESS; - inline const auto ErrOp = MPI_ERR_OP; - - inline const auto SumOp = MPI_SUM; - - inline const size_t MaxNoProcessors = 2048; - -} - -#else - -namespace pFlow::MPI -{ - -} - -#endif // pFlow_Build_MPI - - - -#endif //__mpiTypes_H__ diff --git a/src/phasicFlow/MPIParallelization/partitioning.cpp b/src/phasicFlow/MPIParallelization/partitioning.cpp deleted file mode 100644 index 0ae5cf82..00000000 --- a/src/phasicFlow/MPIParallelization/partitioning.cpp +++ /dev/null @@ -1,113 +0,0 @@ - - -#include "partitioning.hpp" -#include "error.hpp" -#include "streams.hpp" - -void pFlow::partitioning::freeZoltan() -{ - if(validPointers_) - { - Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_, - &importProcs_, &importToPart_); - - Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_, - &exportProcs_, &exportToPart_); - validPointers_ = false; - } - - zoltan_.release(); -} - - -pFlow::partitioning::partitioning -( - const dictionary& dict, - const box& globalBox -) -: - globalBox_(globalBox) -{ - if(!zoltanInitialized__) - { - auto rc = Zoltan_Initialize - ( - processors::argc(), - processors::argv(), - &version_ - ); - - if (rc != ZOLTAN_OK) - { - fatalErrorInFunction<<"Cannot initialize zoltan"<(pFlowProcessors().localCommunicator()); - - zoltan_->Set_Param("DEBUG_LEVEL", "0"); - zoltan_->Set_Param("LB_METHOD", "RCB"); - zoltan_->Set_Param("NUM_GID_ENTRIES", "1"); - zoltan_->Set_Param("NUM_LID_ENTRIES", "1"); - zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0"); - zoltan_->Set_Param("RETURN_LISTS", "ALL"); - -} - -bool pFlow::partitioning::partition(span points, pFlagTypeHost flags) -{ - pointCollection pointCollctn{points, flags}; - - return partition(pointCollctn); -} -int GetObjectSize -( - void *data, - int num_gid_entries, - int num_lid_entries, - ZOLTAN_ID_PTR global_id, - ZOLTAN_ID_PTR local_id, - int *ierr -) -{ - *ierr = ZOLTAN_OK; - pFlow::uint32 s = *(static_cast(data)); - return static_cast(s); -} - -void PackObject -( - void *data, - int num_gid_entries, - int num_lid_entries, - ZOLTAN_ID_PTR global_id, - ZOLTAN_ID_PTR local_id, - int dest, - int size, - char *buf, - int *ierr -) -{ - -} - -bool pFlow::partitioning::migrateData(span src, span dst, uint32 elementSize) -{ - dataCollection data{src, dst, elementSize}; - - zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize); - return false; -} - -pFlow::partitioning::~partitioning() -{ - freeZoltan(); -} - -void pFlow::partitioning::printBox()const -{ - pOutput<< "localBox:" << localBox_< points_; - pFlagTypeHost pFlag_; - - uint32 numActivePoints()const - { - return pFlag_.numActive(); - } -}; - -struct dataCollection -{ - span 
srcData_; - span dstData_; - uint32 elementSize_; -}; - -class partitioning -{ -protected: - - float version_ = 0.0; - - std::unique_ptr zoltan_ = nullptr; - - bool validPointers_ = false; - - box globalBox_; - - box localBox_; - - int32 changes_, numImport_, numExport_; - - id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_; - - int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_; - - uint32 numBeforePartition_ = 0 ; - - static inline bool zoltanInitialized__ = false; - - void freeZoltan(); - - virtual - bool partition(pointCollection& points) = 0; - -public: - - partitioning( - const dictionary& dict, - const box& globalBox); - - virtual - ~partitioning(); - - create_vCtor( - partitioning, - dictionary, - ( - const dictionary& dict, - const box& globalBox - ), - (dict, globalBox)); - - bool partition( - span points, - pFlagTypeHost flags); - - - bool migrateData(span src, span dst, uint32 elementSize); - - inline - auto localBox()const - { - return localBox_; - } - - inline - const auto& globalBox()const - { - return globalBox_; - } - - inline - bool partitionsChanged()const - { - return changes_ == 1; - } - - - uint32 numberImportThisProc()const - { - return numImport_; - } - - uint32 numberExportThisProc()const - { - return numExport_; - } - - virtual - span exportList(int procNo)const = 0; - - virtual - pFlow::MPI::procVector> allExportLists()const=0; - - void printBox()const; - - -}; - - -} - - -#endif //__partitioning_hpp__ - - - -/*static - int getNumberOfPoints(void *data, int32 *ierr); - - static - void getPointList( - void *data, - int32 sizeGID, - int32 sizeLID, - id_t* globalID, - id_t* localID, - int32 wgt_dim, - float *obj_wgts, - int32 *ierr);*/ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/procCommunication.cpp b/src/phasicFlow/MPIParallelization/procCommunication.cpp deleted file mode 100644 index 81869453..00000000 --- a/src/phasicFlow/MPIParallelization/procCommunication.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- ------------------------------------------------------------------------------*/ - -#include "procCommunication.hpp" - - -pFlow::MPI::procCommunication::procCommunication -( - const localProcessors& proc -) -: - processors_(proc) -{} diff --git a/src/phasicFlow/MPIParallelization/procCommunication.hpp b/src/phasicFlow/MPIParallelization/procCommunication.hpp deleted file mode 100644 index db600386..00000000 --- a/src/phasicFlow/MPIParallelization/procCommunication.hpp +++ /dev/null @@ -1,178 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - ------------------------------------------------------------------------------*/ -#ifndef __procCommunication_hpp__ -#define __procCommunication_hpp__ - - -#include "procVector.hpp" -#include "localProcessors.hpp" -#include "mpiCommunication.hpp" - -namespace pFlow::MPI -{ - - -class procCommunication -{ -protected: - - const localProcessors& processors_; - -public: - - procCommunication(const localProcessors& proc); - - ~procCommunication()=default; - - /// @brief Tell if this processor is master processor in the local - /// communicator - /// @return true if this processor is master - - inline - const auto& processors()const - { - return processors_; - } - - inline - bool localMaster()const - { - return processors_.localMaster();; - } - - inline - auto localSize()const - { - return processors_.localSize(); - } - - inline - auto localRank()const - { - return processors_.localRank(); - } - - inline - auto localCommunicator()const - { - return processors_.localCommunicator(); - } - - /// @brief return the master number in the local communicator - auto localMasterNo()const - { - return processors_.localMasterNo(); - } - - /// Send a single val to all processors including itself (local communicator) - template - std::pair distributeMasterToAll(const T& val) - { - - T retVal = val; - auto res = CheckMPI( - Bcast(retVal, localMasterNo(),localCommunicator() ), - false); - - return {retVal, res}; - } - - /// @brief Send a single value to all processor including master (in local communicator) - /// @param val value to be sent - /// @param recvVal recieved value - /// @return true if successful and false if fail - template - bool distributeMasterToAll(const T& val, T& recvVal) - { - recvVal = val; - return CheckMPI( - Bcast(recvVal, localMasterNo(), localCommunicator()), - false); - } - - /// @brief values in the vector (size is equal to number of - // processors in local communicator) to each processor - template - std::pair distributeMasterToAll(const procVector& vals) - { - T val; - auto vec = vals.getSpan(); - auto res = CheckMPI( - scatter(vec, val, localMasterNo(), localCommunicator()), - false); - - return {val, res}; - } - - /// @brief Each 
processor in the local communicator calls this function with a value
-	/// and the values are distributed among all processors
-	template<typename T>
-	std::pair<procVector<T>, bool> collectAllToAll(const T& val)
-	{
-		procVector<T> allVec(processors_);
-		auto vec = allVec.getSpan();
-		auto res = CheckMPI(
-			allGather(val, vec, localCommunicator()),
-			false);
-		return {allVec, res};
-	}
-
-	/// @brief Each processor in the local communicator calls this function with a value
-	/// and the values are distributed among all processors
-	template<typename T>
-	bool collectAllToAll(const T& val, procVector<T>& allVec)
-	{
-		auto vec = allVec.getSpan();
-		return CheckMPI(
-			allGather(val, vec, localCommunicator()),
-			false);
-	}
-
-	/// @brief Each processor in the local communicator calls this function with a value
-	/// and all values are collected in the master processor
-	template<typename T>
-	std::pair<procVector<T>,bool> collectAllToMaster(const T& val)
-	{
-		// only on master processor
-		procVector<T> masterVec(processors_, true);
-
-		auto masterSpan = masterVec.getSpan();
-		auto res = CheckMPI(
-			gather(val, masterSpan, localMasterNo(), localCommunicator()),
-			false);
-
-		return {masterVec, res};
-
-	}
-
-	template<typename T>
-	bool collectAllToMaster(const T& val, procVector<T>& masterVec)
-	{
-		// only on master processor
-		auto [vec, res] = collectAllToMaster(val);
-		masterVec = vec;
-		return res;
-	}
-
-}; //procCommunication
-
-} // pFlow::MPI
-
-#endif //__procCommunication_hpp__
diff --git a/src/phasicFlow/MPIParallelization/procVector.hpp b/src/phasicFlow/MPIParallelization/procVector.hpp
deleted file mode 100644
index f9a80037..00000000
--- a/src/phasicFlow/MPIParallelization/procVector.hpp
+++ /dev/null
@@ -1,199 +0,0 @@
-#ifndef __procVector_hpp__
-#define __procVector_hpp__
-
-// from PhasicFlow
-
-#include "localProcessors.hpp"
-#include "span.hpp"
-#include "streams.hpp"
-#include "IOPattern.hpp"
-
-#include "mpiTypes.hpp"
-
-namespace pFlow::MPI
-{
-
-template<typename T>
-class procVector
-:
-	public std::vector<T>
-{
-public:
-
-	using ProcVectorType = procVector<T>;
-
-	using VectorType = std::vector<T>;
-
-protected:
-
-	int rank_ = 0;
-
-	bool isMaster_ = false;
-
-	using VectorType::reserve;
-
-	using VectorType::resize;
-
-	using VectorType::assign;
-
-	using VectorType::clear;
-
-	using VectorType::erase;
-
-public:
-
-	procVector(
-		const localProcessors& procs,
-		bool onlyMaster = false)
-	:
-		rank_(procs.localRank()),
-		isMaster_(procs.localMaster())
-	{
-
-		if( onlyMaster && !isMaster_ ) return;
-		this->reserve(procs.localSize());
-		this->resize(procs.localSize());
-	}
-
-	procVector(
-		const T& val,
-		const localProcessors& procs,
-		bool onlyMaster = false)
-	:
-		procVector(procs, onlyMaster)
-	{
-		std::fill(this->begin(), this->end(), val);
-	}
-
-	procVector(const T& val, const procVector& src)
-	{
-		this->reserve(src.size());
-		this->resize(src.size());
-		std::fill(this->begin(), this->end(), val);
-	}
-
-	procVector(const localProcessors& procs, const VectorType& src)
-	:
-		procVector(procs)
-	{
-		if(src.size()!= this->size())
-		{
-			fatalErrorInFunction<<
-			"Size of std::vector and procVector does not match in construction"<<endl;
-			fatalExit;
-		}
-
-		this->assign(src.begin(), src.end());
-	}
-
-	procVector(const procVector&) = default;
-
-	procVector(procVector&&) = default;
-
-	procVector& operator=(const procVector&) = default;
-
-	procVector& operator=(procVector&&) = default;
-
-	procVector& operator=(const VectorType& src)
-	{
-		if(src.size() != this->size())
-		{
-			fatalErrorInFunction<<
-			"Size of std::vector and procVector does not match in copy assignment"<<endl;
-			fatalExit;
-		}
-
-		static_cast<VectorType&>(*this).operator=(src);
-		return *this;
-	}
-
- procVector& operator=(VectorType&& src) - { - if(src.size() != this->size()) - { - fatalErrorInFunction<< - "Size of std::vector and procVector does not match in move assignment" - <(*this).operator=(std::move(src)); - return *this; - } - - procVector(const localProcessors& procs, VectorType&& src) - : - VectorType(std::move(src)) - { - if(this->size()!= static_cast(procs.localSize())) - { - fatalErrorInFunction<< - "Size of std::vector and procVector does not match in move"<(this->data(), this->size()); - } - - inline - auto getSpan()const - { - return span(const_cast(this->data()), this->size()); - } - - bool write( - iOstream& os, - const IOPattern& iop ) const - { - return writeStdVector(os, *this, iop); - } - -}; - -template -inline iOstream& operator << (iOstream& os, const procVector& ovec ) -{ - if( !ovec.write(os, IOPattern::AllProcessorsDifferent) ) - { - ioErrorInFile(os.name(), os.lineNumber()); - fatalExit; - } - return os; -} - -} - - -#endif diff --git a/src/phasicFlow/MPIParallelization/processorBoundaryField.cpp b/src/phasicFlow/MPIParallelization/processorBoundaryField.cpp deleted file mode 100644 index 5e94d0aa..00000000 --- a/src/phasicFlow/MPIParallelization/processorBoundaryField.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - ------------------------------------------------------------------------------*/ - -template - pFlow::processorBoundaryField::processorBoundaryField -( - const boundaryBase& boundary, - InternalFieldType& internal -) -: - BoundaryFieldType(boundary, internal) -{} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/processorBoundaryField.hpp b/src/phasicFlow/MPIParallelization/processorBoundaryField.hpp deleted file mode 100644 index b3e83a22..00000000 --- a/src/phasicFlow/MPIParallelization/processorBoundaryField.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. 
- - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - ------------------------------------------------------------------------------*/ -#ifndef __processorBoundaryField_hpp__ -#define __processorBoundaryField_hpp__ - -#include "boundaryField.hpp" - -namespace pFlow -{ - -template< class T, class MemorySpace = void> -class processorBoundaryField -: - public boundaryField -{ -public: - - using processorBoundaryFieldType = processorBoundaryField; - - using BoundaryFieldType = boundaryField; - - using InternalFieldType = typename BoundaryFieldType::InternalFieldType; - - using memory_space = typename BoundaryFieldType::memory_space; - - using execution_space = typename BoundaryFieldType::execution_space; - - - -public: - - TypeInfo("boundaryField"); - - processorBoundaryField( - const boundaryBase& boundary, - InternalFieldType& internal); - - - add_vCtor - ( - BoundaryFieldType, - processorBoundaryFieldType, - boundaryBase - ); - - - bool hearChanges - ( - const message& msg, - const anyList& varList - ) override - { - notImplementedFunction; - return false; - } - -}; - -} - -#include "processorBoundaryField.cpp" - -#endif //__processorBoundaryField_hpp__ diff --git a/src/phasicFlow/MPIParallelization/processorBoundaryFields.cpp b/src/phasicFlow/MPIParallelization/processorBoundaryFields.cpp deleted file mode 100644 index a81b5249..00000000 --- a/src/phasicFlow/MPIParallelization/processorBoundaryFields.cpp +++ /dev/null @@ -1,10 +0,0 @@ - -//#include "Field.hpp" -#include "createBoundaryFields.hpp" -#include "processorBoundaryField.hpp" - -createBoundary(pFlow::int8, pFlow::HostSpace, processor); - -createBoundary(pFlow::real, pFlow::HostSpace, processor); - - diff --git a/src/phasicFlow/MPIParallelization/rcb1DPartitioning.cpp b/src/phasicFlow/MPIParallelization/rcb1DPartitioning.cpp deleted file mode 100644 index c2345ab6..00000000 --- a/src/phasicFlow/MPIParallelization/rcb1DPartitioning.cpp +++ /dev/null @@ -1,325 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- ------------------------------------------------------------------------------*/ - -#include "zoltan_cpp.h" - - -#include "error.hpp" -#include "processors.hpp" -#include "rcb1DPartitioning.hpp" - -bool pFlow::rcb1DPartitioning::partition(pointCollection &points) -{ - - zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0"); - zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1"); - zoltan_->Set_Param("KEEP_CUTS", "1"); - zoltan_->Set_Param("REDUCE_DIMENSIONS", "1"); - zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1"); - zoltan_->Set_Param("AVERAGE_CUTS", "0"); - zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0"); - - zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points); - zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points); - zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points); - switch (direction_) - { - case Direction::X: - zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points); - break; - case Direction::Y: - zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points); - break; - case Direction::Z: - zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points); - break; - } - - int numGidEntries_, numLidEntries_; - int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_, - numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_, - numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_); - - if (rc != ZOLTAN_OK) - { - fatalErrorInFunction<< "Zoltan faild to perform partitioning."< thisProc(points.numActivePoints(),0); - - - for(auto i =0; iRCB_Box - ( - processors::globalRank(), - nDim, - x0, y0, z0, - x1, y1, z1 - ); - - localBox_ = globalBox_; - - if(equal(x0, x1)) - { - x0 = x0 - 0.00001; - x1 = x1 + 0.00001; - } - - switch (direction_) - { - case Direction::X : - localBox_.minPoint().x_ = x0; - localBox_.maxPoint().x_ = x1; - break; - - case Direction::Y : - localBox_.minPoint().y_ = x0; - localBox_.maxPoint().y_ = x1; - break; - - case Direction::Z : - localBox_.minPoint().z_ = x0; - localBox_.maxPoint().z_ = x1; - break; - } - - - localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint()); - localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint()); - - - return true; -} - -pFlow::rcb1DPartitioning::rcb1DPartitioning -( - const dictionary &dict, - const box &globalBox -) -: - partitioning(dict, globalBox), - exportIds_(pFlowProcessors()) -{ - - word directionName = dict.getVal("direction"); - - if(toUpper(directionName)== "X") - { - direction_ = Direction::X; - dirVector_ ={1.0, 0.0, 0.0}; - } - else if( toUpper(directionName) == "Y") - { - direction_ = Direction::Y; - dirVector_ ={0.0, 1.0, 0.0}; - } - else if( toUpper(directionName) == "Z") - { - direction_ = Direction::Z; - dirVector_ ={0.0, 0.0, 1.0}; - } - else - { - fatalErrorInFunction<< "wrong direction in dictionary "<< - dict.globalName()<<". 
Directions should be one of x, y, or z."<(data); - - *ierr = ZOLTAN_OK; - - return obj->numActivePoints(); -} - -void pFlow::rcb1DPartitioning::getPointList -( - void *data, - int sizeGID, - int sizeLID, - ZOLTAN_ID_PTR globalID, - ZOLTAN_ID_PTR localID, - int wgt_dim, - float *obj_wgts, - int *ierr -) -{ - auto* obj = static_cast(data); - *ierr = ZOLTAN_OK; - - auto activeRange = obj->pFlag_.activeRange(); - uint32 n = 0; - for (auto i=activeRange.start(); ipFlag_.isActive(i) ) - { - globalID[n] = i; - localID[n] = n; - n++; - } - } - -} - -void pFlow::rcb1DPartitioning::getGeometryList_x -( - void *data, - int sizeGID, - int sizeLID, - int num_obj, - ZOLTAN_ID_PTR globalID, - ZOLTAN_ID_PTR localID, - int num_dim, - double *geom_vec, - int *ierr -) -{ - - auto* obj = static_cast(data); - - if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) - { - *ierr = ZOLTAN_FATAL; - return; - } - - auto activeRange = obj->pFlag_.activeRange(); - uint32 n = 0; - for (auto i=activeRange.start(); ipFlag_.isActive(i) ) - { - geom_vec[n] = obj->points_[i].x_; - n++; - } - } - - *ierr = ZOLTAN_OK; - - return; -} - -void pFlow::rcb1DPartitioning::getGeometryList_y -( - void *data, - int sizeGID, - int sizeLID, - int num_obj, - ZOLTAN_ID_PTR globalID, - ZOLTAN_ID_PTR localID, - int num_dim, - double *geom_vec, - int *ierr -) -{ - - auto* obj = static_cast(data); - - if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) - { - *ierr = ZOLTAN_FATAL; - return; - } - - auto activeRange = obj->pFlag_.activeRange(); - uint32 n = 0; - for (auto i=activeRange.start(); ipFlag_.isActive(i) ) - { - geom_vec[n] = obj->points_[i].y_; - n++; - } - } - - *ierr = ZOLTAN_OK; - - return; -} - -void pFlow::rcb1DPartitioning::getGeometryList_z -( - void *data, - int sizeGID, - int sizeLID, - int num_obj, - ZOLTAN_ID_PTR globalID, - ZOLTAN_ID_PTR localID, - int num_dim, - double *geom_vec, - int *ierr -) -{ - - auto* obj = static_cast(data); - - if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) - { - *ierr = ZOLTAN_FATAL; - return; - } - - auto activeRange = obj->pFlag_.activeRange(); - uint32 n = 0; - for (auto i=activeRange.start(); ipFlag_.isActive(i) ) - { - geom_vec[n] = obj->points_[i].z_; - n++; - } - } - - *ierr = ZOLTAN_OK; - - return; -} - diff --git a/src/phasicFlow/MPIParallelization/rcb1DPartitioning.hpp b/src/phasicFlow/MPIParallelization/rcb1DPartitioning.hpp deleted file mode 100644 index b58532e3..00000000 --- a/src/phasicFlow/MPIParallelization/rcb1DPartitioning.hpp +++ /dev/null @@ -1,240 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- ------------------------------------------------------------------------------*/ -#ifndef __rcb1DPartitioning_hpp__ -#define __rcb1DPartitioning_hpp__ - -#include "partitioning.hpp" -#include "procVector.hpp" - -namespace pFlow -{ - - -class rcb1DPartitioning -: -public partitioning -{ -public: - - enum Direction - { - X = 0, - Y = 1, - Z = 2 - }; - -protected: - - /// Direction of partitioning - Direction direction_ = Direction::X; - - realx3 dirVector_ = {1.0, 0.0, 0.0}; - - word directionName_ = "x"; - - MPI::procVector> exportIds_; - - bool partition(pointCollection& points) override; - -public: - - - rcb1DPartitioning( - const dictionary& dict, - const box& globalBox); - - - ~rcb1DPartitioning() override=default; - - span exportList(int procNo)const override - { - return span( - const_cast(exportIds_[procNo].data()), - exportIds_[procNo].size()); - } - - - pFlow::MPI::procVector> allExportLists()const override - { - pFlow::MPI::procVector> allList(pFlowProcessors()); - - for(int i=0; i(data); - - if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1)) - { - *ierr = ZOLTAN_FATAL; - return; - } - - *ierr = ZOLTAN_OK; - - for (int i=0; i < num_obj ; i++) - { - geom_vec[i] = obj->pointList()[i].y_; - } - - return; - } - - - static - int getNumGeometry(void *data, int *ierr) - { - *ierr = ZOLTAN_OK; - return 1; - } - -}; - - -class RCB_x_partitioning -: -public partitioning -{ -public: - - - RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox) - : - partitioning(argc, argv, collection, gBox) - {} - - virtual - ~RCB_x_partitioning()=default; - - - bool partition() override; - - - static - void getGeometryList( - void *data, - int sizeGID, - int sizeLID, - int num_obj, - ZOLTAN_ID_PTR globalID, - ZOLTAN_ID_PTR localID, - int num_dim, - double *geom_vec, - int *ierr); - - static - int getNumGeometry(void *data, int *ierr); - - -};*/ - -} // pFlow -#endif //__rcb1DPartitioning_hpp__ \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.cpp b/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.cpp deleted file mode 100644 index a771dc54..00000000 --- a/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.cpp +++ /dev/null @@ -1,158 +0,0 @@ - - -template -pFlow::MPI::scatteredMasterDistribute::scatteredMasterDistribute -( - const localProcessors& procs -) -: - procCommunication(procs), - indexedMap_(TypeNull, procs, true) -{ - -} - -template -bool pFlow::MPI::scatteredMasterDistribute::setDataMaps -( - procVector>& maps -) -{ - if(this->localMaster()) - { - if(maps.size() != this->localSize() ) - { - fatalErrorInFunction<<"size mismatch"; - return false; - } - - std::vector index; - - freeIndexedMap(); - - for(auto proc = 0; proc< maps.size(); proc++) - { - auto m = maps[proc]; - index.resize(m.size()); - for(auto i=0; i( makeSpan(index), dt)) - { - fatalErrorInFunction; - return false; - } - else - { - indexedMap_[proc] = dt; - } - } - } - return true; -} - - -template -bool pFlow::MPI::scatteredMasterDistribute::setDataMaps -( - procVector>& maps -) -{ - if(this->localMaster()) - { - if(maps.size() != this->localSize() ) - { - fatalErrorInFunction<<"size mismatch"; - return false; - } - - freeIndexedMap(); - - - for(auto proc = 0; proc< maps.size(); proc++) - { - DataType dt; - if( !typeCreateIndexedBlock(maps[proc], dt) ) - { - fatalErrorInFunction; - return false; - } - else - { - indexedMap_[proc] = dt; - } - } - } - return true; -} - -template -void 
pFlow::MPI::scatteredMasterDistribute::freeIndexedMap() -{ - for(auto i=0; i -bool pFlow::MPI::scatteredMasterDistribute::distribute -( - span& sendBuff, - span& recvb -) -{ - procVector requests(processors(), true); - procVector statuses(processors(), true); - - if(this->localMaster()) - { - bool res = true; - for(int32 i = indexedMap_.size()-1; i>=0; i--) - { - res = res&&CheckMPI( - MPI_Issend( - sendBuff.data(), - 1, - indexedMap_[i], - i, - 0, - localCommunicator(), - &requests[i]), - false); - } - - if(!res)return false; - } - - Status stat; - bool sucss = CheckMPI( - MPI_Recv( - recvb.data(), - recvb.size()*sFactor(), - Type(), - 0, - 0, - localCommunicator(), - &stat), - false); - - if(this->localMaster()) - { - CheckMPI( - MPI_Waitall(requests.size(), requests.data(), statuses.data()), - false - ); - } - - return sucss; -} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.hpp b/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.hpp deleted file mode 100644 index dfffb384..00000000 --- a/src/phasicFlow/MPIParallelization/scatteredMasterDistribute.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- ------------------------------------------------------------------------------*/ - -#ifndef __scatteredMasterDistribute_hpp__ -#define __scatteredMasterDistribute_hpp__ - -#include "procCommunication.hpp" -#include "mpiCommunication.hpp" -#include "procVector.hpp" -#include "streams.hpp" - - -namespace pFlow::MPI -{ - -template -class scatteredMasterDistribute -: - public procCommunication -{ -protected: - - procVector indexedMap_; - - void freeIndexedMap(); - -public: - - scatteredMasterDistribute(const localProcessors& procs); - - ~scatteredMasterDistribute() - { - freeIndexedMap(); - } - - scatteredMasterDistribute(const scatteredMasterDistribute&)=delete; - - scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete; - - bool setDataMaps(procVector>& maps); - - bool setDataMaps(procVector>& maps); - - bool distribute(span& sendBuff, span& recvb); - -}; - -} //pFlow::MPI - -#include "scatteredMasterDistribute.cpp" - -#endif //__scatteredMasterDistribute_hpp__ diff --git a/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.cpp b/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.cpp deleted file mode 100644 index 7579e8d5..00000000 --- a/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.cpp +++ /dev/null @@ -1,166 +0,0 @@ - -#include "scatteredMasterDistributeChar.hpp" - -pFlow::MPI::scatteredMasterDistribute::scatteredMasterDistribute -( - size_t sizeOfElement, - const localProcessors& procs -) -: - procCommunication(procs), - indexedMap_(TypeNull, procs, true), - sizeOfElement_(sizeOfElement) -{} - - -bool pFlow::MPI::scatteredMasterDistribute::setDataMaps -( - procVector>& maps -) -{ - if(this->localMaster()) - { - if(maps.size() != this->localSize() ) - { - fatalErrorInFunction<<"size mismatch"; - return false; - } - - freeIndexedMap(); - - std::vector index; - - for(auto proc = 0; proc< maps.size(); proc++) - { - auto m = maps[proc]; - index.resize(m.size()); - for(auto i=0; i::setDataMaps -( - procVector>& maps -) -{ - if(this->localMaster()) - { - if(maps.size() != this->localSize() ) - { - fatalErrorInFunction<<"size mismatch"; - return false; - } - - std::vector index; - freeIndexedMap(); - - for(auto proc = 0; proc< maps.size(); proc++) - { - - auto m = maps[proc]; - index.resize(m.size()); - for(auto i=0; i::freeIndexedMap() -{ - for(auto i=0; i::distribute -( - span& sendBuff, - span& recvb -) -{ - procVector requests(processors(), true); - procVector statuses(processors(), true); - - - if(this->localMaster()) - { - bool res = true; - for(int32 i = indexedMap_.size()-1; i>=0; i--) - { - res = res&&CheckMPI( - MPI_Issend( - sendBuff.data(), - 1, - indexedMap_[i], - i, - 0, - localCommunicator(), - &requests[i]), - false); - } - - if(!res)return false; - } - - Status stat; - bool sucss = CheckMPI( - MPI_Recv( - recvb.data(), - recvb.size(), - MPI_CHAR, - 0, - 0, - localCommunicator(), - &stat), - true); - - if(this->localMaster()) - { - CheckMPI( - MPI_Waitall(requests.size(), requests.data(), statuses.data()), - false - ); - } - - return sucss; -} \ No newline at end of file diff --git a/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.hpp b/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.hpp deleted file mode 100644 index e0cee3b4..00000000 --- a/src/phasicFlow/MPIParallelization/scatteredMasterDistributeChar.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/*------------------------------- phasicFlow --------------------------------- - O C enter of - O O E ngineering and - O O M ultiscale 
modeling of - OOOOOOO F luid flow ------------------------------------------------------------------------------- - Copyright (C): www.cemf.ir - email: hamid.r.norouzi AT gmail.com ------------------------------------------------------------------------------- -Licence: - This file is part of phasicFlow code. It is a free software for simulating - granular and multiphase flows. You can redistribute it and/or modify it under - the terms of GNU General Public License v3 or any other later versions. - - phasicFlow is distributed to help others in their research in the field of - granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - ------------------------------------------------------------------------------*/ - -#ifndef __scatteredMasterDistributeChar_hpp__ -#define __scatteredMasterDistributeChar_hpp__ - -#include "scatteredMasterDistribute.hpp" - -namespace pFlow::MPI -{ - -template<> -class scatteredMasterDistribute -: - public procCommunication -{ -protected: - - procVector indexedMap_; - - size_t sizeOfElement_; - - void freeIndexedMap(); - -public: - - scatteredMasterDistribute( - size_t sizeOfElement, - const localProcessors& procs); - - ~scatteredMasterDistribute() - { - freeIndexedMap(); - } - - scatteredMasterDistribute(const scatteredMasterDistribute&)=delete; - - scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) = delete; - - bool setDataMaps(procVector>& maps); - - bool setDataMaps(procVector>& maps); - - bool distribute(span& sendBuff, span& recvb); - -}; - -} // pFlow::MPI - -#endif //__scatteredMasterDistributeChar_hpp__
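
The sketches below restate, in freestanding form, the MPI patterns this patch removes. Every identifier in them is illustrative: none of this code is part of phasicFlow, and each sketch only claims to show the general technique the deleted file used.

The deleted mpiCommunication.hpp mapped C++ types onto MPI datatypes with a pair of function templates: Type<T>() returns the datatype to pass to MPI and sFactor<T>() a count multiplier (1 for native types, sizeof(T) for the raw-byte fallback). A minimal sketch of that technique, assuming only <mpi.h> (names here are hypothetical):

#include <mpi.h>

// Fallback: ship unknown trivially-copyable types as raw bytes and scale
// the element count by sizeof(T).
template<typename T> inline MPI_Datatype mpiType()    { return MPI_BYTE; }
template<typename T> inline int          mpiFactor()  { return sizeof(T); }

// Native types map one-to-one, so the count multiplier collapses to 1.
template<> inline MPI_Datatype mpiType<int>()         { return MPI_INT; }
template<> inline int          mpiFactor<int>()       { return 1; }
template<> inline MPI_Datatype mpiType<double>()      { return MPI_DOUBLE; }
template<> inline int          mpiFactor<double>()    { return 1; }

// Every wrapper then multiplies its count by the factor, e.g.:
template<typename T>
int sendBuffer(const T* data, int n, int dest, int tag, MPI_Comm comm)
{
    return MPI_Send(data, n * mpiFactor<T>(), mpiType<T>(), dest, tag, comm);
}

Compound types such as realx3 were instead registered once as committed derived datatypes (the extern realx3Type__ above); MPI_Type_contiguous(3, MPI_DOUBLE, &t) followed by MPI_Type_commit(&t) is the standard way to build one.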
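
gatherMaster<T>::gatherData is the textbook variable-length gather: collect per-rank counts, exclusive-scan them into displacements, then MPI_Gatherv into a single master-side buffer. A self-contained sketch of the same steps with an int payload and rank 0 as master (error handling reduced to the MPI return codes):

#include <mpi.h>
#include <numeric>
#include <vector>

// Gather a variable number of ints from every rank of comm onto rank 0.
// Returns the concatenated data on rank 0 and an empty vector elsewhere.
std::vector<int> gatherToMaster(const std::vector<int>& mine, MPI_Comm comm)
{
    int rank = 0, nProcs = 0;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nProcs);

    // 1) every rank reports how many elements it holds
    int myCount = static_cast<int>(mine.size());
    std::vector<int> counts(rank == 0 ? nProcs : 0);
    MPI_Gather(&myCount, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, comm);

    // 2) an exclusive scan turns counts into receive displacements
    std::vector<int> displs(counts.size(), 0);
    std::exclusive_scan(counts.begin(), counts.end(), displs.begin(), 0);

    // 3) variable-length gather into one buffer on the master
    std::vector<int> all;
    if (rank == 0)
        all.resize(displs.back() + counts.back());
    MPI_Gatherv(mine.data(), myCount, MPI_INT,
                all.data(), counts.data(), displs.data(), MPI_INT,
                0, comm);
    return all;
}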
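
scatteredMasterDistribute inverted that flow: per destination rank it built an MPI indexed-block datatype over the master's send buffer, posted MPI_Issend with count 1 of that type, and matched it with a plain contiguous receive, letting MPI do the scatter of non-contiguous elements. A condensed sketch of the datatype side (double payload assumed; the hypothetical helper names are not from the deleted code):

#include <mpi.h>
#include <vector>

// Datatype that selects the given element positions out of a double buffer.
MPI_Datatype makeSelectionType(const std::vector<int>& indices)
{
    MPI_Datatype t = MPI_DATATYPE_NULL;
    MPI_Type_create_indexed_block(
        static_cast<int>(indices.size()), // number of blocks
        1,                                // one element per block
        indices.data(),                   // block displacements, in elements
        MPI_DOUBLE,
        &t);
    MPI_Type_commit(&t);
    return t;
}

// Master side: one element of the derived type walks the scattered positions.
void sendSelection(const double* buf, MPI_Datatype sel, int dest, MPI_Comm comm)
{
    MPI_Send(buf, 1, sel, dest, /*tag*/ 0, comm);
}

// Receiver side: the selection arrives as a plain contiguous run of n doubles.
void recvSelection(double* out, int n, int master, MPI_Comm comm)
{
    MPI_Recv(out, n, MPI_DOUBLE, master, /*tag*/ 0, comm, MPI_STATUS_IGNORE);
}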
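
rcb1DPartitioning restricted Zoltan's recursive coordinate bisection to a single axis simply by reporting one geometric dimension and handing back only that coordinate, then read each rank's slab with RCB_Box (hence KEEP_CUTS=1). A trimmed sketch of the callback wiring against the zoltan_cpp.h wrapper, assuming Zoltan_Initialize has already run; real code must also migrate the exported points and check every return code:

#include <zoltan_cpp.h>
#include <vector>

struct Cloud { std::vector<double> x; };   // one coordinate per local point

static int numObj(void* data, int* ierr)
{
    *ierr = ZOLTAN_OK;
    return static_cast<int>(static_cast<Cloud*>(data)->x.size());
}

static void objList(void* data, int, int, ZOLTAN_ID_PTR gids, ZOLTAN_ID_PTR lids,
                    int, float*, int* ierr)
{
    auto* c = static_cast<Cloud*>(data);
    for (std::size_t i = 0; i < c->x.size(); ++i) { gids[i] = i; lids[i] = i; }
    *ierr = ZOLTAN_OK;
}

static int numGeom(void*, int* ierr) { *ierr = ZOLTAN_OK; return 1; } // 1-D

static void geomList(void* data, int, int, int nObj, ZOLTAN_ID_PTR, ZOLTAN_ID_PTR,
                     int, double* out, int* ierr)
{
    auto* c = static_cast<Cloud*>(data);
    for (int i = 0; i < nObj; ++i) out[i] = c->x[i];   // only one axis reported
    *ierr = ZOLTAN_OK;
}

void partitionAlongX(Cloud& cloud, MPI_Comm comm)
{
    Zoltan zz(comm);
    zz.Set_Param("LB_METHOD", "RCB");
    zz.Set_Param("NUM_GID_ENTRIES", "1");
    zz.Set_Param("NUM_LID_ENTRIES", "1");
    zz.Set_Param("RETURN_LISTS", "ALL");
    zz.Set_Param("KEEP_CUTS", "1");        // required for RCB_Box afterwards

    zz.Set_Num_Obj_Fn(numObj, &cloud);
    zz.Set_Obj_List_Fn(objList, &cloud);
    zz.Set_Num_Geom_Fn(numGeom, &cloud);
    zz.Set_Geom_Multi_Fn(geomList, &cloud);

    int changes, nGid, nLid, nImport, nExport;
    ZOLTAN_ID_PTR impG, impL, expG, expL;
    int *impProc, *impPart, *expProc, *expPart;
    zz.LB_Partition(changes, nGid, nLid,
                    nImport, impG, impL, impProc, impPart,
                    nExport, expG, expL, expProc, expPart);

    // ... ship exported points to expProc[i], then query this rank's slab:
    int rank, nDim;
    double x0, y0, z0, x1, y1, z1;
    MPI_Comm_rank(comm, &rank);
    zz.RCB_Box(rank, nDim, x0, y0, z0, x1, y1, z1);

    Zoltan::LB_Free_Part(&impG, &impL, &impProc, &impPart);
    Zoltan::LB_Free_Part(&expG, &expL, &expProc, &expPart);
}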
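
Finally, procCommunication::distributeMasterToAll is a thin MPI_Bcast wrapper returning the value together with a success flag. The same shape in plain MPI, valid for trivially copyable T since the payload travels as raw bytes (mirroring the MPI_BYTE fallback above):

#include <mpi.h>
#include <utility>

// Broadcast the master's value to every rank of comm; every rank returns the
// (now identical) value plus a success flag.
template<typename T>
std::pair<T, bool> distributeMasterToAll(const T& val, int masterRank, MPI_Comm comm)
{
    T ret = val;   // only the master's copy matters; the rest are overwritten
    int err = MPI_Bcast(&ret, sizeof(T), MPI_BYTE, masterRank, comm);
    return { ret, err == MPI_SUCCESS };
}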