98 Commits

Author SHA1 Message Date
0053ef002a CMakeList modified for automatic compile of Zoltan 2025-05-16 18:55:25 +03:30
ad5233bb77 Merge branch 'main' into local-MPI 2025-05-16 10:10:46 +03:30
d7479cf1bd Zoltan is added as thirdParty package 2025-05-15 21:58:43 +03:30
83a6e4baa1 Merge branch 'main' of github.com:PhasicFlow/phasicFlow 2025-05-15 12:01:40 +03:30
bf0197f643 change in the type name of timeValue and sample dictionaries for settingsDict and domain 2025-05-15 12:00:13 +03:30
5a149f3d85 Merge pull request #224 from wanqing0421/postMesh
fixed the cuda bug
2025-05-15 03:35:08 +03:30
80df6bfc9b fixed the cuda bug 2025-05-11 02:29:28 +08:00
b2581cc2a9 Merge pull request #223 from wanqing0421/patch-1
fix the warning messages
2025-05-09 09:19:05 +03:30
1831c2c6c5 fix the warning messages 2025-05-06 10:48:06 +08:00
14731a6c31 This is the first merge from main into MPI branch
Merge branch 'main' into local-MPI
2025-05-03 16:40:46 +03:30
ab856e1df2 move of markdonwList 2025-05-03 12:48:42 +03:30
b4bc724a68 readme helical 2025-05-02 22:28:56 +03:30
ee33469295 readme helical 2025-05-02 22:26:38 +03:30
3933d65303 yaml update5 2025-05-02 22:03:16 +03:30
cf4d22c963 yaml update4 2025-05-02 21:59:31 +03:30
86367c7e2c yaml update3 2025-05-02 21:51:03 +03:30
a7e51a91aa yaml update2 2025-05-02 21:46:43 +03:30
5e56bf1b8c yaml update1 2025-05-02 21:28:40 +03:30
343ac1fc04 yaml update 2025-05-02 21:27:23 +03:30
6b04d17c7f sync-wiki to process img<> tags 2025-05-02 20:47:21 +03:30
97f46379c7 image resize 2025-05-02 20:25:20 +03:30
32fd6cb12e features update 2025-05-02 20:06:49 +03:30
be16fb0684 tutorials link added 2025-05-02 18:29:08 +03:30
4c96c6fa1e test 2025-04-30 19:01:51 +03:30
196b7a1833 how to build readme.md to wiki 2025-04-30 18:52:15 +03:30
316e71ff7a test readme.md 2025-04-30 18:36:53 +03:30
7a4a33ef37 a new workflow for readme.md files to wiki 2025-04-30 18:34:53 +03:30
edfbdb22e9 readmd.md update8 2025-04-30 08:56:11 +03:30
c6725625b3 readmd.md update7 2025-04-30 08:45:28 +03:30
253d6fbaf7 readmd.md update6 2025-04-30 08:40:46 +03:30
701baf09e6 readmd.md update5 2025-04-30 08:37:17 +03:30
20c94398a9 readmd.md update4 2025-04-30 08:34:51 +03:30
dd36e32da4 readmd.md update3 2025-04-30 08:31:19 +03:30
a048c2f5d7 readmd.md update2 2025-04-30 08:27:07 +03:30
8b324bc2b6 readmd.md update1 2025-04-30 08:18:29 +03:30
c7f790a1fa readmd.md update 2025-04-30 08:14:10 +03:30
166d7e72c2 rrr 2025-04-29 20:23:08 +03:30
c126f9a8a3 rr 2025-04-29 20:19:25 +03:30
7104a33a4b r 2025-04-29 20:14:34 +03:30
16b6084d98 readme update 2025-04-29 20:10:06 +03:30
2afea7b273 workflow update 2025-04-29 20:09:22 +03:30
2c5b4f55d1 readme.test 2025-04-29 20:01:13 +03:30
a7dc69a801 Merge branch 'main' of github.com:PhasicFlow/phasicFlow 2025-04-29 19:59:36 +03:30
32287404fa workflow update 2025-04-29 19:54:20 +03:30
8b3530c289 Merge pull request #221 from wanqing0421/benchmarks
update phasicFlow snapshot
2025-04-29 19:47:25 +03:30
d8c3fc02d5 update phasicFlow snapshot 2025-04-29 20:46:30 +08:00
4dab700a47 update image 2025-04-29 20:30:10 +08:00
a50ceeee2c update readme and figure 2025-04-29 20:25:00 +08:00
468730289b test for wiki 2025-04-28 23:06:29 +03:30
27f0202002 workflow for wiki 2025-04-28 23:04:42 +03:30
c69bfc79e1 endsolid bug fix for space separated names 2025-04-28 19:42:49 +03:30
69909b3c01 bug fix in reading stl file 2025-04-28 13:56:21 +03:30
8986c47b69 readmd.md for benchmark is updated 2025-04-28 12:25:53 +03:30
37282f16ac Merge branch 'PhasicFlow:main' into importStl 2025-04-28 09:35:49 +08:00
cd051a6497 Merge pull request #220 from wanqing0421/benchmarks
update readme
2025-04-27 21:57:40 +03:30
8b5d14afe6 update readme figure 2025-04-28 02:20:42 +08:00
eb37affb94 update readme 2025-04-28 02:17:04 +08:00
c0d12f4243 Merge pull request #219 from PhasicFlow/postprocessPhasicFlow
diameter -> distance for benchmarks
2025-04-27 21:08:04 +03:30
a1b5a9bd5d Merge pull request #218 from wanqing0421/benchmarks
upload readme for benchmarks
2025-04-27 20:59:37 +03:30
dc0edbc845 diameter -> distance for benchmarks 2025-04-26 21:22:59 +03:30
b423b6ceb7 upload readme for benchmarks 2025-04-26 15:17:57 +08:00
1f6a953154 fix bug when endsolid with a suffix name 2025-04-26 14:58:56 +08:00
bbd3afea0e Merge pull request #216 from PhasicFlow/postprocessPhasicFlow
readme.md for geometryPhasicFlow
2025-04-25 21:04:53 +03:30
53f0e959b0 readme.md for geometryPhasicFlow 2025-04-25 21:04:18 +03:30
c12022fb19 Merge pull request #215 from wanqing0421/importStl
add scale and transform function during the stl model importing process
2025-04-25 20:45:53 +03:30
d876bb6246 correction for tab 2025-04-26 01:13:42 +08:00
cb40e01b7e Merge pull request #206 from wanqing0421/main
fixed selectorStride bug
2025-04-25 20:35:11 +03:30
5f6400c032 add scale and transform function during the stl model importing process 2025-04-26 00:43:56 +08:00
8863234c1c update stride selector 2025-04-25 23:11:19 +08:00
1cd64fb2ec Merge branch 'PhasicFlow:main' into main 2025-04-25 23:00:10 +08:00
5f8ea2d841 fixed selectorStride bug 2025-04-22 14:46:12 +08:00
3d0f31a2b2 Merge pull request #3 from hamidrezanorouzi/MPIdev
Mp idev
2024-11-22 22:40:25 +03:30
HRN
ba8f307c56 Code recovery MPI part 2024-10-18 23:10:42 +03:30
HRN
f317cef0ef Merge branch 'develop' into MPIdev after first commit after code loss 2024-09-21 13:39:17 +03:30
HRN
93617a6ee5 MPI developement zeroTime 2024-08-07 15:13:15 +03:30
HRN
d7e6292e41 Merge branch 'develop' into MPIdev 2024-05-24 20:51:13 +03:30
HRN
5a25556b8a changing blocking-recive to non-blocking (in boundaryProcessor) to improve MPI run 2024-05-24 00:10:15 +03:30
HRN
a0c51e94e1 Merge branch 'develop' into MPIdev 2024-05-23 22:04:47 +03:30
74c281617b Merge pull request #2 from hamidrezanorouzi/MPIdev
Mp idev
2024-05-22 10:08:38 +03:30
d2003b35e6 bug fix to work with CUDA run 2024-05-22 09:52:48 +03:30
e05bd2c350 Merge branch 'develop' into MPIdev 2024-05-18 19:14:01 +03:30
HRN
665879f8ca Data transfer between processors
- Data transfer is done
- contact search at processor boundary is done
- contact force calculation at processor boundary is done
- tests have been done on rotating drum using serial and openMP
2024-05-12 19:06:53 +03:30
HRN
e756d471ba Merge branch 'develop' into MPIdev 2024-05-05 23:07:48 +03:30
30d5349fcf Merge pull request #1 from hamidrezanorouzi/MPIdev
Mp idev
2024-05-05 23:02:16 +03:30
HRN
68b7d141fa boundaryProcessor -> transferData
- point data is being transferred (no notification yet).
- field data should be transferred
2024-05-05 22:54:12 +03:30
HRN
525e972c20 Merge branch 'develop' into MPIdev 2024-05-05 18:46:33 +03:30
HRN
6f48eca95b The problem with memory leak in MPI data transfer fixed and tested. 2024-04-30 00:28:29 +03:30
HRN
b5a81bc0fc Merge branch 'MPI' of github.com:hamidrezanorouzi/phasicFlowMPI into MPIdev 2024-04-28 19:13:54 +03:30
d0798dfc0b clean up of un-used codes 2024-04-27 09:11:09 -07:00
729c088c9f Merge branch 'develop' into MPI 2024-04-27 09:05:49 -07:00
6241fa6dd3 MPI particle id handler 2024-04-27 08:59:13 -07:00
5f90605a41 MPI-boundaries for processor 2024-04-27 08:55:00 -07:00
94fcc3d01b MPI devleopment with boundaries for contact search and data communication, memory leak 2024-04-27 08:44:35 -07:00
1321e6340e Merge branch 'develop' into MPI 2024-04-20 06:16:28 -07:00
5864cea7e8 Merge branch 'develop' into MPI
- This is the merge from develop branch to get latest update from the this branch to start MPI developemet.
- This stage boundaries and data exchange between processors through boundaries are handled.
2024-04-18 10:19:19 -07:00
656e03de36 bug fix for empty domains in partitioning (tested) 2024-01-21 13:23:45 -08:00
41187b0e03 Merge branch 'develop' into MPI 2024-01-20 12:34:26 -08:00
6969b71cc5 MPI code 2024-01-20 11:30:49 -08:00
3504 changed files with 325283 additions and 422 deletions

.github/scripts/sync-wiki.py (new executable file)

@@ -0,0 +1,153 @@
#!/usr/bin/env python3
import os
import re
import yaml
import sys

# Constants
REPO_URL = "https://github.com/PhasicFlow/phasicFlow"
REPO_PATH = os.path.join(os.environ.get("GITHUB_WORKSPACE", ""), "repo")
WIKI_PATH = os.path.join(os.environ.get("GITHUB_WORKSPACE", ""), "wiki")
MAPPING_FILE = os.path.join(REPO_PATH, "doc/mdDocs/markdownList.yml")

def load_mapping():
    """Load the markdown to wiki page mapping file."""
    try:
        with open(MAPPING_FILE, 'r') as f:
            data = yaml.safe_load(f)
        return data.get('mappings', [])
    except Exception as e:
        print(f"Error loading mapping file: {e}")
        return []

def convert_relative_links(content, source_path):
    """Convert relative links in markdown content to absolute URLs."""
    # Find markdown links with regex pattern [text](url)
    md_pattern = r'\[([^\]]+)\]\(([^)]+)\)'
    # Find HTML img tags
    img_pattern = r'<img\s+src=[\'"]([^\'"]+)[\'"]'

    def replace_link(match):
        link_text = match.group(1)
        link_url = match.group(2)
        # Skip if already absolute URL or anchor
        if link_url.startswith(('http://', 'https://', '#', 'mailto:')):
            return match.group(0)
        # Get the directory of the source file
        source_dir = os.path.dirname(source_path)
        # Create absolute path from repository root
        if link_url.startswith('/'):
            # If link starts with /, it's already relative to repo root
            abs_path = link_url
        else:
            # Otherwise, it's relative to the file location
            abs_path = os.path.normpath(os.path.join(source_dir, link_url))
        if not abs_path.startswith('/'):
            abs_path = '/' + abs_path
        # Convert to GitHub URL
        github_url = f"{REPO_URL}/blob/main{abs_path}"
        return f"[{link_text}]({github_url})"

    def replace_img_src(match):
        img_src = match.group(1)
        # Skip if already absolute URL
        if img_src.startswith(('http://', 'https://')):
            return match.group(0)
        # Get the directory of the source file
        source_dir = os.path.dirname(source_path)
        # Create absolute path from repository root
        if img_src.startswith('/'):
            # If link starts with /, it's already relative to repo root
            abs_path = img_src
        else:
            # Otherwise, it's relative to the file location
            abs_path = os.path.normpath(os.path.join(source_dir, img_src))
        if not abs_path.startswith('/'):
            abs_path = '/' + abs_path
        # Convert to GitHub URL (use raw URL for images)
        github_url = f"{REPO_URL}/raw/main{abs_path}"
        return f'<img src="{github_url}"'

    # Replace all markdown links
    content = re.sub(md_pattern, replace_link, content)
    # Replace all img src tags
    content = re.sub(img_pattern, replace_img_src, content)
    return content

def process_file(source_file, target_wiki_page):
    """Process a markdown file and copy its contents to a wiki page."""
    source_path = os.path.join(REPO_PATH, source_file)
    target_path = os.path.join(WIKI_PATH, f"{target_wiki_page}.md")
    print(f"Processing {source_path} -> {target_path}")
    try:
        # Check if source exists
        if not os.path.exists(source_path):
            print(f"Source file not found: {source_path}")
            return False
        # Read source content
        with open(source_path, 'r') as f:
            content = f.read()
        # Convert relative links
        content = convert_relative_links(content, source_file)
        # Write to wiki page
        with open(target_path, 'w') as f:
            f.write(content)
        return True
    except Exception as e:
        print(f"Error processing {source_file}: {e}")
        return False

def main():
    # Check if wiki directory exists
    if not os.path.exists(WIKI_PATH):
        print(f"Wiki path not found: {WIKI_PATH}")
        sys.exit(1)
    # Load mapping
    mappings = load_mapping()
    if not mappings:
        print("No mappings found in the mapping file")
        sys.exit(1)
    print(f"Found {len(mappings)} mappings to process")
    # Process each mapping
    success_count = 0
    for mapping in mappings:
        source = mapping.get('source')
        target = mapping.get('target')
        if not source or not target:
            print(f"Invalid mapping: {mapping}")
            continue
        if process_file(source, target):
            success_count += 1
    print(f"Successfully processed {success_count} of {len(mappings)} files")
    # Exit with error if any file failed
    if success_count < len(mappings):
        sys.exit(1)

if __name__ == "__main__":
    main()
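
The script can also be exercised outside of GitHub Actions. A minimal sketch, assuming the two checkouts are placed where `REPO_PATH` and `WIKI_PATH` above expect them:

```bash
# hypothetical local dry run
export GITHUB_WORKSPACE="$HOME/sync-test"
git clone https://github.com/PhasicFlow/phasicFlow "$GITHUB_WORKSPACE/repo"
git clone https://github.com/PhasicFlow/phasicFlow.wiki.git "$GITHUB_WORKSPACE/wiki"
python3 "$GITHUB_WORKSPACE/repo/.github/scripts/sync-wiki.py"
```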

.github/workflows/sync-wiki.yml (new file)

@@ -0,0 +1,60 @@
name: Sync-Wiki

on:
  push:
    branches:
      - main
    paths:
      - "**/*.md"
      - ".github/workflows/sync-wiki.yml"
      - "doc/mdDocs/markdownList.yml"
      - ".github/scripts/sync-wiki.py"
  workflow_dispatch:

jobs:
  sync-wiki:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3
        with:
          path: repo

      - name: Checkout Wiki
        uses: actions/checkout@v3
        with:
          repository: ${{ github.repository }}.wiki
          path: wiki
        continue-on-error: true

      - name: Create Wiki Directory if Not Exists
        run: |
          if [ ! -d "wiki" ]; then
            mkdir -p wiki
            cd wiki
            git init
            git config user.name "${{ github.actor }}"
            git config user.email "${{ github.actor }}@users.noreply.github.com"
            git remote add origin "https://github.com/${{ github.repository }}.wiki.git"
          fi

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: pip install pyyaml

      - name: Sync markdown files to Wiki
        run: |
          python $GITHUB_WORKSPACE/repo/.github/scripts/sync-wiki.py
        env:
          GITHUB_REPOSITORY: ${{ github.repository }}

      - name: Push changes to wiki
        run: |
          cd wiki
          git config user.name "${{ github.actor }}"
          git config user.email "${{ github.actor }}@users.noreply.github.com"
          git add .
          if git status --porcelain | grep .; then
            git commit -m "Auto sync wiki from main repository"
            git push --set-upstream https://${{ github.actor }}:${{ github.token }}@github.com/${{ github.repository }}.wiki.git master -f
          else
            echo "No changes to commit"
          fi
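
Because the workflow also declares `workflow_dispatch:`, it can be triggered manually. One way to do that (assuming the GitHub CLI is installed and authenticated with push rights on the repository):

```bash
gh workflow run sync-wiki.yml -R PhasicFlow/phasicFlow
```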

.gitignore

@@ -37,11 +37,15 @@
 *.out
 *.app
-# directories
+# Exclude specific directories wherever they appear
 build/**
 include/**
 bin/**
 lib/**
+**/build/
+**/include/
+**/bin/
+**/lib/
 test*/**
 **/**notnow
 doc/code-documentation/
@@ -61,3 +65,5 @@ doc/DTAGS
 **/[0-9]*.[0-9][0-9][0-9][0-9][0-9][0-9][0-9]
 **/[0-9]*.[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]
 **/VTK
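
Whether the new recursive patterns take effect can be verified with `git check-ignore`; for example (the path below is hypothetical):

```bash
git check-ignore -v src/somePackage/build/CMakeCache.txt
```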

@@ -96,7 +96,7 @@ public:
 		return Control_();
 	}

-	auto inline constexpr usingDoulle()const
+	auto inline constexpr usingDouble()const
 	{
 		return pFlow::usingDouble__;
 	}

benchmarks/helicalMixer/readme.md (new file)

@@ -0,0 +1 @@
# Helical Mixer Benchmark (phasicFlow v-1.0)

benchmarks/readme.md (new file)

@@ -0,0 +1,7 @@
# Benchmarks
Benchmarks have been performed on two different simulations: one with a simple geometry (rotating drum) and one with a complex geometry (helical mixer).
- [rotating drum](./rotatingDrum/readme.md)
- [helical mixer](./helicalMixer/readme.md)

Three binary image files added (the snapshot and performance figures referenced in benchmarks/rotatingDrum/readme.md below): 124 KiB, 55 KiB, and 180 KiB.

benchmarks/rotatingDrum/readme.md (new file)

@@ -0,0 +1,96 @@
# Rotating Drum Benchmark (phasicFlow v-1.0)
## Overview
This benchmark compares the performance of phasicFlow with a well-established commercial DEM software package for simulating a rotating drum with varying particle counts (250k to 8M particles). The benchmark measures both computational efficiency and memory usage across different hardware configurations.
## Simulation Setup
<div align="center">
<img src="./images/commericalDEMsnapshot.png"/>
<div align="center">
<p>Figure 1. Commercial DEM simulation snapshot</p>
</div>
</div>
<div align="center">
<img src="./images/phasicFlow_snapshot.png"/>
<div align="center">
<p>Figure 2. phasicFlow simulation snapshot, visualized using ParaView</p>
</div>
</div>
### Hardware Specifications
<div align="center">
Table 1. Hardware specifications used for benchmarking.
</div>
| System | CPU | GPU | Operating System |
| :---------: | :----------------------: | :--------------------------: | :--------------: |
| Laptop | Intel i9-13900HX 2.2 GHz | NVIDIA GeForce RTX 4050Ti 6G | Windows 11 24H2 |
| Workstation | Intel Xeon 4210 2.2 GHz | NVIDIA RTX A4000 16G | Ubuntu 22.04 |
### Simulation Parameters
<div align="center">
Table 2. Parameters for rotating drum simulations.
</div>
| Case | Particle Diameter | Particle Count | Drum Length | Drum Radius |
| :-------: | :---------------: | :--------------: | :------------------: | :------------------: |
| 250k | 6 mm | 250,000 | 0.8 m | 0.2 m |
| 500k | 5 mm | 500,000 | 0.8 m | 0.2 m |
| 1M | 4 mm | 1,000,000 | 0.8 m | 0.2 m |
| 2M | 3 mm | 2,000,000 | 1.2 m | 0.2 m |
| 4M | 3 mm | 4,000,000 | 1.6 m | 0.2 m |
| 8M | 2 mm | 8,000,000 | 1.6 m | 0.2 m |
The time step for all simulations was set to 1.0e-5 seconds, and each simulation ran for 4 seconds of physical time (i.e., 400,000 time steps).
## Performance Comparison
### Execution Time
<div align="center">
Table 3. Total calculation time (minutes) for different configurations.
</div>
| Software | 250k | 500k | 1M | 2M | 4M | 8M |
| :---------------: | :----: | :-----: | :-----: | :-----: | :-----: | :------: |
| phasicFlow-4050Ti | 54 min | 111 min | 216 min | 432 min | - | - |
| Commercial DEM-4050Ti | 68 min | 136 min | 275 min | 570 min | - | - |
| phasicFlow-A4000 | 38 min | 73 min | 146 min | 293 min | 589 min | 1188 min |
The execution time scales linearly with particle count. phasicFlow demonstrates approximately:
- 20% faster calculation than the well-established commercial DEM software on the same hardware
- 30% performance improvement when using the NVIDIA RTX A4000 compared to the RTX 4050Ti
<div align="center">
<img src="./images/performance1.png"/>
<p>Figure 3. Calculation time comparison between phasicFlow and the well-established commercial DEM software.</p>
</div>
### Memory Usage
<div align="center">
Table 4. Memory consumption for different configurations.
</div>
| Software | 250k | 500k | 1M | 2M | 4M | 8M |
| :---------------: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: |
| phasicFlow-4050Ti | 252 MB | 412 MB | 710 MB | 1292 MB | - | - |
| Commercial DEM-4050Ti | 485 MB | 897 MB | 1525 MB | 2724 MB | - | - |
| phasicFlow-A4000 | 344 MB | 480 MB | 802 MB | 1386 MB | 2590 MB | 4966 MB |
Memory efficiency comparison:

- phasicFlow uses approximately 0.7 GB of memory per million particles
- The commercial DEM software uses approximately 1.2 GB of memory per million particles
- phasicFlow shows ~42% lower memory consumption than the commercial alternative
- Memory usage scales linearly with particle count in both packages; given the limited memory on GPUs, phasicFlow's smaller footprint makes it possible to run larger simulations on the same GPU.
## Run Your Own Benchmarks
The simulation case setup files are available in this folder for users interested in performing similar benchmarks on their own hardware. These files can be used to reproduce the tests and compare performance across different systems.

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.004;	// minimum space between centers of particles
+		distance 	0.004;	// minimum space between centers of particles
 		numPoints 	1000000;	// number of particles in the simulation

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.006;	// minimum space between centers of particles
+		distance 	0.006;	// minimum space between centers of particles
 		numPoints 	250000;	// number of particles in the simulation

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.003;	// minimum space between centers of particles
+		distance 	0.003;	// minimum space between centers of particles
 		numPoints 	2000000;	// number of particles in the simulation

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.003;	// minimum space between centers of particles
+		distance 	0.003;	// minimum space between centers of particles
 		numPoints 	4000000;	// number of particles in the simulation

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.005;	// minimum space between centers of particles
+		distance 	0.005;	// minimum space between centers of particles
 		numPoints 	500000;	// number of particles in the simulation

@@ -35,7 +35,7 @@ surfaces
 	radius2 	0.2;	// radius at p2
-	resolution 	24;	// number of divisions
+	resolution 	60;	// number of divisions
 	material 	wallMat;	// material name of this wall

@@ -27,7 +27,7 @@ positionParticles
 	orderedInfo
 	{
-		diameter 	0.003;	// minimum space between centers of particles
+		distance 	0.003;	// minimum space between centers of particles
 		numPoints 	6000000;	// number of particles in the simulation

@@ -19,7 +19,7 @@ export pFlow_SRC_DIR="$pFlow_PROJECT_DIR/src"
 export Kokkos_DIR="$kokkosDir"
-export Zoltan_DIR="$projectDir/Zoltan"
+#export Zoltan_DIR="$projectDir/Zoltan"

 # Cleanup variables (done as final statement for a clean exit code)
 unset projectDir

@@ -0,0 +1,44 @@
# Macro to check for Zoltan installation and build it if needed
# Usage: zoltan_find_or_build(ZOLTAN_DIR)
# Returns: ZOLTAN_INCLUDE_DIR, ZOLTAN_LIBRARY
macro(zoltan_find_or_build ZOLTAN_DIR)
    # Set the Zoltan directory
    set(ZOLTAN_PREFIX "${ZOLTAN_DIR}" CACHE STRING "Zoltan install directory")
    message(STATUS "Zoltan install directory is ${ZOLTAN_PREFIX}")

    # Check if the Zoltan library is already built
    find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include")
    message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}")
    find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib")
    message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")

    # Check if the Zoltan library exists; if not, compile it using the buildlib script
    if(NOT ZOLTAN_LIBRARY)
        message(STATUS "Zoltan library not found. Compiling from source using buildlib script...")

        # Execute the buildlib bash script
        execute_process(
            COMMAND bash ${ZOLTAN_PREFIX}/buildlib
            WORKING_DIRECTORY ${ZOLTAN_PREFIX}
            RESULT_VARIABLE ZOLTAN_BUILD_RESULT
            OUTPUT_VARIABLE ZOLTAN_BUILD_OUTPUT
            ERROR_VARIABLE ZOLTAN_BUILD_ERROR
        )

        if(NOT ZOLTAN_BUILD_RESULT EQUAL 0)
            message(FATAL_ERROR "Failed to build Zoltan library using buildlib script. Error: ${ZOLTAN_BUILD_ERROR}")
        endif()

        # Try to find the library again after building
        find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib" NO_DEFAULT_PATH)
        find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include" NO_DEFAULT_PATH)

        if(NOT ZOLTAN_LIBRARY)
            message(FATAL_ERROR "Failed to locate Zoltan library after building")
        endif()

        message(STATUS "Successfully built Zoltan library at ${ZOLTAN_LIBRARY}")
    endif()
endmacro()
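
A minimal sketch of how a CMakeLists.txt might call this macro (the vendored path and the target name are assumptions for illustration, not taken from the repository):

```cmake
# hypothetical caller; point the macro at wherever Zoltan is vendored
zoltan_find_or_build("${CMAKE_SOURCE_DIR}/thirdParty/Zoltan")

target_include_directories(myMpiSolver PRIVATE ${ZOLTAN_INCLUDE_DIR})
target_link_libraries(myMpiSolver PRIVATE ${ZOLTAN_LIBRARY})
```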

doc/mdDocs/howToBuild-V1.0.md (new file)

@@ -0,0 +1,136 @@
# How to build PhasicFlow-v-1.0
You can build PhasicFlow for CPU or GPU. You can have a single build or even multiple builds on a machine. Here you will learn how to create a single build of PhasicFlow in various modes of execution. You can install PhasicFlow-v-1.0 on **Ubuntu-22.04 LTS** and **Ubuntu-24.04 LTS**. Installing it on older versions of Ubuntu requires some additional steps to meet the requirements, which are not covered here.
If you want to install PhasicFlow on **Windows OS**, see [this page](https://www.cemf.ir/installing-phasicflow-v-1-0-on-ubuntu/) for more information.
# Required packages
You need the following packages installed on your computer before building PhasicFlow:
* git, for cloning the code and package management
* g++, for compiling the code
* cmake, for generating build system
* CUDA 12.x or above (if GPU is targeted), for compiling the code for CUDA execution.
### Installing packages
Execute the following commands to install the required packages (except CUDA). tbb is installed automatically as a dependency.
```bash
sudo apt update
sudo apt install -y git g++ cmake cmake-qt-gui
```
### Installing Cuda for GPU execution
If you want to build PhasicFlow to be executed on an NVIDIA GPU, you need to install the latest CUDA compiler (version 12.x or above) that is compatible with your hardware and OS.
# How to build?
Here you will learn how to build PhasicFlow for a single execution mode. Follow the steps below to install it on your computer.
Tested operating systems are:
* Ubuntu-22.04 LTS
* Ubuntu-24.04 LTS
### Step 1: Package check
Make sure that you have installed all the required packages on your computer. See above for more information.
### Step 2: Cloning PhasicFlow
Create the PhasicFlow folder in your home folder and then clone the source code into that folder:
```bash
cd ~
mkdir PhasicFlow
cd PhasicFlow
git clone https://github.com/PhasicFlow/phasicFlow.git
mv phasicFlow phasicFlow-v-1.0
```
### Step 3: Environmental variables
Open the bashrc file using the following command:
```bash
$ gedit ~/.bashrc
```
and add the following line to the end of the file, **save** and **close** it.
```bash
source $HOME/PhasicFlow/phasicFlow-v-1.0/cmake/bashrc
```
This will introduce a new source file for setting the environmental variables of PhasicFlow. To load these variables in the currently open terminal, you need to source the file; or simply **close the terminal** and **open a new terminal**.
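For example, to source it in the current terminal:
```bash
source ~/.bashrc
```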
### Step 4: Building PhasicFlow
Follow one of the following options to build PhasicFlow for one mode of execution.
#### Serial build for CPU
In a **new terminal** enter the following commands:
```bash
cd ~/PhasicFlow/phasicFlow-v-1.0
mkdir build
cd build
cmake ../ -DpFlow_Build_Serial=On -DCMAKE_BUILD_TYPE=Release
make install -j4
```
For faster builds, use `make install -j`. This will use all the CPU cores on your computer for building.
#### OpenMP build for CPU
```bash
cd ~/PhasicFlow/phasicFlow-v-1.0
mkdir build
cd build
cmake ../ -DpFlow_Build_OpenMP=On -DCMAKE_BUILD_TYPE=Release
make install -j4
```
#### GPU build for parallel execution on CUDA-enabled GPUs
```bash
cd ~/PhasicFlow/phasicFlow-v-1.0
mkdir build
cd build
cmake ../ -DpFlow_Build_Cuda=On -DCMAKE_BUILD_TYPE=Release
make install -j4
```
After building, `bin`, `include`, and `lib` folders will be created in `~/PhasicFlow/phasicFlow-v-1.0/` folder. Now you are ready to use PhasicFlow.
**note 1**: When compiling the code in parallel, you need to have enough RAM on your computer. As a rule of thumb, you need 1 GB of free RAM per processor core used for parallel compilation.
You may want to use fewer cores by using the following command:
```bash
make install -j3
```
The above command uses only 3 cores for compiling.
**note 2**: By default, PhasicFlow is compiled with **double** as the floating point type. You can compile it with **float** instead: just add the `-DpFlow_Build_Double=Off` flag to the cmake command line. For example, if you are building for CUDA, you can enter the following command:
```bash
cmake ../ -DpFlow_Build_Cuda=On -DpFlow_Build_Double=Off
```
### Step 5: Testing
In the current terminal or a new terminal enter the following command:
```bash
checkPhasicFlow
```
This command shows the host and device environments and the software version. If PhasicFlow was built correctly, you should get the following output:
```
Initializing host/device execution spaces . . .
Host execution space is Serial
Device execution space is Serial
You are using phasicFlow v-1.0 (copyright(C): www.cemf.ir)
In this build, double is used for floating point operations and uint32 for indexing.
This is not a build for MPI execution
Finalizing host/device execution space ....
```

@@ -1,151 +0,0 @@
# How to build PhasicFlow {#howToBuildPhasicFlow}
You can build PhasicFlow for CPU or GPU. You can have a single build or even multiple builds on a machine. Here you learn how to have a single build of PhasicFlow, in various modes of execution.
# Required packages
You need the following packages installed on your computer before building PhasicFlow:
* git, for cloning the code and package management
* g++, for compiling the code
* cmake, for generating build system
* tbb, a parallel library for STL algorithms
* Cuda (if GPU is targeted), for compiling the code for CUDA execution.
* Kokkos, the parallelization backend of PhasicFlow
### git
if git is not installed on your computer, enter the following commands
```
$ sudo apt update
$ sudo apt install git
```
### g++ (C++ compiler)
The code is tested with g++ (the GNU C++ compiler). The default version of g++ on Ubuntu 18.04 LTS or higher is sufficient for compiling. If it is not installed on your operating system, enter the following command:
```
$ sudo apt update
$ sudo apt install g++
```
### CMake
You also need to have CMake-3.22 or higher installed on your computer.
```
$ sudo apt update
$ sudo apt install cmake
```
### tbb (2020.1-2 or higher)
For **Ubuntu 20.04 LTS or higher versions**, you can install tbb using apt. For now, some parallel algorithms on the host side rely on the tbb parallel library (C++ parallel backend). Use the following commands to install it:
```
$ sudo apt update
$ sudo apt install libtbb-dev
```
If you are compiling on **Ubuntu-18.04 LTS**, you need to enter the following commands to get the right version (2020.1-2 or higher) of tbb:
```
$ wget "http://archive.ubuntu.com/ubuntu/pool/universe/t/tbb/libtbb2_2020.1-2_amd64.deb"
$ sudo dpkg --install libtbb2_2020.1-2_amd64.deb
$ wget "http://archive.ubuntu.com/ubuntu/pool/universe/t/tbb/libtbb-dev_2020.1-2_amd64.deb"
$ sudo dpkg --install libtbb-dev_2020.1-2_amd64.deb
```
### Cuda
If you want to build PhasicFlow to be executed on an nvidia-GPU, you need to install the latest version of Cuda compiler, which is compatible with your hardware and OS, on your computer.
# How to build?
Here you will learn how to build PhasicFlow for single execution mode. Follow the steps below to install it on your computer.
Tested operating systems are:
* Ubuntu 18.04 LTS
* Ubuntu 20.04 LTS
* Ubuntu 22.04 LTS
### Step 1: Package check
Make sure that you have installed all the required packages on your computer. See above for more information.
### Step 2: Cloning Kokkos
It is assumed that Kokkos source is located in the home folder of your computer. Clone the latest version of Kokkos into your home folder:
```
$ cd ~
$ mkdir Kokkos
$ cd Kokkos
$ git clone https://github.com/kokkos/kokkos.git
```
or simply download and extract the source code of Kokkos in `~/Kokkos` folder. In the end, the top level CMakeLists.txt file should be located in `~/Kokkos/kokkos` folder.
### Step 3: Cloning PhasicFlow
Create the PhasicFlow folder in your home folder and then clone the source code into that folder:
```
$ cd ~
$ mkdir PhasicFlow
$ cd PhasicFlow
$ git clone https://github.com/PhasicFlow/phasicFlow.git
```
### Step 4: Environmental variables
Open the bashrc file using the following command:
`$ gedit ~/.bashrc`
and add the following line to the end of the file, **save** and **close** it.
`source $HOME/PhasicFlow/phasicFlow/cmake/bashrc`
This will introduce a new source file for setting the environmental variables of PhasicFlow. To load these variables in the currently open terminal, you need to source the file; or simply **close the terminal** and **open a new terminal**.
### Step 5: Building PhasicFlow
Follow one of the following options to build PhasicFlow for one mode of execution.
#### Serial build for CPU
In a **new terminal** enter the following commands:
```
$ cd ~/PhasicFlow/phasicFlow
$ mkdir build
$ cd build
$ cmake ../ -DpFlow_Build_Serial=On
$ make install
```
For faster builds, use `make install -j`. This will use all the CPU cores on your computer for building.
#### OpenMP build for CPU
```
$ cd ~/PhasicFlow/phasicFlow
$ mkdir build
$ cd build
$ cmake ../ -DpFlow_Build_OpenMP=On
$ make install
```
#### GPU build for parallel execution on CUDA-enabled GPUs
```
$ cd ~/PhasicFlow/phasicFlow
$ mkdir build
$ cd build
$ cmake ../ -DpFlow_Build_Cuda=On
$ make install
```
After building, `bin`, `include`, and `lib` folders will be created in `~/PhasicFlow/phasicFlow/` folder. Now you are ready to use PhasicFlow.
**note 1**: When compiling the code in parallel, you need to have enough RAM on your computer. As a rule of thumb, you need 1 GB of free RAM per processor core used for parallel compilation.
You may want to use fewer cores by using the following command:
`$ make install -j 3`
The above command uses only 3 cores for compiling.
**note 2**: By default, PhasicFlow is compiled with **double** as the floating point type. You can compile it with **float** instead: just add the `-DpFlow_Build_Double=Off` flag to the cmake command line. For example, if you are building for CUDA, you can enter the following command:
`$ cmake ../ -DpFlow_Build_Cuda=On -DpFlow_Build_Double=Off`
### Step 6: Testing
In the current terminal or a new terminal enter the following command:
`$ checkPhasicFlow`
This command shows the host and device environments and the software version. If PhasicFlow was built correctly, you would get output like the following:
```
Initializing host/device execution spaces . . .
Host execution space is Serial
Device execution space is Cuda
You are using phasicFlow v-0.1 (copyright(C): www.cemf.ir)
In this build, double is used for floating point operations.
Finalizing host/device execution space ....
```

doc/mdDocs/markdownList.yml (new file)

@@ -0,0 +1,18 @@
# This file maps source markdown files to their target wiki pages
# format:
#   - source: path/to/markdown/file.md
#     target: Wiki-Page-Name
mappings:
  - source: benchmarks/readme.md
    target: Performance-of-phasicFlow
  - source: benchmarks/helicalMixer/readme.md
    target: Helical-Mixer-Benchmark
  - source: benchmarks/rotatingDrum/readme.md
    target: Rotating-Drum-Benchmark
  - source: doc/mdDocs/howToBuild-V1.0.md
    target: How-to-build-PhasicFlowv1.0
  - source: tutorials/README.md
    target: Tutorials
  - source: doc/mdDocs/phasicFlowFeatures.md
    target: Features-of-PhasicFlow
  # Add more mappings as needed
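
A quick standalone way to confirm this file parses the way `load_mapping()` in sync-wiki.py expects (a sketch, run from the repository root; not part of the workflow itself):

```python
import yaml

with open("doc/mdDocs/markdownList.yml") as f:
    data = yaml.safe_load(f)

for m in data.get("mappings", []):
    print(m["source"], "->", m["target"])
```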

doc/mdDocs/phasicFlowFeatures.md

@@ -1,64 +1,116 @@

# PhasicFlow Features (v-1.0)

The features of PhasicFlow described here are the main features that are implemented in the code for version 1.0. This document is not a complete list of all the features of PhasicFlow. Features are added to the code continuously, and this document may be behind the latest updates; of course, a review of the code will give you the complete list.

## Table of Contents

- [1. Building options](#1-building-options)
- [2. Preprocessing tools](#2-preprocessing-tools)
- [3. Solvers for simulations](#3-solvers-for-simulations)
- [4. Postprocessing tools](#4-postprocessing-tools)
- [5. Models and features for simulations](#5-models-and-features-for-simulations)
  - [5.1. General representation of walls](#51-general-representation-of-walls)
  - [5.2. High precision integration methods](#52-high-precision-integration-methods)
  - [5.3. Contact force models](#53-contact-force-models-needs-improvement)
  - [5.4. Particle insertion](#54-particle-insertion)
  - [5.5. Restarting/resuming a simulation](#55-restartingresuming-a-simulation)
  - [5.6. Postprocessing data during simulation](#56-postprocessing-data-during-simulation)

## 1. Building options

You can build PhasicFlow to be executed on multi-core CPUs or GPUs. It is also possible to select the type of floating point variables in PhasicFlow: double or float. The float type requires less memory, and a processor mostly needs less time to complete a mathematical operation on it. So, there is a benefit to using floats in DEM simulations, specially when a GPU is targeted for computations.

Build options for PhasicFlow:

- **serial (double or float type)**: execution on one CPU core
- **OpenMP (double or float type)**: execution on multiple cores of a CPU
- **cuda (double or float type)**: execution on CUDA-enabled GPUs

For more information on building PhasicFlow, please refer to the [installation guide](./howToBuild-V1.0.md).

## 2. Preprocessing tools

PhasicFlow provides a set of tools for preprocessing the simulation case. These tools are used to define the initial state of particles, walls and other parameters that are required for running a simulation.

- [**particlesPhasicFlow**](./../../utilities/particlesPhasicFlow/) can be used to define the initial position of particles (for example at t = 0 s) and to set the initial field values for particles (like velocity, orientation, acceleration, etc.).
- [**geometryPhasicFlow**](./../../utilities/geometryPhasicFlow/) converts user inputs for walls into a data structure that is used by PhasicFlow.

## 3. Solvers for simulations

- [**sphereGranFlow**](./../../solvers/sphereGranFlow/) is a solver for simulating the flow of spherical particles with a particle insertion mechanism. A full set of tutorials on various possible simulations can be found here: [sphereGranFlow tutorial](./../../tutorials/sphereGranFlow/).
- [**grainGranFlow**](./../../solvers/grainGranFlow/) is a solver for simulating the flow of coarse-grained particles with a particle insertion mechanism. A full set of tutorials on various possible simulations can be found here: [grainGranFlow tutorial](./../../tutorials/grainGranFlow/).
- [**iterateGeometry**](./../../solvers/iterateGeometry/) is a solver for testing the motion of walls without simulating particles. Since simulating with particles may take a long time and we may want to check that the motion of the geometry is correct before the actual simulation, we created this utility to test the motion of walls. A set of tutorials on various possible simulations can be found here: [iterateGeometry tutorial](./../../tutorials/iterateGeometry/).

## 4. Postprocessing tools

- [**pFlowToVTK**](./../../utilities/pFlowToVTK) is used to convert simulation results into the VTK file format, which can be read by ParaView for visualizing the results.
- [**postprocessPhasicFlow**](./../../utilities/postprocessPhasicFlow/) is a tool for performing various averaging and summation operations on the fields. Particle probing is also possible.

## 5. Models and features for simulations

### 5.1. General representation of walls

Walls can be defined in three ways in PhasicFlow:

- **Builtin walls** in PhasicFlow that include plane wall, cylinder/cone wall, cuboid, circle.
- **stl wall** that reads the data of the wall from an ASCII stl file.
- **foamPatch wall** that reads the OpenFOAM mesh and converts the boundary patches into PhasicFlow walls (this feature is only available when performing CFD-DEM simulation using OpenFOAM).

Walls can be fixed or in motion during simulations. Various motion models are implemented to cover most of the wall motions in phasicFlow ([see the source code](./../../src/MotionModel/)):

- **stationary** model, in which all walls are fixed. This model is mostly useful for granular flow under gravity or gas-solid flows (CFD-DEM).
- **rotatingAxis** model, in which walls rotate around an axis of rotation with a specified rotation speed. This model covers a wide range of granular flows in which the whole or a part of the geometry is rotating, like mixers.
- **multiRotatingAxis** model, in which a combination of rotations can be specified. One axis of rotation can itself have another axis of rotation, and so on. This creates the possibility of defining very complex motion patterns for walls, like what we see in Nauta blenders.
- **vibrating** model, in which walls vibrate based on a sinusoidal model with specified frequency and amplitude.

In addition to these models, the user can add other motion models to the code based on their need.

### 5.2. High precision integration methods

The precision of integration in a DEM simulation is very important, since sudden changes in the interaction forces occur during simulations (when objects come into contact or when they rebound). High precision integration methods make it possible to accurately track the position and velocity of objects (specially when they are in contact). When using these methods, it is possible to choose larger time steps for integration without losing accuracy and causing instability in the simulation. Although a high-precision integration requires more computations, the benefit of choosing larger time steps in the simulation can totally compensate for it.

Various integration methods are implemented in PhasicFlow:

| Integration Method | Order | Type |
| :--- | :---: | :---: |
| AdamsBashforth2 | 2 | one-step |
| AdamsBashforth3 | 3 | one-step |
| AdamsBashforth4 | 4 | one-step |
| AdamsBashforth5 | 5 | one-step |
| AdamsMoulton3 | 3 | predictor-corrector (not active) |
| AdamsMoulton4 | 4 | predictor-corrector (not active) |
| AdamsMoulton5 | 5 | predictor-corrector (not active) |

### 5.3. Contact force models (needs improvement)

Linear and non-linear visco-elastic contact force models are considered in the simulation. In addition to these, limited and non-limited Coulomb friction models can be used to account for the friction between objects. For spherical objects, rolling friction can also be specified between bodies in contact. In addition, for coarse-grained particle simulations, we developed a special set of***

### 5.4. Particle insertion

Particles can be inserted during simulation from a specified region at a specified rate and time interval. Any number of insertion regions can be defined in a simulation. Various region types are considered here: `box`, `cylinder` and `sphere`. Particles are inserted into the simulation through the specified region.

### 5.5. Restarting/resuming a simulation

It is possible to resume a simulation from any time-folder that is available in the simulation case setup directory. PhasicFlow restarts the simulation from that time folder.

### 5.6. Postprocessing data during simulation

PhasicFlow provides a powerful in-simulation postprocessing module that allows users to analyze particle data in real-time while the simulation is running. This feature enables:

- **Real-time data analysis** without waiting for simulation completion
- **Region-based processing** in spheres, along lines, or at specific points
- **Various statistical operations** including weighted averages and sums of particle properties
- **Individual particle tracking** to monitor specific particles throughout simulation
- **Multiple processing methods** including arithmetic mean, uniform distribution, and Gaussian distribution
- **Particle filtering** based on properties like diameter, mass, etc.
- **Flexible time control** options for when postprocessing should be executed

To activate in-simulation postprocessing, users need to (see the sketch after this section):

1. Create a `postprocessDataDict` file in the `settings` directory with appropriate configurations
2. Add `libs ("libPostprocessData.so")` and `auxFunctions postprocessData` to the `settings/settingsDict` file

Results are written to output files in the case directory with timestamps, allowing users to monitor simulation behavior as it progresses without interrupting the simulation. For more information on how to use this feature, please refer to the [PostprocessData](./../../src/PostprocessData/) module.

The same postprocessing module can also be used after simulation completion through the [`postprocessPhasicFlow`](./../../utilities/postprocessPhasicFlow/) utility.
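
As a minimal sketch of step 2 above, the additions to `settings/settingsDict` might look like this (only the two quoted entries come from the text; the placement and comments are assumptions):

```
libs            ("libPostprocessData.so");   // load the postprocessing library at run time
auxFunctions    postprocessData;             // activate in-simulation postprocessing
```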

processorAB2BoundaryIntegration.cpp (new file)

@@ -0,0 +1,71 @@
#include "processorAB2BoundaryIntegration.hpp"
#include "AdamsBashforth2.hpp"
#include "AB2Kernels.hpp"
#include "boundaryConfigs.hpp"
pFlow::processorAB2BoundaryIntegration::processorAB2BoundaryIntegration(
const boundaryBase &boundary,
const pointStructure &pStruct,
const word &method,
integration& intgrtn
)
:
boundaryIntegration(boundary, pStruct, method, intgrtn)
{}
bool pFlow::processorAB2BoundaryIntegration::correct(
real dt,
const realx3PointField_D& y,
const realx3PointField_D& dy
)
{
#ifndef BoundaryModel1
if(this->isBoundaryMaster())
{
const uint32 thisIndex = thisBoundaryIndex();
const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& dyView = dy.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& yView = y.BoundaryField(thisIndex).neighborProcField().deviceView();
const rangeU32 aRange(0u, dy1View.size());
return AB2Kernels::intAllActive(
"AB2Integration::correct."+this->boundaryName(),
dt,
aRange,
yView,
dyView,
dy1View
);
}
#endif //BoundaryModel1
return true;
}
bool pFlow::processorAB2BoundaryIntegration::correctPStruct(real dt, const realx3PointField_D &vel)
{
#ifndef BoundaryModel1
if(this->isBoundaryMaster())
{
const uint32 thisIndex = thisBoundaryIndex();
const auto& AB2 = static_cast<const AdamsBashforth2&>(Integration());
const auto& dy1View = AB2.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& velView = vel.BoundaryField(thisIndex).neighborProcField().deviceView();
const auto& xposView = boundary().neighborProcPoints().deviceView();
const rangeU32 aRange(0u, dy1View.size());
return AB2Kernels::intAllActive(
"AB2Integration::correctPStruct."+this->boundaryName(),
dt,
aRange,
xposView,
velView,
dy1View
);
}
#endif //BoundaryModel1
return true;
}
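
For reference, the classical two-step Adams-Bashforth (AB2) update that the `AdamsBashforth2` scheme above builds on advances each active point as

$$y_{n+1} = y_n + \Delta t \left( \tfrac{3}{2}\,\dot{y}_n - \tfrac{1}{2}\,\dot{y}_{n-1} \right),$$

where the stored previous derivative $\dot{y}_{n-1}$ corresponds to the `dy1` boundary field used in the kernel calls.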

processorAB2BoundaryIntegration.hpp (new file)

@@ -0,0 +1,51 @@
#ifndef __processorAB2BoundaryIntegration_hpp__
#define __processorAB2BoundaryIntegration_hpp__

#include "boundaryIntegration.hpp"

namespace pFlow
{

class processorAB2BoundaryIntegration
:
    public boundaryIntegration
{
public:

    TypeInfo("boundaryIntegration<processor,AdamsBashforth2>");

    processorAB2BoundaryIntegration(
        const boundaryBase&   boundary,
        const pointStructure& pStruct,
        const word&           method,
        integration&          intgrtn
    );

    ~processorAB2BoundaryIntegration() override = default;

    bool correct(
        real dt,
        const realx3PointField_D& y,
        const realx3PointField_D& dy) override;

    bool correctPStruct(real dt, const realx3PointField_D& vel) override;

    add_vCtor(
        boundaryIntegration,
        processorAB2BoundaryIntegration,
        boundaryBase
    );
};

}

#endif

processorBoundaryContactSearch.cpp (new file)

@@ -0,0 +1,111 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#include "processorBoundaryContactSearch.hpp"
#include "contactSearch.hpp"
#include "particles.hpp"
//#include "pointStructure.hpp"
//#include "geometry.hpp"

void pFlow::processorBoundaryContactSearch::setSearchBox()
{
    auto l   = boundary().neighborLength();
    auto n   = boundary().boundaryPlane().normal();
    auto pp1 = boundary().boundaryPlane().parallelPlane(l);
    auto pp2 = boundary().boundaryPlane().parallelPlane(-l);

    realx3 minP1 = min(min(min(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());
    realx3 maxP1 = max(max(max(pp1.p1(), pp1.p2()), pp1.p3()), pp1.p4());
    realx3 minP2 = min(min(min(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());
    realx3 maxP2 = max(max(max(pp2.p1(), pp2.p2()), pp2.p3()), pp2.p4());

    auto minP = min(minP1, minP2) - l*(realx3(1.0)-abs(n));
    auto maxP = max(maxP1, maxP2) + l*(realx3(1.0)-abs(n));

    searchBox_ = {minP, maxP};
}

pFlow::processorBoundaryContactSearch::processorBoundaryContactSearch(
    const dictionary&    dict,
    const boundaryBase&  boundary,
    const contactSearch& cSearch)
:
    boundaryContactSearch(dict, boundary, cSearch),
    diameter_(cSearch.Particles().boundingSphere()),
    masterSearch_(this->isBoundaryMaster()),
    sizeRatio_(dict.getVal<real>("sizeRatio"))
{
    if(masterSearch_)
    {
        setSearchBox();

        real minD;
        real maxD;
        cSearch.Particles().boundingSphereMinMax(minD, maxD);

        ppContactSearch_ = makeUnique<twoPartContactSearch>(
            searchBox_,
            maxD,
            sizeRatio_);
    }
    else
    {
        searchBox_ = {{0,0,0},{0,0,0}};
    }
}

bool pFlow::processorBoundaryContactSearch::broadSearch
(
    uint32 iter,
    real   t,
    real   dt,
    csPairContainerType& ppPairs,
    csPairContainerType& pwPairs,
    bool   force
)
{
    if(masterSearch_)
    {
        const auto  thisPoints = boundary().thisPoints();
        const auto& neighborProcPoints = boundary().neighborProcPoints();
        const auto& bDiams = diameter_.BoundaryField(thisBoundaryIndex());
        const auto  thisDiams = bDiams.thisField();
        const auto& neighborProcDiams = bDiams.neighborProcField();

        ppContactSearch_().broadSearchPP(
            ppPairs,
            thisPoints,
            thisDiams,
            neighborProcPoints,
            neighborProcDiams,
            boundaryName()
        );
        //pOutput<<"ppSize "<< ppPairs.size()<<endl;
        return true;
    }
    else
    {
        return true;
    }
}

processorBoundaryContactSearch.hpp (new file)

@@ -0,0 +1,76 @@
/*------------------------------- phasicFlow ---------------------------------
      O        C enter of
     O O       E ngineering and
    O   O      M ultiscale modeling of
   OOOOOOO     F luid flow
------------------------------------------------------------------------------
  Copyright (C): www.cemf.ir
  email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
  This file is part of phasicFlow code. It is a free software for simulating
  granular and multiphase flows. You can redistribute it and/or modify it under
  the terms of GNU General Public License v3 or any other later versions.

  phasicFlow is distributed to help others in their research in the field of
  granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/

#ifndef __processorBoundaryContactSearch_hpp__
#define __processorBoundaryContactSearch_hpp__

#include "boundaryContactSearch.hpp"
#include "pointFields.hpp"
#include "twoPartContactSearch.hpp"

namespace pFlow
{

class processorBoundaryContactSearch : public boundaryContactSearch
{
private:

    box                             searchBox_;

    uniquePtr<twoPartContactSearch> ppContactSearch_ = nullptr;

    const realPointField_D&         diameter_;

    bool                            masterSearch_;

    real                            sizeRatio_;

    void setSearchBox();

public:

    TypeInfo("boundaryContactSearch<MPI,processor>")

    processorBoundaryContactSearch(
        const dictionary&    dict,
        const boundaryBase&  boundary,
        const contactSearch& cSearch
    );

    ~processorBoundaryContactSearch() override = default;

    add_vCtor(
        boundaryContactSearch,
        processorBoundaryContactSearch,
        boundaryBase
    );

    bool broadSearch(
        uint32 iter,
        real   t,
        real   dt,
        csPairContainerType& ppPairs,
        csPairContainerType& pwPairs,
        bool   force = false
    ) override;
};

}

#endif //__processorBoundaryContactSearch_hpp__

View File

@ -0,0 +1,163 @@
#include "twoPartContactSearch.hpp"
#include "twoPartContactSearchKernels.hpp"
#include "phasicFlowKokkos.hpp"
#include "streams.hpp"
void pFlow::twoPartContactSearch::checkAllocateNext(uint32 n)
{
if( nextCapacity_ < n)
{
nextCapacity_ = n;
reallocNoInit(next_, n);
}
}
void pFlow::twoPartContactSearch::nullifyHead()
{
fill(head_, static_cast<uint32>(-1));
}
void pFlow::twoPartContactSearch::nullifyNext(uint32 n)
{
fill(next_, 0u, n, static_cast<uint32>(-1));
}
void pFlow::twoPartContactSearch::buildList(
const deviceScatteredFieldAccess<realx3> &points)
{
if(points.empty())return;
uint32 n = points.size();
checkAllocateNext(n);
nullifyNext(n);
nullifyHead();
pFlow::twoPartContactSearchKernels::buildNextHead(
points,
searchCells_,
head_,
next_
);
}
pFlow::twoPartContactSearch::twoPartContactSearch
(
const box &domain,
real cellSize,
real sizeRatio
)
:
searchCells_(domain, cellSize),
head_("periodic:head",searchCells_.nx(), searchCells_.ny(), searchCells_.nz()),
sizeRatio_(sizeRatio)
{
}
bool pFlow::twoPartContactSearch::broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real>& diams1,
const deviceScatteredFieldAccess<realx3> &points2,
const deviceScatteredFieldAccess<real>& diams2,
const realx3& transferVec
)
{
if(points1.empty())return true;
if(points2.empty()) return true;
buildList(points1);
uint32 nNotInserted = 1;
// loop until the container size fits the number of contact pairs
while (nNotInserted > 0)
{
nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
(
ppPairs,
points1,
diams1,
points2,
diams2,
transferVec,
head_,
next_,
searchCells_,
sizeRatio_
);
if(nNotInserted)
{
// - resize the container
// note that getFull now shows the number of failed insertions.
uint32 len = max(nNotInserted,100u) ;
auto oldCap = ppPairs.capacity();
ppPairs.increaseCapacityBy(len);
INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
oldCap << " to "<<ppPairs.capacity()<<" in contact search in boundary region."<<END_INFO;
}
}
return true;
}
bool pFlow::twoPartContactSearch::broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const word& name
)
{
buildList(points1);
uint32 nNotInserted = 1;
// loop until the container size fits the number of contact pairs
while (nNotInserted > 0)
{
nNotInserted = pFlow::twoPartContactSearchKernels::broadSearchPP
(
ppPairs,
points1,
diams1,
points2,
diams2,
head_,
next_,
searchCells_,
sizeRatio_
);
if(nNotInserted)
{
// - resize the container
// note that getFull now shows the number of failed insertions.
uint32 len = max(nNotInserted,100u) ;
auto oldCap = ppPairs.capacity();
ppPairs.increaseCapacityBy(len);
INFORMATION<< "Particle-particle contact pair container capacity increased from "<<
oldCap << " to "<<ppPairs.capacity()<<" in boundary contact search in "<< name <<END_INFO;
}
}
return true;
}
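
Both broadSearchPP overloads above share one pattern: run the search kernel, and if some insertions failed because the pair container was full, grow the container and retry. A minimal standalone sketch of the pattern (toy container and kernel stand-in, not the pFlow csPairContainerType):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// toy stand-in for the pair container: only capacity bookkeeping
struct pairContainer
{
    std::uint32_t capacity = 0;
};

// stand-in for the broad-search kernel: reports how many pair
// insertions failed because the container was too small
std::uint32_t runBroadSearch(const pairContainer& pairs)
{
    const std::uint32_t required = 250; // pairs the search would produce
    return pairs.capacity >= required ? 0 : required - pairs.capacity;
}

int main()
{
    pairContainer pairs;
    std::uint32_t nNotInserted = 1;
    while (nNotInserted > 0)            // same loop shape as broadSearchPP
    {
        nNotInserted = runBroadSearch(pairs);
        if (nNotInserted)               // grow by at least 100 slots
            pairs.capacity += std::max(nNotInserted, 100u);
    }
    std::printf("final capacity = %u\n", pairs.capacity); // 250
}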

View File

@ -0,0 +1,104 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __twoPartContactSearch_hpp__
#define __twoPartContactSearch_hpp__
#include "contactSearchGlobals.hpp"
#include "scatteredFieldAccess.hpp"
#include "cells.hpp"
#include "VectorSingles.hpp"
namespace pFlow
{
class twoPartContactSearch
{
public:
using HeadType = deviceViewType3D<uint32>;
using NextType = deviceViewType1D<uint32>;
private:
cells searchCells_;
HeadType head_{ "periodic::head", 1, 1, 1 };
NextType next_{ "periodic::next", 1 };
real sizeRatio_ = 1.0;
uint32 nextCapacity_ = 0;
void checkAllocateNext(uint32 n);
void nullifyHead();
void nullifyNext(uint32 n);
void buildList(
const deviceScatteredFieldAccess<realx3> &points);
public:
twoPartContactSearch(
const box &domain,
real cellSize,
real sizeRatio = 1.0);
/// @brief Perform a broad-search for spheres in two adjacent regions.
/// Region 1 is considered as the master (primary) region and region 2 as slave
/// @param ppPairs pairs container which holds i and j
/// @param points1 point positions in region 1
/// @param diams1 diameter of spheres in region 1
/// @param points2 point positions in region 2
/// @param diams2 diameter of spheres in region 2
/// @param transferVec a vector to transfer points from region 2 to region 1
/// @return true if it is successful
bool broadSearchPP(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const deviceScatteredFieldAccess<realx3> &points2,
const deviceScatteredFieldAccess<real> &diams2,
const realx3 &transferVec);
bool broadSearchPP(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points1,
const deviceScatteredFieldAccess<real> &diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const word& name);
const auto& searchCells()const
{
return searchCells_;
}
real sizeRatio()const
{
return sizeRatio_;
}
};
}
#endif //__twoPartContactSearch_hpp__

View File

@ -0,0 +1,186 @@
#include "twoPartContactSearchKernels.hpp"
INLINE_FUNCTION_HD
bool
sphereSphereCheckB(
const pFlow::realx3& p1,
const pFlow::realx3 p2,
pFlow::real d1,
pFlow::real d2
)
{
return pFlow::length(p2 - p1) < 0.5 * (d2 + d1);
}
void
pFlow::twoPartContactSearchKernels::buildNextHead(
const deviceScatteredFieldAccess<realx3>& points,
const cells& searchCells,
deviceViewType3D<uint32>& head,
deviceViewType1D<uint32>& next
)
{
uint32 n = points.size();
Kokkos::parallel_for(
"pFlow::twoPartContactSearch::buildList",
deviceRPolicyStatic(0, n),
LAMBDA_HD(uint32 i) {
int32x3 ind;
if (searchCells.pointIndexInDomain(points[i], ind))
{
// discards points out of searchCell
uint32 old =
Kokkos::atomic_exchange(&head(ind.x(), ind.y(), ind.z()), i);
next[i] = old;
}
}
);
Kokkos::fence();
}
pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points,
const deviceScatteredFieldAccess<real>& diams,
const deviceScatteredFieldAccess<realx3>& mirrorPoints,
const deviceScatteredFieldAccess<real>& mirrorDiams,
const realx3& transferVec,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
const real sizeRatio
)
{
if (points.empty())
return 0;
if (mirrorPoints.empty())
return 0;
auto nMirror = mirrorPoints.size();
uint32 getFull = 0;
Kokkos::parallel_reduce(
"pFlow::twoPartContactSearchKernels::broadSearchPP",
deviceRPolicyStatic(0, nMirror),
LAMBDA_HD(const uint32 mrrI, uint32& getFullUpdate) {
realx3 p_m = mirrorPoints(mrrI) + transferVec;
int32x3 ind_m;
if (!searchCells.pointIndexInDomain(p_m, ind_m))
return;
real d_m = sizeRatio * mirrorDiams[mrrI];
for (int ii = -1; ii < 2; ii++)
{
for (int jj = -1; jj < 2; jj++)
{
for (int kk = -1; kk < 2; kk++)
{
auto ind = ind_m + int32x3{ ii, jj, kk };
if (!searchCells.inCellRange(ind))
continue;
uint32 thisI = head(ind.x(), ind.y(), ind.z());
while (thisI != static_cast<uint32>(-1))
{
auto d_n = sizeRatio * diams[thisI];
// first item is for this boundary and the second
// item is for the mirror
if(sphereSphereCheckB(p_m, points[thisI], d_m, d_n)&&
ppPairs.insert(thisI,mrrI) == static_cast<uint32>(-1))
{
getFullUpdate++;
}
thisI = next(thisI);
}
}
}
}
},
getFull
);
return getFull;
}
pFlow::uint32
pFlow::twoPartContactSearchKernels::broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points1,
const deviceScatteredFieldAccess<real>& diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
real sizeRatio
)
{
if (points1.empty())
return 0;
if (points2.empty())
return 0;
auto nP2 = points2.size();
auto points2View = points2.deviceView();
auto diams2View = diams2.deviceView();
uint32 getFull = 0;
Kokkos::parallel_reduce(
"pFlow::twoPartContactSearchKernels::broadSearchPP",
deviceRPolicyStatic(0, nP2),
LAMBDA_HD(const uint32 i2, uint32& getFullUpdate) {
realx3 p_m = points2View(i2);
int32x3 ind_m;
if (!searchCells.pointIndexInDomain(p_m, ind_m))
return;
real d_m = sizeRatio * diams2View[i2];
for (int ii = -1; ii < 2; ii++)
{
for (int jj = -1; jj < 2; jj++)
{
for (int kk = -1; kk < 2; kk++)
{
auto ind = ind_m + int32x3{ ii, jj, kk };
if (!searchCells.inCellRange(ind))
{
continue;
}
uint32 i1 = head(ind.x(), ind.y(), ind.z());
while (i1 != static_cast<uint32>(-1))
{
auto d_n = sizeRatio * diams1[i1];
// first item is for this boundary and the second
// item is for the mirror
if(sphereSphereCheckB(p_m, points1[i1], d_m, d_n)&&
ppPairs.insert(i1,i2) == static_cast<uint32>(-1))
{
getFullUpdate++;
}
i1 = next(i1);
}
}
}
}
},
getFull
);
return getFull;
}
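
buildNextHead and the two search kernels rely on the classic linked cell list: head(cell) holds the index of the last point inserted into that cell, next[i] points to the previously inserted one, and uint32(-1) terminates the chain. A standalone host-side sketch of the same encoding (the kernel does the head exchange with Kokkos::atomic_exchange on the device):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const std::uint32_t NIL = static_cast<std::uint32_t>(-1);
    const int nCells = 4;
    std::vector<std::uint32_t> head(nCells, NIL);
    std::vector<std::uint32_t> next(6, NIL);
    int cellOf[6] = {0, 2, 0, 3, 2, 0};   // which cell each point falls in
    for (std::uint32_t i = 0; i < 6; ++i)
    {   // same exchange the kernel performs atomically
        next[i] = head[cellOf[i]];
        head[cellOf[i]] = i;
    }
    // traverse cell 0: visits points in reverse insertion order, 5 -> 2 -> 0
    for (std::uint32_t p = head[0]; p != NIL; p = next[p])
        std::printf("%u ", p);
}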

View File

@ -0,0 +1,49 @@
#ifndef __twoPartContactSearchKernels_hpp__
#define __twoPartContactSearchKernels_hpp__
#include "contactSearchGlobals.hpp"
#include "cells.hpp"
#include "contactSearchFunctions.hpp"
#include "scatteredFieldAccess.hpp"
#include "VectorSingles.hpp"
namespace pFlow::twoPartContactSearchKernels
{
void buildNextHead(
const deviceScatteredFieldAccess<realx3> &points,
const cells &searchCells,
deviceViewType3D<uint32> &head,
deviceViewType1D<uint32> &next );
uint32 broadSearchPP
(
csPairContainerType &ppPairs,
const deviceScatteredFieldAccess<realx3> &points,
const deviceScatteredFieldAccess<real> &diams,
const deviceScatteredFieldAccess<realx3> &mirrorPoints,
const deviceScatteredFieldAccess<real> &mirrorDiams,
const realx3 &transferVec,
const deviceViewType3D<uint32> &head,
const deviceViewType1D<uint32> &next,
const cells &searchCells,
real sizeRatio
);
uint32
broadSearchPP(
csPairContainerType& ppPairs,
const deviceScatteredFieldAccess<realx3>& points1,
const deviceScatteredFieldAccess<real>& diams1,
const realx3Vector_D& points2,
const realVector_D& diams2,
const deviceViewType3D<uint32>& head,
const deviceViewType1D<uint32>& next,
const cells& searchCells,
real sizeRatio
);
}
#endif //__twoPartContactSearchKernels_hpp__

View File

@ -0,0 +1,132 @@
#ifndef __processorBoundarySIKernels_hpp__
#define __processorBoundarySIKernels_hpp__
namespace pFlow::MPI::processorBoundarySIKernels
{
template<typename ContactListType, typename ContactForceModel>
inline
void sphereSphereInteraction
(
const word& kernelName,
real dt,
const ContactListType& cntctList,
const ContactForceModel& forceModel,
const deviceScatteredFieldAccess<realx3>& thisPoints,
const deviceViewType1D<real>& thisDiam,
const deviceViewType1D<uint32>& thisPropId,
const deviceViewType1D<realx3>& thisVel,
const deviceViewType1D<realx3>& thisRVel,
const deviceViewType1D<realx3>& thisCForce,
const deviceViewType1D<realx3>& thisCTorque,
const deviceViewType1D<realx3>& neighborPoints,
const deviceViewType1D<real>& neighborDiam,
const deviceViewType1D<uint32>& neighborPropId,
const deviceViewType1D<realx3>& neighborVel,
const deviceViewType1D<realx3>& neighborRVel,
const deviceViewType1D<realx3>& neighborCForce,
const deviceViewType1D<realx3>& neighborCTorque
)
{
using ValueType = typename ContactListType::ValueType;
uint32 ss = cntctList.size();
if(ss == 0u)return;
uint32 lastItem = cntctList.loopCount();
Kokkos::parallel_for(
kernelName,
deviceRPolicyDynamic(0,lastItem),
LAMBDA_HD(uint32 n)
{
if(!cntctList.isValid(n))return;
auto [i,j] = cntctList.getPair(n);
uint32 ind_i = thisPoints.index(i);
uint32 ind_j = j;
real Ri = 0.5*thisDiam[ind_i];
real Rj = 0.5*neighborDiam[ind_j];
realx3 xi = thisPoints.field()[ind_i];
realx3 xj = neighborPoints[ind_j];
real dist = length(xj-xi);
real ovrlp = (Ri+Rj) - dist;
if( ovrlp >0.0 )
{
auto Nij = (xj-xi)/max(dist,smallValue);
auto wi = thisRVel[ind_i];
auto wj = neighborRVel[ind_j];
auto Vr = thisVel[ind_i] - neighborVel[ind_j] + cross((Ri*wi+Rj*wj), Nij);
auto history = cntctList.getValue(n);
int32 propId_i = thisPropId[ind_i];
int32 propId_j = neighborPropId[ind_j];
realx3 FCn, FCt, Mri, Mrj, Mij, Mji;
// calculates contact force
forceModel.contactForce(
dt, i, j,
propId_i, propId_j,
Ri, Rj,
ovrlp,
Vr, Nij,
history,
FCn, FCt);
forceModel.rollingFriction(
dt, i, j,
propId_i, propId_j,
Ri, Rj,
wi, wj,
Nij,
FCn,
Mri, Mrj);
auto M = cross(Nij,FCt);
Mij = Ri*M+Mri;
Mji = Rj*M+Mrj;
auto FC = FCn + FCt;
Kokkos::atomic_add(&thisCForce[ind_i].x_,FC.x_);
Kokkos::atomic_add(&thisCForce[ind_i].y_,FC.y_);
Kokkos::atomic_add(&thisCForce[ind_i].z_,FC.z_);
Kokkos::atomic_add(&neighborCForce[ind_j].x_,-FC.x_);
Kokkos::atomic_add(&neighborCForce[ind_j].y_,-FC.y_);
Kokkos::atomic_add(&neighborCForce[ind_j].z_,-FC.z_);
Kokkos::atomic_add(&thisCTorque[ind_i].x_, Mij.x_);
Kokkos::atomic_add(&thisCTorque[ind_i].y_, Mij.y_);
Kokkos::atomic_add(&thisCTorque[ind_i].z_, Mij.z_);
Kokkos::atomic_add(&neighborCTorque[ind_j].x_, Mji.x_);
Kokkos::atomic_add(&neighborCTorque[ind_j].y_, Mji.y_);
Kokkos::atomic_add(&neighborCTorque[ind_j].z_, Mji.z_);
cntctList.setValue(n,history);
}
else
{
cntctList.setValue(n, ValueType());
}
});
Kokkos::fence();
}
} //pFlow::MPI::processorBoundarySIKernels
#endif //__processorBoundarySIKernels_hpp__
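
The contact test in the kernel above reduces to comparing the center distance against the sum of radii; the overlap ovrlp = (Ri+Rj) - dist is positive only for touching spheres. A tiny standalone example with assumed radii and positions:

#include <cmath>
#include <cstdio>

int main()
{
    double Ri = 0.005, Rj = 0.004;                 // radii (m), assumed
    double xi[3] = {0, 0, 0}, xj[3] = {0.0085, 0, 0};
    double dist = std::sqrt((xj[0]-xi[0])*(xj[0]-xi[0]) +
                            (xj[1]-xi[1])*(xj[1]-xi[1]) +
                            (xj[2]-xi[2])*(xj[2]-xi[2]));
    double ovrlp = (Ri + Rj) - dist;               // 0.009 - 0.0085
    std::printf("overlap = %g m -> %s\n", ovrlp,
                ovrlp > 0.0 ? "in contact" : "no contact");
    // overlap = 0.0005 m -> in contact
}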

View File

@ -0,0 +1,256 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "processorBoundarySIKernels.hpp"
template <typename cFM, typename gMM>
pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::processorBoundarySphereInteraction(
const boundaryBase &boundary,
const sphereParticles &sphPrtcls,
const GeometryMotionModel &geomMotion)
:
boundarySphereInteraction<cFM,gMM>(
boundary,
sphPrtcls,
geomMotion
),
masterInteraction_(boundary.isBoundaryMaster())
{
if(masterInteraction_)
{
this->allocatePPPairs();
this->allocatePWPairs();
}
}
#ifdef BoundaryModel1
template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
real dt,
const ContactForceModel &cfModel,
uint32 step
)
{
// master processor calculates the contact force/torque and sends the data
// back to the neighbor (slave) processor.
// slave processor receives the data and adds it to the internalField
if(masterInteraction_)
{
if(step==1)return true;
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
if(step == 2 )
{
iter++;
pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
"ppBoundaryInteraction."+this->boundaryName(),
dt,
this->ppPairs(),
cfModel,
this->boundary().thisPoints(),
sphPar.diameter().deviceViewAll(),
sphPar.propertyId().deviceViewAll(),
sphPar.velocity().deviceViewAll(),
sphPar.rVelocity().deviceViewAll(),
sphPar.contactForce().deviceViewAll(),
sphPar.contactTorque().deviceViewAll(),
this->boundary().neighborProcPoints().deviceViewAll(),
sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
cfBndry.neighborProcField().deviceViewAll(),
ctBndry.neighborProcField().deviceViewAll()
);
return true;
}
else if(step == 3 )
{
cfBndry.sendBackData();
ctBndry.sendBackData();
return true;
}
return false;
}
else
{
if(step == 1 )
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.recieveBackData();
ctBndry.recieveBackData();
return false;
}
else if(step == 11)
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.addBufferToInternalField();
ctBndry.addBufferToInternalField();
return true;
}
return false;
}
return false;
}
#else
template <typename cFM, typename gMM>
bool pFlow::MPI::processorBoundarySphereInteraction<cFM, gMM>::sphereSphereInteraction
(
real dt,
const ContactForceModel &cfModel,
uint32 step
)
{
// master processor calculates the contact force/torque and sends the data
// back to the neighbor (slave) processor.
// slave processor receives the data and adds it to the internalField
if(masterInteraction_)
{
if(step==1)return true;
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
if(step == 2 )
{
pFlow::MPI::processorBoundarySIKernels::sphereSphereInteraction(
"ppBoundaryInteraction."+this->boundaryName(),
dt,
this->ppPairs(),
cfModel,
this->boundary().thisPoints(),
sphPar.diameter().deviceViewAll(),
sphPar.propertyId().deviceViewAll(),
sphPar.velocity().deviceViewAll(),
sphPar.rVelocity().deviceViewAll(),
sphPar.contactForce().deviceViewAll(),
sphPar.contactTorque().deviceViewAll(),
this->boundary().neighborProcPoints().deviceViewAll(),
sphPar.diameter().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.propertyId().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.velocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
sphPar.rVelocity().BoundaryField(thisIndex).neighborProcField().deviceViewAll(),
cfBndry.neighborProcField().deviceViewAll(),
ctBndry.neighborProcField().deviceViewAll()
);
return true;
}
else if(step == 3 )
{
cfBndry.sendBackData();
ctBndry.sendBackData();
return true;
}
else if(step == 11 )
{
cfBndry.updateBoundaryFromSlave();
ctBndry.updateBoundaryFromSlave();
return true;
}
return false;
}
else
{
if(step == 1 )
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.recieveBackData();
ctBndry.recieveBackData();
return false;
}
else if(step == 11)
{
const auto & sphPar = this->sphParticles();
uint32 thisIndex = this->boundary().thisBoundaryIndex();
const auto& cfBndry = static_cast<const processorBoundaryField<realx3>&>(
sphPar.contactForce().BoundaryField(thisIndex));
const auto& ctBndry = static_cast<const processorBoundaryField<realx3>&> (
sphPar.contactTorque().BoundaryField(thisIndex));
cfBndry.addBufferToInternalField();
cfBndry.updateBoundaryToMaster();
ctBndry.addBufferToInternalField();
ctBndry.updateBoundaryToMaster();
return true;
}
return false;
}
return false;
}
#endif
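
The step numbers (1, 2, 3, 11) encode a small protocol driven from outside this file: the caller sweeps the steps over all boundaries, the master side computes forces on step 2 and sends them on step 3, and the slave side posts its receive on step 1 and accumulates the buffer on step 11. A toy sketch of that call pattern (the actual pFlow driver is not shown in this diff, so the loop below is an assumption):

#include <cstdint>
#include <cstdio>
#include <vector>

// toy stand-in for one processor-boundary interaction object
struct boundarySI
{
    bool master;
    void sphereSphereInteraction(std::uint32_t step) const
    {
        if (master)
        {
            if (step == 2)  std::printf("master: compute forces/torques\n");
            if (step == 3)  std::printf("master: send results back\n");
        }
        else
        {
            if (step == 1)  std::printf("slave: post receive for results\n");
            if (step == 11) std::printf("slave: add buffer to internal field\n");
        }
    }
};

int main()
{
    std::vector<boundarySI> boundaries{{true}, {false}};
    for (std::uint32_t step : {1u, 2u, 3u, 11u})
        for (const auto& b : boundaries)
            b.sphereSphereInteraction(step);
}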

View File

@ -0,0 +1,93 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundarySphereInteraction_hpp__
#define __processorBoundarySphereInteraction_hpp__
#include "boundarySphereInteraction.hpp"
#include "processorBoundaryField.hpp"
#include "boundaryProcessor.hpp"
namespace pFlow::MPI
{
template<typename contactForceModel,typename geometryMotionModel>
class processorBoundarySphereInteraction
:
public boundarySphereInteraction<contactForceModel, geometryMotionModel>
{
public:
using PBSInteractionType =
processorBoundarySphereInteraction<contactForceModel,geometryMotionModel>;
using BSInteractionType =
boundarySphereInteraction<contactForceModel, geometryMotionModel>;
using GeometryMotionModel = typename BSInteractionType::GeometryMotionModel;
using ContactForceModel = typename BSInteractionType::ContactForceModel;
using MotionModel = typename geometryMotionModel::MotionModel;
using ModelStorage = typename ContactForceModel::contactForceStorage;
using IdType = typename BSInteractionType::IdType;
using IndexType = typename BSInteractionType::IndexType;
using ContactListType = typename BSInteractionType::ContactListType;
private:
bool masterInteraction_;
public:
TypeInfoTemplate22("boundarySphereInteraction", "processor",ContactForceModel, MotionModel);
processorBoundarySphereInteraction(
const boundaryBase& boundary,
const sphereParticles& sphPrtcls,
const GeometryMotionModel& geomMotion
);
add_vCtor
(
BSInteractionType,
PBSInteractionType,
boundaryBase
);
~processorBoundarySphereInteraction()override = default;
bool sphereSphereInteraction(
real dt,
const ContactForceModel& cfModel,
uint32 step)override;
};
}
#include "processorBoundarySphereInteraction.cpp"
#endif //__processorBoundarySphereInteraction_hpp__

View File

@ -0,0 +1,17 @@
#include "processorBoundarySphereInteraction.hpp"
#include "geometryMotions.hpp"
#include "contactForceModels.hpp"
template class pFlow::MPI::processorBoundarySphereInteraction
<
pFlow::cfModels::limitedNonLinearNormalRolling,
pFlow::rotationAxisMotionGeometry
>;
template class pFlow::MPI::processorBoundarySphereInteraction
<
pFlow::cfModels::nonLimitedNonLinearNormalRolling,
pFlow::rotationAxisMotionGeometry
>;

View File

@ -359,7 +359,7 @@ bool pFlow::sphereInteraction<cFM,gMM, cLT>::hearChanges
if(msg.equivalentTo(message::ITEMS_REARRANGE))
{
notImplementedFunction;
- return false;
+ return true;
}
fatalErrorInFunction<<"Event "<< msg.eventNames()<<

View File

@ -0,0 +1,46 @@
#include "processorBoundarySphereParticles.hpp"
#include "sphereParticles.hpp"
#include "boundaryProcessor.hpp"
pFlow::processorBoundarySphereParticles::processorBoundarySphereParticles(
const boundaryBase &boundary,
sphereParticles &prtcls
)
:
boundarySphereParticles(boundary, prtcls)
{
}
bool pFlow::processorBoundarySphereParticles::acceleration(const timeInfo &ti, const realx3& g)
{
#ifndef BoundaryModel1
if(isBoundaryMaster())
{
auto thisIndex = thisBoundaryIndex();
auto mass = Particles().mass().BoundaryField(thisIndex).neighborProcField().deviceView();
auto I = Particles().I().BoundaryField(thisIndex).neighborProcField().deviceView();
auto cf = Particles().contactForce().BoundaryField(thisIndex).neighborProcField().deviceView();
auto ct = Particles().contactTorque().BoundaryField(thisIndex).neighborProcField().deviceView();
auto acc = Particles().accelertion().BoundaryField(thisIndex).neighborProcField().deviceView();
auto rAcc = Particles().rAcceleration().BoundaryField(thisIndex).neighborProcField().deviceView();
Kokkos::parallel_for(
"processorBoundary::acceleration."+this->boundaryName(),
deviceRPolicyStatic(0,mass.size()),
LAMBDA_HD(uint32 i){
acc[i] = cf[i]/mass[i] + g;
rAcc[i] = ct[i]/I[i];
});
Kokkos::fence();
}
#endif
return true;
}
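
The kernel above applies Newton's second law per boundary particle: acc = contactForce/mass + g and rAcc = contactTorque/I. A tiny worked example with assumed values:

#include <cstdio>

int main()
{
    double m = 0.01, I = 2.5e-7;     // assumed mass (kg) and inertia (kg m^2)
    double cfZ = 0.098, g = -9.81;   // contact force z (N), gravity z (m/s^2)
    double ctX = 1.0e-6;             // contact torque x (N m)
    std::printf("acc.z  = %g m/s^2\n", cfZ / m + g);  // 9.8 - 9.81 = -0.01
    std::printf("rAcc.x = %g rad/s^2\n", ctX / I);    // 4
}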

View File

@ -0,0 +1,38 @@
#ifndef __processorBoundarySphereParticles_hpp__
#define __processorBoundarySphereParticles_hpp__
#include "boundarySphereParticles.hpp"
namespace pFlow
{
class processorBoundarySphereParticles
:
public boundarySphereParticles
{
public:
/// type info
TypeInfo("boundarySphereParticles<MPI,processor>");
processorBoundarySphereParticles(
const boundaryBase &boundary,
sphereParticles& prtcls
);
add_vCtor(
boundarySphereParticles,
processorBoundarySphereParticles,
boundaryBase
);
bool acceleration(const timeInfo& ti, const realx3& g)override;
};
}
#endif

View File

@ -0,0 +1,70 @@
#include "MPIParticleIdHandler.hpp"
#include "procCommunication.hpp"
pFlow::MPI::MPIParticleIdHandler::MPIParticleIdHandler
(
pointStructure& pStruct
)
:
particleIdHandler(pStruct)
{
initialIdCheck();
}
pFlow::Pair<pFlow::uint32, pFlow::uint32>
pFlow::MPI::MPIParticleIdHandler::getIdRange(uint32 nNewParticles)
{
uint32 startId;
if(maxId_==-1)
{
startId = 0;
}
else
{
startId = maxId_+1;
}
uint32 endId = startId+nNewParticles-1;
maxId_ = endId;
return {startId, endId};
}
bool pFlow::MPI::MPIParticleIdHandler::initialIdCheck()
{
/// uint32(-1) marks an empty point structure (no particles in simulation)
uint32 maxId = -1;
if( !pStruct().empty() )
{
maxId = max( *this );
}
auto maxIdAll = procVector<uint32>(pFlowProcessors());
auto numAll = procVector<uint32>(pFlowProcessors());
auto comm = procCommunication(pFlowProcessors());
comm.collectAllToAll(maxId, maxIdAll);
comm.collectAllToAll(size(),numAll);
uint32 n = 0;
for(uint32 i=0; i<maxIdAll.size(); i++)
{
if( maxIdAll[i]==-1 && numAll[i]!= 0)
{
if(comm.localRank() == i)
{
fillSequence(*this, n);
maxId_ = size()-1 + n;
}
}
else
{
if(comm.localRank() == i)
{
maxId_ = maxIdAll[i];
}
}
n += numAll[i];
}
return true;
}
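
initialIdCheck assigns each rank whose particles have no ids yet a contiguous id block starting at the total particle count of all lower ranks (the running offset n). A standalone sketch of that bookkeeping with three assumed ranks:

#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint32_t NONE = static_cast<std::uint32_t>(-1);
    std::uint32_t maxIdAll[3] = {NONE, NONE, NONE}; // no rank has ids yet
    std::uint32_t numAll[3]   = {100, 250, 50};     // particles per rank
    std::uint32_t n = 0;                            // running offset
    for (int i = 0; i < 3; ++i)
    {
        if (maxIdAll[i] == NONE && numAll[i] != 0)
            std::printf("rank %d: ids %u..%u\n", i, n, n + numAll[i] - 1);
        n += numAll[i];
    }
    // rank 0: ids 0..99, rank 1: ids 100..349, rank 2: ids 350..399
}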

View File

@ -0,0 +1,60 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPIParticleIdHandler_hpp__
#define __MPIParticleIdHandler_hpp__
#include "particleIdHandler.hpp"
namespace pFlow::MPI
{
class MPIParticleIdHandler : public particleIdHandler
{
private:
uint32 maxId_ = -1;
bool initialIdCheck() override;
public:
ClassInfo("particleIdHandler<MPI>");
explicit MPIParticleIdHandler(pointStructure& pStruct);
~MPIParticleIdHandler() override = default;
add_vCtor(
particleIdHandler,
MPIParticleIdHandler,
pointStructure
);
Pair<uint32, uint32> getIdRange(uint32 nNewParticles) override;
uint32 maxId() const override
{
return maxId_;
}
};
}
#endif //__MPIParticleIdHandler_hpp__

View File

@ -467,7 +467,7 @@ pFlow::postprocessData::fieldsDataBase::fieldsDataBase
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime
+ TimeValueType startTime
)
:
time_(control.time()),
@ -492,7 +492,7 @@ pFlow::postprocessData::fieldsDataBase::fieldsDataBase
}
}
- pFlow::timeValue pFlow::postprocessData::fieldsDataBase::currentTime() const
+ pFlow::TimeValueType pFlow::postprocessData::fieldsDataBase::currentTime() const
{
return time_.currentTime();
}
@ -914,7 +914,7 @@ pFlow::uniquePtr<pFlow::postprocessData::fieldsDataBase>
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime
+ TimeValueType startTime
)
{
word dbType;

View File

@ -78,7 +78,7 @@ private:
anyList allFields_;
/// Map to store the last capture time of each field
- wordMap<timeValue> captureTime_;
+ wordMap<TimeValueType> captureTime_;
/// Reference to the Time object
Time& time_;
@ -178,7 +178,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime);
+ TimeValueType startTime);
/// no copy constructor
fieldsDataBase(const fieldsDataBase&) = delete;
@ -203,7 +203,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime
+ TimeValueType startTime
),
(control, postDict, inSimulation, startTime)
);
@ -211,7 +211,7 @@ public:
// - Public Access Functions
/// returns the current time
- timeValue currentTime()const;
+ TimeValueType currentTime()const;
/// const ref to object Time
const Time& time()const
@ -282,7 +282,7 @@ public:
/// Get the next available time folder after the current time folder
/// This is only used for post-simulation processing
virtual
- timeValue getNextTimeFolder()const
+ TimeValueType getNextTimeFolder()const
{
return -1.0;
}
@ -291,7 +291,7 @@ public:
/// This is used only for post-simulation processing
/// @returns the time value of the next folder.
virtual
- timeValue setToNextTimeFolder()
+ TimeValueType setToNextTimeFolder()
{
return -1.0;
}
@ -300,7 +300,7 @@ public:
/// This is used only for post-simulation processing
/// @returns the time value of the skipped folder
virtual
- timeValue skipNextTimeFolder()
+ TimeValueType skipNextTimeFolder()
{
return -1.0;
}
@ -316,7 +316,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime);
+ TimeValueType startTime);
};
} // namespace pFlow::postprocessData
} // namespace pFlow::postprocessData } // namespace pFlow::postprocessData

View File

@ -49,7 +49,7 @@ pFlow::postprocessData::simulationFieldsDataBase::simulationFieldsDataBase
systemControl &control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime
+ TimeValueType startTime
)
:
fieldsDataBase(control, postDict, inSimulation, startTime),

View File

@ -60,7 +60,7 @@ public:
systemControl& control,
const dictionary& postDict,
bool inSimulation,
- timeValue startTime);
+ TimeValueType startTime);
~simulationFieldsDataBase() override = default;

View File

@ -86,12 +86,12 @@ private:
word fieldName_;
/// Timestamp when mask was last updated (-1 indicates never updated)
- timeValue lastUpdated_ = -1;
+ TimeValueType lastUpdated_ = -1;
/// Updates the mask based on current field values if needed, returns true if successful
bool updateMask()
{
- timeValue t = database().currentTime();
+ TimeValueType t = database().currentTime();
if( equal( t, lastUpdated_)) return true;
@ -206,11 +206,11 @@ private:
std::vector<bool> mask_;
- timeValue lastUpdated_ = -1;
+ TimeValueType lastUpdated_ = -1;
bool updateMask()
{
- timeValue t = database().currentTime();
+ TimeValueType t = database().currentTime();
if( equal( t, lastUpdated_)) return true;

View File

@ -46,7 +46,7 @@ inline
bool writeField
(
iOstream& os,
- timeValue t,
+ TimeValueType t,
const regionField<T> field,
uint32 threshold,
const T& defValue=T{}

View File

@ -52,7 +52,7 @@ template<typename T>
inline bool writeField
(
iOstream& os,
- timeValue t,
+ TimeValueType t,
const regionField<T>& field,
const regionPoints& regPoints,
const T& invalidVal = T{}

View File

@ -27,7 +27,7 @@ Licence:
pFlow::postprocessData::postprocessData::postprocessData
(
const systemControl &control,
- timeValue startTime
+ TimeValueType startTime
)
:
auxFunctions(control),
@ -50,7 +50,7 @@ pFlow::postprocessData::postprocessData::postprocessData
if( !dict_.fileExist() || !dict_.headerOk() )
{
WARNING<<"You requested postprocessData function while,"
- <<" the dictionary system/postprocessDataDict does not exist."
+ <<" the dictionary settings/postprocessDataDict does not exist."
<<" This feature is disabled in the current run."<<END_WARNING;
return;
}

View File

@ -85,7 +85,7 @@ public:
/// this constructor is used when postprocessing is active
/// during simulation.
/// @param control const reference to systemControl
- postprocessData(const systemControl& control, timeValue startTime = -1.0);
+ postprocessData(const systemControl& control, TimeValueType startTime = -1.0);
~postprocessData()override = default;

View File

@ -1,4 +1,3 @@
list(APPEND SourceFiles
types/basicTypes/bTypesFunctions.cpp
types/basicTypes/Logical.cpp
@ -119,35 +118,27 @@ set(link_libs)
set(link_libs Kokkos::kokkos tbb)
- # for MPI parallelization
if(pFlow_Build_MPI)
- set(Zoltan_Install_DIR)
- if(DEFINED ENV{Zoltan_DIR})
- set(Zoltan_Install_DIR $ENV{Zoltan_DIR})
- else()
- set(Zoltan_Install_DIR $ENV{HOME}/PhasicFlow/Zoltan)
- endif()
- message(STATUS "Zoltan install directory is ${Zoltan_Install_DIR}")
- set(ZOLTAN_PREFIX "${Zoltan_Install_DIR}" CACHE STRING "Zoltan install directory")
- find_path(ZOLTAN_INCLUDE_DIR zoltan.h PATHS "${ZOLTAN_PREFIX}/include")
- message(STATUS "Zoltan include path: ${ZOLTAN_INCLUDE_DIR}")
- find_library(ZOLTAN_LIBRARY zoltan PATHS "${ZOLTAN_PREFIX}/lib")
- message(STATUS "Zoltan lib path: ${ZOLTAN_LIBRARY}")
+ # Include the Zoltan installation check macro
+ include(${CMAKE_SOURCE_DIR}/cmake/zoltanInstallCheck.cmake)
+ # set the Zoltan Directory and check/build if needed
+ set(Zoltan_Install_DIR ${CMAKE_SOURCE_DIR}/thirdParty/Zoltan)
+ # Call the macro to find or build Zoltan
+ zoltan_find_or_build(${Zoltan_Install_DIR})
list(APPEND SourceFiles
MPIParallelization/domain/partitioning/partitioning.cpp
MPIParallelization/domain/partitioning/rcb1DPartitioning.cpp
MPIParallelization/domain/MPISimulationDomain.cpp
MPIParallelization/dataIOMPI/dataIOMPIs.cpp
MPIParallelization/MPI/procCommunication.cpp
MPIParallelization/MPI/scatteredMasterDistributeChar.cpp
MPIParallelization/pointStructure/boundaries/boundaryProcessor.cpp
MPIParallelization/pointField/processorBoundaryFields.cpp
)
list(APPEND link_libs MPI::MPI_CXX ${ZOLTAN_LIBRARY} -lm )
@ -155,8 +146,10 @@ if(pFlow_Build_MPI)
target_include_directories(phasicFlow PUBLIC ./globals ${ZOLTAN_INCLUDE_DIR})
else()
pFlow_add_library_install(phasicFlow SourceFiles link_libs)
target_include_directories(phasicFlow PUBLIC ./globals)
endif()

View File

@ -0,0 +1,106 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __gatherMaster_hpp__
#define __gatherMaster_hpp__
#include <numeric>
#include "procCommunication.hpp"
#include "stdVectorHelper.hpp"
namespace pFlow::MPI
{
template<typename T>
class gatherMaster
:
public procCommunication
{
protected:
std::vector<T> buffer_;
public:
gatherMaster(const localProcessors& procs)
:
procCommunication(procs)
{}
span<T> getData()
{
if(this->localMaster())
return span<T>( buffer_.data(), buffer_.size());
else
return span<T>(nullptr, 0);
}
std::vector<T> moveData()
{
return std::move(buffer_);
}
bool gatherData(span<T> data)
{
int thisN = data.size();
procVector<int> numElems(this->processors(), true);
procVector<int> displ(this->processors(), true);
if( !this->collectAllToMaster(thisN, numElems) )
{
fatalErrorInFunction<<
"error in collecting number of elements from processors"<<endl;
return false;
}
auto totalN = std::accumulate(
numElems.begin(),
numElems.end(),
static_cast<int>(0));
buffer_.resize(totalN);
std::exclusive_scan(
numElems.begin(),
numElems.end(),
displ.begin(),
0);
auto bufferSpan = span<T>(this->buffer_.data(),this->buffer_.size() );
return CheckMPI(
Gatherv(
data,
bufferSpan,
numElems.getSpan(),
displ.getSpan(),
this->localMasterNo(),
this->localCommunicator()),
false);
}
};
}
#endif
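
gatherData first collects the per-rank element counts on the master, then derives the receive displacements as the exclusive prefix sum of those counts before calling Gatherv. A standalone sketch of that bookkeeping (no MPI needed):

#include <numeric>
#include <vector>
#include <cstdio>

int main()
{
    std::vector<int> counts{3, 5, 2};   // elements contributed per rank
    std::vector<int> displ(counts.size());
    // displacement of rank i = sum of counts of ranks 0..i-1
    std::exclusive_scan(counts.begin(), counts.end(), displ.begin(), 0);
    int total = std::accumulate(counts.begin(), counts.end(), 0);
    for (std::size_t i = 0; i < displ.size(); ++i)
        std::printf("rank %zu -> offset %d\n", i, displ[i]); // 0, 3, 8
    std::printf("buffer size = %d\n", total);                // 10
}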

View File

@ -0,0 +1,463 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiCommunication_H__
#define __mpiCommunication_H__
#include "mpiTypes.hpp"
#include "types.hpp"
#include "span.hpp"
namespace pFlow::MPI
{
extern DataType realx3Type__;
extern DataType realx4Type__;
extern DataType int32x3Type__;
extern DataType uint32x3Type__;
template<typename T>
auto constexpr Type()
{
return MPI_BYTE;
}
template<typename T>
auto constexpr sFactor()
{
return sizeof(T);
}
template<>
auto constexpr Type<char>()
{
return MPI_CHAR;
}
template<>
auto constexpr sFactor<char>()
{
return 1;
}
template<>
auto constexpr Type<short>()
{
return MPI_SHORT;
}
template<>
auto constexpr sFactor<short>()
{
return 1;
}
template<>
auto constexpr Type<unsigned short>()
{
return MPI_UNSIGNED_SHORT;
}
template<>
auto constexpr sFactor<unsigned short>()
{
return 1;
}
template<>
auto constexpr Type<int>()
{
return MPI_INT;
}
template<>
auto constexpr sFactor<int>()
{
return 1;
}
template<>
auto constexpr Type<unsigned int>()
{
return MPI_UNSIGNED;
}
template<>
auto constexpr sFactor<unsigned int>()
{
return 1;
}
template<>
auto constexpr Type<long>()
{
return MPI_LONG;
}
template<>
auto constexpr sFactor<long>()
{
return 1;
}
template<>
auto constexpr Type<unsigned long>()
{
return MPI_UNSIGNED_LONG;
}
template<>
auto constexpr sFactor<unsigned long>()
{
return 1;
}
template<>
auto constexpr Type<float>()
{
return MPI_FLOAT;
}
template<>
auto constexpr sFactor<float>()
{
return 1;
}
template<>
auto constexpr Type<double>()
{
return MPI_DOUBLE;
}
template<>
auto constexpr sFactor<double>()
{
return 1;
}
template<>
inline
auto Type<realx3>()
{
return realx3Type__;
}
template<>
auto constexpr sFactor<realx3>()
{
return 1;
}
template<>
inline
auto Type<realx4>()
{
return realx4Type__;
}
template<>
auto constexpr sFactor<realx4>()
{
return 1;
}
template<>
inline
auto Type<int32x3>()
{
return int32x3Type__;
}
template<>
auto constexpr sFactor<int32x3>()
{
return 1;
}
template<>
inline
auto Type<uint32x3>()
{
return uint32x3Type__;
}
template<>
auto constexpr sFactor<uint32x3>()
{
return 1;
}
/*inline
auto createByteSequence(int sizeOfElement)
{
DataType newType;
MPI_Type_contiguous(sizeOfElement, MPI_CHAR, &newType);
MPI_Type_commit(&newType);
return newType;
}*/
inline
auto TypeCommit(DataType* type)
{
return MPI_Type_commit(type);
}
inline
auto TypeFree(DataType* type)
{
return MPI_Type_free(type);
}
template<typename T>
inline auto getCount(Status* status, int& count)
{
int lCount;
auto res = MPI_Get_count(status, Type<T>(), &lCount);
count = lCount/sFactor<T>();
return res;
}
template<typename T>
inline int convertIndex(const int& ind)
{
return ind*sFactor<T>();
}
template<typename T>
inline auto send(span<T> data, int dest, int tag, Comm comm)
{
return MPI_Send(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto send(const T& data, int dest, int tag, Comm comm)
{
return MPI_Send(
&data,
sFactor<T>(),
Type<T>(),
dest,
tag,
comm);
}
template<typename T>
inline auto Isend(span<T> data, int dest, int tag, Comm comm, Request* req)
{
return MPI_Isend(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
dest,
tag,
comm,
req);
}
template<typename T>
inline auto Isend(const T& data, int dest, int tag, Comm comm, Request* req)
{
return MPI_Isend(
&data,
sFactor<T>(),
Type<T>(),
dest,
tag,
comm,
req);
}
template<typename T>
inline auto recv(span<T> data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto recv(T& data, int source, int tag, Comm comm, Status *status)
{
return MPI_Recv(
&data,
sFactor<T>(),
Type<T>(),
source,
tag,
comm,
status);
}
template<typename T>
inline auto Irecv(T& data, int source, int tag, Comm comm, Request* req)
{
return MPI_Irecv(
&data,
sFactor<T>(),
Type<T>(),
source,
tag,
comm,
req);
}
template<typename T>
inline auto Irecv(span<T> data, int source, int tag, Comm comm, Request* req)
{
return MPI_Irecv(
data.data(),
sFactor<T>()*data.size(),
Type<T>(),
source,
tag,
comm,
req);
}
template<typename T>
inline auto scan(T sData, T& rData, Comm comm, Operation op = SumOp)
{
return MPI_Scan(&sData, &rData, sFactor<T>()*1, Type<T>(), op , comm );
}
// gathers one scalar from each processor to the root processor
template<typename T>
inline auto gather(T sendData, span<T>& recvData, int root, Comm comm)
{
return MPI_Gather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto allGather(T sendData, span<T>& recvData, Comm comm)
{
return MPI_Allgather(
&sendData,
sFactor<T>()*1,
Type<T>(),
recvData.data(),
sFactor<T>()*1,
Type<T>(),
comm);
}
template<typename T>
inline auto scatter(span<T> sendData, T& recvData, int root, Comm comm)
{
return MPI_Scatter(
sendData.data(),
sFactor<T>()*1,
Type<T>(),
&recvData,
sFactor<T>()*1,
Type<T>(),
root,
comm);
}
template<typename T>
inline auto Bcast(T& sendData, int root, Comm comm)
{
return MPI_Bcast(
&sendData, sFactor<T>()*1, Type<T>(), root, comm);
}
template<typename T>
bool typeCreateIndexedBlock(
span<int32> index,
DataType &newType)
{
auto res = MPI_Type_create_indexed_block(
index.size(),
sFactor<T>(),
index.data(),
Type<T>(),
&newType);
if(res == Success)
{
TypeCommit(&newType);
}
else
{
return false;
}
return true;
}
template<typename T>
inline auto Gatherv
(
span<T> sendData,
span<T>& recvData,
span<int> recvCounts,
span<int> displs,
int root,
Comm comm)
{
return MPI_Gatherv(
sendData.data(),
sendData.size()*sFactor<T>(),
Type<T>(),
recvData.data(),
recvCounts.data(),
displs.data(),
Type<T>(),
root,
comm
);
}
inline auto Wait(Request* request, Status* status)
{
return MPI_Wait(request, status);
}
}
#endif //__mpiCommunication_H__
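
The Type<T>()/sFactor<T>() pair above maps a C++ type onto an MPI datatype plus an element-count multiplier: specialized types use a native datatype with factor 1, and anything else falls back to MPI_BYTE with factor sizeof(T). A standalone sketch of the same trait pattern (fake enum handles so it compiles without MPI):

#include <cstdio>

enum FakeType { BYTE, INT };

// primary templates: unknown types travel as raw bytes
template<typename T> constexpr FakeType Type()    { return BYTE; }
template<typename T> constexpr int      sFactor() { return sizeof(T); }
// specialization: int maps to a native datatype, one element per item
template<> constexpr FakeType Type<int>()    { return INT; }
template<> constexpr int      sFactor<int>() { return 1; }

struct myPOD { char pad[12]; };

int main()
{
    // an int buffer of length 4 is sent as 4 INT elements,
    // a myPOD buffer of length 4 as 4*12 = 48 BYTE elements
    std::printf("int:   type=%d count=%d\n", Type<int>(),   4 * sFactor<int>());
    std::printf("myPOD: type=%d count=%d\n", Type<myPOD>(), 4 * sFactor<myPOD>());
}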

View File

@ -0,0 +1,71 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __mpiTypes_H__
#define __mpiTypes_H__
#include <mpi.h>
namespace pFlow::MPI
{
// types
using Comm = MPI_Comm;
using Group = MPI_Group;
using Status = MPI_Status;
using Offset = MPI_Offset;
using Request = MPI_Request;
using Operation = MPI_Op;
using Information = MPI_Info;
using DataType = MPI_Datatype;
inline Comm CommWorld = MPI_COMM_WORLD;
// all nulls
inline auto ProcNull = MPI_PROC_NULL;
inline auto InfoNull = MPI_INFO_NULL;
inline auto RequestNull = MPI_REQUEST_NULL;
inline auto StatusIgnore = MPI_STATUS_IGNORE;
inline auto StatusesIgnore = MPI_STATUSES_IGNORE;
inline auto FileNull = MPI_FILE_NULL;
inline Comm CommNull = MPI_COMM_NULL;
inline auto TypeNull = MPI_DATATYPE_NULL;
// errors
inline const auto Success = MPI_SUCCESS;
inline const auto ErrOp = MPI_ERR_OP;
inline const auto SumOp = MPI_SUM;
inline const auto MaxOp = MPI_MAX;
inline const auto MinOp = MPI_MIN;
inline const size_t MaxNoProcessors = 2048;
}
#endif //__mpiTypes_H__

View File

@ -0,0 +1,30 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "procCommunication.hpp"
pFlow::MPI::procCommunication::procCommunication
(
const localProcessors& proc
)
:
processors_(proc)
{}

View File

@ -0,0 +1,178 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __procCommunication_hpp__
#define __procCommunication_hpp__
#include "procVector.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
class procCommunication
{
protected:
const localProcessors& processors_;
public:
procCommunication(const localProcessors& proc);
~procCommunication()=default;
inline
const auto& processors()const
{
return processors_;
}
/// @brief Tell if this processor is the master processor in the local
/// communicator
/// @return true if this processor is the master
inline
bool localMaster()const
{
return processors_.localMaster();
}
inline
auto localSize()const
{
return processors_.localSize();
}
inline
auto localRank()const
{
return processors_.localRank();
}
inline
auto localCommunicator()const
{
return processors_.localCommunicator();
}
/// @brief return the master number in the local communicator
auto localMasterNo()const
{
return processors_.localMasterNo();
}
/// Send a single value to all processors including itself (local communicator)
template<typename T>
std::pair<T,bool> distributeMasterToAll(const T& val)
{
T retVal = val;
auto res = CheckMPI(
Bcast(retVal, localMasterNo(),localCommunicator() ),
false);
return {retVal, res};
}
/// @brief Send a single value to all processor including master (in local communicator)
/// @param val value to be sent
/// @param recvVal received value
/// @return true if successful and false if fail
template<typename T>
bool distributeMasterToAll(const T& val, T& recvVal)
{
recvVal = val;
return CheckMPI(
Bcast(recvVal, localMasterNo(), localCommunicator()),
false);
}
/// @brief Distribute the values in the vector (its size equals the number
/// of processors in the local communicator), one value to each processor
template<typename T>
std::pair<T,bool> distributeMasterToAll(const procVector<T>& vals)
{
T val;
auto vec = vals.getSpan();
auto res = CheckMPI(
scatter(vec, val, localMasterNo(), localCommunicator()),
false);
return {val, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and the values are distributed among all processors
template<typename T>
std::pair<procVector<T>, bool> collectAllToAll(const T& val)
{
procVector<T> allVec(processors_);
auto vec = allVec.getSpan();
auto res = CheckMPI(
allGather(val, vec, localCommunicator()),
false);
return {allVec, res};
}
/// @brief Each processor in the local communicator calls this function with a value
/// and the values are distributed among all processors
template<typename T>
bool collectAllToAll(const T& val, procVector<T>& allVec)
{
auto vec = allVec.getSpan();
return CheckMPI(
allGather(val, vec, localCommunicator()),
false);
}
/// @brief Each processor in the local communicator calls this function with a value
/// and all values are collected in the master processor
template<typename T>
std::pair<procVector<T>,bool> collectAllToMaster(const T& val)
{
// only on master processor
procVector<T> masterVec(processors_, true);
auto masterSpan = masterVec.getSpan();
auto res = CheckMPI(
gather(val,masterSpan, localMasterNo(), localCommunicator()),
false);
return {masterVec, res};
}
template<typename T>
bool collectAllToMaster(const T& val, procVector<T>& masterVec)
{
// only on master processor
auto [vec, res] = collectAllToMaster(val);
masterVec = vec;
return res;
}
}; //procCommunication
} // pFlow::MPI
#endif //__procCommunication_hpp__

View File

@ -0,0 +1,199 @@
#ifndef __procVector_hpp__
#define __procVector_hpp__
// from PhasicFlow
#include "localProcessors.hpp"
#include "span.hpp"
#include "streams.hpp"
#include "IOPattern.hpp"
#include "mpiTypes.hpp"
namespace pFlow::MPI
{
template<typename T>
class procVector
:
public std::vector<T>
{
public:
using ProcVectorType = procVector<T>;
using VectorType = std::vector<T>;
protected:
int rank_ = 0;
bool isMaster_ = false;
using VectorType::reserve;
using VectorType::resize;
using VectorType::assign;
using VectorType::clear;
using VectorType::erase;
public:
procVector(
const localProcessors& procs,
bool onlyMaster = false)
:
rank_(procs.localRank()),
isMaster_(procs.localMaster())
{
if( onlyMaster && !isMaster_ ) return;
this->reserve(procs.localSize());
this->resize(procs.localSize());
}
procVector(
const T& val,
const localProcessors& procs,
bool onlyMaster = false)
:
procVector(procs, onlyMaster)
{
std::fill(this->begin(), this->end(), val);
}
procVector(const T& val, const procVector& src)
{
this->reserve(src.size());
this->resize(src.size());
std::fill(this->begin(), this->end(), val);
}
procVector(const localProcessors& procs, const VectorType& src)
:
procVector(procs)
{
if(src.size()!= this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in construction"<<endl;
fatalExit;
}
this->assign(src.begin(), src.end());
}
procVector(const procVector&) = default;
procVector(procVector&&) = default;
procVector& operator=(const procVector&) = default;
procVector& operator=(procVector&&) = default;
procVector& operator=(const VectorType& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in copy assignment"<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(src);
return *this;
}
procVector& operator=(VectorType&& src)
{
if(src.size() != this->size())
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move assignment"
<<endl;
fatalExit;
}
static_cast<VectorType&>(*this).operator=(std::move(src));
return *this;
}
procVector(const localProcessors& procs, VectorType&& src)
:
VectorType(std::move(src))
{
if(this->size()!= static_cast<size_t>(procs.localSize()))
{
fatalErrorInFunction<<
"Size of std::vector and procVector does not match in move"<<endl;
fatalExit;
}
isMaster_ = procs.localMaster();
rank_ = procs.localRank();
}
~procVector()=default;
inline
auto& thisValue()
{
return VectorType::operator[](rank_);
}
inline
const auto& thisValue()const
{
return VectorType::operator[](rank_);
}
inline
auto size()const
{
return VectorType::size();
}
inline
auto rank()const
{
return rank_;
}
inline
auto getSpan()
{
return span<T>(this->data(), this->size());
}
inline
auto getSpan()const
{
return span<T>(const_cast<T*>(this->data()), this->size());
}
bool write(
iOstream& os,
const IOPattern& iop ) const
{
return writeStdVector(os, *this, iop);
}
};
template<typename T>
inline iOstream& operator << (iOstream& os, const procVector<T>& ovec )
{
if( !ovec.write(os, IOPattern::AllProcessorsDifferent) )
{
ioErrorInFile(os.name(), os.lineNumber());
fatalExit;
}
return os;
}
}
#endif
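A short sketch of typical procVector use (assuming a valid localProcessors object named procs; values are illustrative):

// one slot per processor in the local communicator
pFlow::MPI::procVector<pFlow::uint32> counts(procs);
counts.thisValue() = 42u;          // the entry owned by this rank
auto s = counts.getSpan();         // contiguous view handed to MPI calls
// master-only variant: non-master ranks keep a zero-sized vector
pFlow::MPI::procVector<pFlow::uint32> onMaster(procs, true);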


@ -0,0 +1,158 @@
template<typename T>
pFlow::MPI::scatteredMasterDistribute<T>::scatteredMasterDistribute
(
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true)
{
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<int32> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i];
}
DataType dt;
if(! typeCreateIndexedBlock<T>( makeSpan(index), dt))
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
DataType dt;
if( !typeCreateIndexedBlock<T>(maps[proc], dt) )
{
fatalErrorInFunction;
return false;
}
else
{
indexedMap_[proc] = dt;
}
}
}
return true;
}
template<typename T>
void pFlow::MPI::scatteredMasterDistribute<T>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
template<typename T>
bool pFlow::MPI::scatteredMasterDistribute<T>::distribute
(
span<T>& sendBuff,
span<T>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size()*sFactor<T>(),
Type<T>(),
0,
0,
localCommunicator(),
&stat),
false);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}
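The intended call sequence for the class above, sketched under the assumption that the export lists come from a partitioner and that sendBuff/recvBuff are hypothetical spans (recvBuff already sized to what this rank imports):

pFlow::MPI::scatteredMasterDistribute<pFlow::realx3> dist(pFlowProcessors());
auto lists = partitioner.allExportLists();  // hypothetical partitioning object
if( !dist.setDataMaps(lists) )              // master builds one indexed type per rank
{
    fatalErrorInFunction;
}
dist.distribute(sendBuff, recvBuff);        // master scatters, every rank receives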


@ -0,0 +1,67 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistribute_hpp__
#define __scatteredMasterDistribute_hpp__
#include "mpiCommunication.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "stdVectorHelper.hpp"
#include "streams.hpp"
namespace pFlow::MPI
{
template<typename T>
class scatteredMasterDistribute : public procCommunication
{
protected:
procVector<DataType> indexedMap_;
void freeIndexedMap();
public:
scatteredMasterDistribute(const localProcessors& procs);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) =
delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<T>& sendBuff, span<T>& recvb);
};
} // pFlow::MPI
#include "scatteredMasterDistribute.cpp"
#endif //__scatteredMasterDistribute_hpp__


@ -0,0 +1,166 @@
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPI::scatteredMasterDistribute<char>::scatteredMasterDistribute
(
size_t sizeOfElement,
const localProcessors& procs
)
:
procCommunication(procs),
indexedMap_(TypeNull, procs, true),
sizeOfElement_(sizeOfElement)
{}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<uint32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
freeIndexedMap();
std::vector<MPI_Aint> index;
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
m.size(),
sizeOfElement_,
index.data(),
MPI_BYTE,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
bool pFlow::MPI::scatteredMasterDistribute<char>::setDataMaps
(
procVector<span<int32>>& maps
)
{
if(this->localMaster())
{
if(maps.size() != this->localSize() )
{
fatalErrorInFunction<<"size mismatch";
return false;
}
std::vector<MPI_Aint> index;
freeIndexedMap();
for(auto proc = 0; proc< maps.size(); proc++)
{
auto m = maps[proc];
index.resize(m.size());
for(auto i=0; i<index.size(); i++ )
{
index[i] = m[i]*sizeOfElement_;
}
DataType dt;
MPI_Type_create_hindexed_block(
index.size(),
sizeOfElement_,
index.data(),
MPI_CHAR,
&dt);
MPI_Type_commit(&dt);
indexedMap_[proc] = dt;
}
}
return true;
}
void pFlow::MPI::scatteredMasterDistribute<char>::freeIndexedMap()
{
for(auto i=0; i<indexedMap_.size(); i++)
{
if(indexedMap_[i]!= TypeNull)
{
TypeFree(&indexedMap_[i]);
indexedMap_[i] = TypeNull;
}
}
}
bool pFlow::MPI::scatteredMasterDistribute<char>::distribute
(
span<char>& sendBuff,
span<char>& recvb
)
{
procVector<Request> requests(processors(), true);
procVector<Status> statuses(processors(), true);
if(this->localMaster())
{
bool res = true;
for(int32 i = indexedMap_.size()-1; i>=0; i--)
{
res = res&&CheckMPI(
MPI_Issend(
sendBuff.data(),
1,
indexedMap_[i],
i,
0,
localCommunicator(),
&requests[i]),
false);
}
if(!res)return false;
}
Status stat;
bool sucss = CheckMPI(
MPI_Recv(
recvb.data(),
recvb.size(),
MPI_CHAR,
0,
0,
localCommunicator(),
&stat),
true);
if(this->localMaster())
{
CheckMPI(
MPI_Waitall(requests.size(), requests.data(), statuses.data()),
false
);
}
return sucss;
}
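The char specialization above scales every index by sizeOfElement_, so one map can move fixed-size records of any element type as raw bytes. A hedged sketch (recordSize, lists, and the spans are hypothetical):

pFlow::MPI::scatteredMasterDistribute<char> dist(recordSize, pFlowProcessors());
dist.setDataMaps(lists);             // indices count records, not bytes
dist.distribute(srcBytes, dstBytes); // span<char> views over the raw records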


@ -0,0 +1,66 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __scatteredMasterDistributeChar_hpp__
#define __scatteredMasterDistributeChar_hpp__
#include "scatteredMasterDistribute.hpp"
namespace pFlow::MPI
{
template<>
class scatteredMasterDistribute<char> : public procCommunication
{
protected:
procVector<DataType> indexedMap_;
size_t sizeOfElement_;
void freeIndexedMap();
public:
scatteredMasterDistribute(
size_t sizeOfElement,
const localProcessors& procs
);
~scatteredMasterDistribute()
{
freeIndexedMap();
}
scatteredMasterDistribute(const scatteredMasterDistribute&) = delete;
scatteredMasterDistribute& operator=(const scatteredMasterDistribute&) =
delete;
bool setDataMaps(procVector<span<uint32>>& maps);
bool setDataMaps(procVector<span<int32>>& maps);
bool distribute(span<char>& sendBuff, span<char>& recvb);
};
} // pFlow::MPI
#endif //__scatteredMasterDistributeChar_hpp__


@ -0,0 +1,52 @@
template<typename T>
bool pFlow::MPI::dataIOMPI<T>::gatherData(span<T> data )
{
if(this->ioPattern_.isAllProcessorsDifferent())
{
this->bufferSpan_ = data;
return true;
}
if( this->ioPattern_.isMasterProcessorDistribute())
{
auto gatherT = pFlow::MPI::gatherMaster<T>(pFlowProcessors());
if(!gatherT.gatherData(data))
{
fatalErrorInFunction<<"Error in gathering data to master"<<endl;
return false;
}
this->buffer_ = gatherT.moveData();
this->bufferSpan_ = span<T>(this->buffer_.data(),this->buffer_.size() );
return true;
}
if( this->ioPattern_.isMasterProcessorOnly() || this->ioPattern_.isAllProcessorSimilar() )
{
if( this->ioPattern_.isMaster() )
{
this->bufferSpan_ = data;
return true;
}
else
{
this->bufferSpan_ = span<T>(nullptr, 0);
return true;
}
}
return false;
}
template<typename T>
pFlow::MPI::dataIOMPI<T>::dataIOMPI(const IOPattern& iop)
:
dataIO<T>(iop)
{}
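gatherData above only stages bufferSpan_; the branch taken is fixed by the IOPattern given at construction, and the call itself happens inside the dataIO write path. A comment summary of the dispatch:

// dispatch summary for gatherData (see above):
//   AllProcessorsDifferent    -> each rank keeps its own span
//   MasterProcessorDistribute -> gatherMaster collects all data on the master
//   MasterProcessorOnly /
//   AllProcessorSimilar       -> master keeps its span, others get an empty span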


@ -0,0 +1,58 @@
#ifndef __dataIOMPI_hpp__
#define __dataIOMPI_hpp__
#include "dataIO.hpp"
#include "pFlowProcessors.hpp"
#include "gatherMaster.hpp"
namespace pFlow::MPI
{
template<typename T>
class dataIOMPI
:
public dataIO<T>
{
public:
using DataIOType = dataIO<T>;
using DataIOMPIType = dataIOMPI<T>;
protected:
bool gatherData(span<T> data ) override;
public:
TypeInfoTemplate111("dataIO",T,"MPI");
explicit dataIOMPI(const IOPattern& iop);
dataIOMPI(const dataIOMPI&) = default;
dataIOMPI(dataIOMPI&&) = default;
dataIOMPI& operator=(const dataIOMPI&) = default;
dataIOMPI& operator=(dataIOMPI&&) = default;
~dataIOMPI() = default;
add_vCtor
(
DataIOType,
DataIOMPIType,
IOPattern
);
}; //dataIOMPI
} //namespace pFlow::MPI
#include "dataIOMPI.cpp"
#endif //__dataIOMPI_hpp__


@ -0,0 +1,27 @@
#include "types.hpp"
#include "dataIOMPI.hpp"
template class pFlow::MPI::dataIOMPI<pFlow::uint8>;
template class pFlow::MPI::dataIOMPI<pFlow::int8>;
template class pFlow::MPI::dataIOMPI<pFlow::int32>;
template class pFlow::MPI::dataIOMPI<pFlow::int64>;
template class pFlow::MPI::dataIOMPI<pFlow::uint32>;
template class pFlow::MPI::dataIOMPI<pFlow::uint32x3>;
template class pFlow::MPI::dataIOMPI<pFlow::uint64>;
template class pFlow::MPI::dataIOMPI<pFlow::size_t>;
template class pFlow::MPI::dataIOMPI<pFlow::real>;
template class pFlow::MPI::dataIOMPI<pFlow::realx3>;
template class pFlow::MPI::dataIOMPI<pFlow::realx4>;
template class pFlow::MPI::dataIOMPI<pFlow::word>;


@ -0,0 +1,431 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "MPISimulationDomain.hpp"
#include "systemControl.hpp"
#include "rcb1DPartitioning.hpp"
#include "scatteredMasterDistribute.hpp"
#include "scatteredMasterDistributeChar.hpp"
pFlow::MPI::MPISimulationDomain::MPISimulationDomain(systemControl& control)
:
simulationDomain(control),
communication_(pFlowProcessors()),
subDomainsAll_(pFlowProcessors()),
numPointsAll_(pFlowProcessors()),
domainPartitioning_( makeUnique<rcb1DPartitioning>(subDict("decomposition"), globalBox()))
{}
bool pFlow::MPI::MPISimulationDomain::createBoundaryDicts()
{
dictionary& boundaries = this->subDict("boundaries");
dictionary& thisBoundaries = this->subDict(thisBoundariesDictName());
auto neighbors = findPlaneNeighbors();
for(uint32 i=0; i<sizeOfBoundaries(); i++)
{
word bName = bundaryName(i);
auto& bDict = thisBoundaries.subDict(bName);
if( thisDomainActive_ )
{
if( neighbors[i] == -1 )
{
bDict.add("neighborProcessorNo", processors::globalRank());
}
else
{
bDict.add("neighborProcessorNo", neighbors[i]);
bDict.addOrReplace("type", "processor");
}
}
else
{
bDict.add("neighborProcessorNo", processors::globalRank());
bDict.addOrReplace("type", "none");
}
if( bDict.getVal<word>("type") == "periodic")
{
fatalErrorInFunction<<
"periodic is not implemented "<<endl;
fatalExit;
}
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::setThisDomain()
{
thisDomain_ = domain(domainPartitioning_->localBox());
uint32 thisNumPoints = initialNumberInThis();
if(!communication_.collectAllToAll(thisNumPoints, numPointsAll_))
{
fatalErrorInFunction<<
"Failed to distribute number of points."<<endl;
return false;
}
uint32 allNumPoints = std::accumulate(numPointsAll_.begin(), numPointsAll_.end(), 0u);
if( thisNumPoints != 0u )
{
thisDomainActive_ = true;
}
else
{
if(communication_.localMaster()&& allNumPoints == 0u)
thisDomainActive_ = true;
else
thisDomainActive_ = false;
}
if( thisDomainActive_ )
{
bool allInactive = true;
for(int32 i=0; i<communication_.localSize(); i++ )
{
if(i == communication_.localRank() )continue;
if(numPointsAll_[i]!=0)
{
allInactive = false;
break;
}
}
if(allInactive)
{
thisDomain_ = domain(globalBox());
}
}
if(!communication_.collectAllToAll(thisDomain_, subDomainsAll_))
{
fatalErrorInFunction<< "Failed to distributed domains"<<endl;
return false;
}
return true;
}
std::vector<int> pFlow::MPI::MPISimulationDomain::findPlaneNeighbors() const
{
std::vector<int> neighbors(sizeOfBoundaries(), -2);
domain gDomain(globalBox());
// left
if( thisDomain_.left().parallelTouch( gDomain.left() ) )
{
neighbors[0] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.left().parallelTouch(
subDomainsAll_[i].right()) )
{
neighbors[0] = i;
break;
}
}
// right
if( thisDomain_.right().parallelTouch( gDomain.right() ) )
{
neighbors[1] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.right().parallelTouch(
subDomainsAll_[i].left()) )
{
neighbors[1] = i;
break;
}
}
// bottom
if( thisDomain_.bottom().parallelTouch( gDomain.bottom() ) )
{
neighbors[2] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.bottom().parallelTouch(
subDomainsAll_[i].top()) )
{
neighbors[2] = i;
break;
}
}
// top
if( thisDomain_.top().parallelTouch( gDomain.top() ) )
{
neighbors[3] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.top().parallelTouch(
subDomainsAll_[i].bottom()) )
{
neighbors[3] = i;
break;
}
}
// rear
if( thisDomain_.rear().parallelTouch( gDomain.rear() ) )
{
neighbors[4] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.rear().parallelTouch(
subDomainsAll_[i].front()) )
{
neighbors[4] = i;
break;
}
}
// front
if( thisDomain_.front().parallelTouch( gDomain.front() ) )
{
neighbors[5] = -1;
}
for(int i=0; i<subDomainsAll_.size(); i++)
{
if(i == subDomainsAll_.rank())continue;
if( thisDomain_.front().parallelTouch(
subDomainsAll_[i].rear()) )
{
neighbors[5] = i;
break;
}
}
return neighbors;
}
bool pFlow::MPI::MPISimulationDomain::initialUpdateDomains(span<realx3> pointPos)
{
pFlagTypeHost flags(pointPos.size(), 0 , pointPos.size());
initialNumPoints_ = pointPos.size();
if( !domainPartitioning_->partition(pointPos, flags) )
{
fatalErrorInFunction<<
"Point partitioning failed."<<endl;
return false;
}
if(!setThisDomain()) return false;
if(!createBoundaryDicts()) return false;
return true;
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::initialNumberInThis() const
{
uint32 numImport = domainPartitioning_->numberImportThisProc();
uint32 numExport = domainPartitioning_->numberExportThisProc();
return max(initialNumPoints_+ numImport - numExport, 0u);
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<char> src,
span<char> dst,
size_t sizeOfElement
)const
{
MPI::scatteredMasterDistribute<char> dataDist(sizeOfElement, pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<realx3> src,
span<realx3> dst
)const
{
MPI::scatteredMasterDistribute<realx3>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<real> src,
span<real> dst
)const
{
MPI::scatteredMasterDistribute<real>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<uint32> src,
span<uint32> dst
)const
{
MPI::scatteredMasterDistribute<uint32>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
bool pFlow::MPI::MPISimulationDomain::initialTransferBlockData
(
span<int32> src,
span<int32> dst
)const
{
MPI::scatteredMasterDistribute<int32>
dataDist(pFlowProcessors());
auto lists = domainPartitioning_->allExportLists();
if(!dataDist.setDataMaps( lists ))
{
fatalErrorInFunction;
return false;
}
if(!dataDist.distribute(src, dst))
{
fatalErrorInFunction<<
"Error in distribute"<<endl;
return false;
}
return true;
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeImported() const
{
return domainPartitioning_->numberImportThisProc();
}
pFlow::uint32 pFlow::MPI::MPISimulationDomain::numberToBeExported() const
{
return domainPartitioning_->numberExportThisProc();
}
bool
pFlow::MPI::MPISimulationDomain::domainActive() const
{
return thisDomainActive_;
}
const pFlow::domain&
pFlow::MPI::MPISimulationDomain::thisDomain() const
{
return thisDomain_;
}
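For reference, the boundary entries produced by createBoundaryDicts above take this shape for an active domain with a processor neighbor (boundary name and rank value are illustrative):

left
{
    type                   processor;  // replaced when a neighboring sub-domain touches this plane
    neighborProcessorNo    2;          // globalRank of the touching sub-domain
}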


@ -0,0 +1,116 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __MPISimulationDomain_hpp__
#define __MPISimulationDomain_hpp__
#include "partitioning.hpp"
#include "procCommunication.hpp"
#include "procVector.hpp"
#include "simulationDomain.hpp"
namespace pFlow::MPI
{
class MPISimulationDomain : public simulationDomain
{
private:
/// a processor communicator for the simulation domain
procCommunication communication_;
/// sub-domain (thisDomain_ for all processors)
procVector<domain> subDomainsAll_;
/// number of points in all processors
procVector<uint32> numPointsAll_;
/// partitioning object
uniquePtr<partitioning> domainPartitioning_ = nullptr;
/// the actual limits of the simulation domain in this processor
domain thisDomain_;
uint32 initialNumPoints_ = 0;
bool thisDomainActive_ = false;
bool createBoundaryDicts() final;
bool setThisDomain() final;
std::vector<int> findPlaneNeighbors() const;
public:
TypeInfo("simulationDomain<MPI>");
explicit MPISimulationDomain(systemControl& control);
~MPISimulationDomain() final = default;
add_vCtor
(
simulationDomain,
MPISimulationDomain,
systemControl
);
/// @brief Perform the initial partitioning of points among processors
/// @param pointPos positions of all points
/// @return true on success
bool initialUpdateDomains(span<realx3> pointPos) final;
/// @brief Number of points this processor holds after the initial partitioning
/// @return initial number of points in this processor
uint32 initialNumberInThis() const final;
bool initialTransferBlockData(
span<char> src,
span<char> dst,
size_t sizeOfElement
) const final;
bool initialTransferBlockData(span<realx3> src, span<realx3> dst)
const final;
bool initialTransferBlockData(span<real> src, span<real> dst)
const final;
bool initialTransferBlockData(span<uint32> src, span<uint32> dst)
const final;
bool initialTransferBlockData(span<int32> src, span<int32> dst)
const final;
uint32 numberToBeImported() const final;
uint32 numberToBeExported() const final;
/// @brief Is this domain active?
/// Active means there are particles in it and
/// boundaries and other entities of the simulation domain are valid
bool domainActive() const final;
const domain& thisDomain()const final;
};
} // namespace pFlow::MPI
#endif //__MPISimulationDomain_hpp__


@ -0,0 +1,113 @@
#include "partitioning.hpp"
#include "error.hpp"
#include "streams.hpp"
void pFlow::partitioning::freeZoltan()
{
if(validPointers_)
{
Zoltan::LB_Free_Part(&importGlobalGids_, &importLocalGids_,
&importProcs_, &importToPart_);
Zoltan::LB_Free_Part(&exportGlobalGids_, &exportLocalGids_,
&exportProcs_, &exportToPart_);
validPointers_ = false;
}
zoltan_.release();
}
pFlow::partitioning::partitioning
(
const dictionary& dict,
const box& globalBox
)
:
globalBox_(globalBox)
{
if(!zoltanInitialized__)
{
auto rc = Zoltan_Initialize
(
processors::argc(),
processors::argv(),
&version_
);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<<"Cannot initialize zoltan"<<endl;
fatalExit;
}
zoltanInitialized__ = true;
}
// Creates Zoltan object
zoltan_ = std::make_unique<Zoltan>(pFlowProcessors().localCommunicator());
zoltan_->Set_Param("DEBUG_LEVEL", "0");
zoltan_->Set_Param("LB_METHOD", "RCB");
zoltan_->Set_Param("NUM_GID_ENTRIES", "1");
zoltan_->Set_Param("NUM_LID_ENTRIES", "1");
zoltan_->Set_Param("OBJ_WEIGHT_DIM", "0");
zoltan_->Set_Param("RETURN_LISTS", "ALL");
}
bool pFlow::partitioning::partition(span<realx3> points, pFlagTypeHost flags)
{
pointCollection pointCollctn{points, flags};
return partition(pointCollctn);
}
int GetObjectSize
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int *ierr
)
{
*ierr = ZOLTAN_OK;
pFlow::uint32 s = *(static_cast<pFlow::uint32*>(data));
return static_cast<int>(s);
}
void PackObject
(
void *data,
int num_gid_entries,
int num_lid_entries,
ZOLTAN_ID_PTR global_id,
ZOLTAN_ID_PTR local_id,
int dest,
int size,
char *buf,
int *ierr
)
{
}
bool pFlow::partitioning::migrateData(span<char> src, span<char> dst, uint32 elementSize)
{
dataCollection data{src, dst, elementSize};
zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize);
return false;
}
pFlow::partitioning::~partitioning()
{
freeZoltan();
}
void pFlow::partitioning::printBox()const
{
pOutput<< "localBox:" << localBox_<<endl;
}
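Note that migrateData above is a stub: it registers only the size callback and returns false. A hedged sketch of the remaining Zoltan hooks a complete implementation would register (PackObject above is still empty; UnpackObject is a hypothetical counterpart):

// sketch of a completed migration setup, not part of this PR
zoltan_->Set_Obj_Size_Fn(GetObjectSize, &elementSize);
zoltan_->Set_Pack_Obj_Fn(PackObject, &data);      // copy the element's bytes into buf
zoltan_->Set_Unpack_Obj_Fn(UnpackObject, &data);  // hypothetical: copy buf into dstData_
// then call zoltan_->Migrate(...) with the import/export lists from LB_Partition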


@ -0,0 +1,168 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __partitioning_hpp__
#define __partitioning_hpp__
#include "zoltan_cpp.h"
#include "pFlowProcessors.hpp"
#include "virtualConstructor.hpp"
#include "box.hpp"
#include "span.hpp"
#include "pointFlag.hpp"
#include "procVector.hpp"
namespace pFlow
{
struct pointCollection
{
span<realx3> points_;
pFlagTypeHost pFlag_;
uint32 numActivePoints()const
{
return pFlag_.numActive();
}
};
struct dataCollection
{
span<char> srcData_;
span<char> dstData_;
uint32 elementSize_;
};
class partitioning
{
protected:
float version_ = 0.0;
std::unique_ptr<Zoltan> zoltan_ = nullptr;
bool validPointers_ = false;
box globalBox_;
box localBox_;
int32 changes_, numImport_, numExport_;
id_t *importGlobalGids_, *importLocalGids_, *exportGlobalGids_, *exportLocalGids_;
int32 *importProcs_, *importToPart_, *exportProcs_, *exportToPart_;
uint32 numBeforePartition_ = 0 ;
static inline bool zoltanInitialized__ = false;
void freeZoltan();
virtual
bool partition(pointCollection& points) = 0;
public:
partitioning(
const dictionary& dict,
const box& globalBox);
virtual
~partitioning();
create_vCtor(
partitioning,
dictionary,
(
const dictionary& dict,
const box& globalBox
),
(dict, globalBox));
bool partition(
span<realx3> points,
pFlagTypeHost flags);
bool migrateData(span<char> src, span<char> dst, uint32 elementSize);
inline
auto localBox()const
{
return localBox_;
}
inline
const auto& globalBox()const
{
return globalBox_;
}
inline
bool partitionsChanged()const
{
return changes_ == 1;
}
uint32 numberImportThisProc()const
{
return numImport_;
}
uint32 numberExportThisProc()const
{
return numExport_;
}
virtual
span<int32> exportList(int procNo)const = 0;
virtual
pFlow::MPI::procVector<span<int32>> allExportLists()const=0;
void printBox()const;
};
}
#endif //__partitioning_hpp__
/*static
int getNumberOfPoints(void *data, int32 *ierr);
static
void getPointList(
void *data,
int32 sizeGID,
int32 sizeLID,
id_t* globalID,
id_t* localID,
int32 wgt_dim,
float *obj_wgts,
int32 *ierr);*/


@ -0,0 +1,330 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "zoltan_cpp.h"
#include "error.hpp"
#include "processors.hpp"
#include "rcb1DPartitioning.hpp"
bool pFlow::rcb1DPartitioning::partition(pointCollection &points)
{
zoltan_->Set_Param("RCB_OUTPUT_LEVEL", "0");
zoltan_->Set_Param("RCB_RECTILINEAR_BLOCKS", "1");
zoltan_->Set_Param("KEEP_CUTS", "1");
zoltan_->Set_Param("REDUCE_DIMENSIONS", "1");
zoltan_->Set_Param("RCB_RECOMPUTE_BOX", "1");
zoltan_->Set_Param("AVERAGE_CUTS", "0");
zoltan_->Set_Param("MIGRATE_ONLY_PROC_CHANGES", "0");
zoltan_->Set_Num_Obj_Fn(rcb1DPartitioning::getNumberOfPoints, &points);
zoltan_->Set_Obj_List_Fn(rcb1DPartitioning::getPointList, &points);
zoltan_->Set_Num_Geom_Fn(rcb1DPartitioning::getNumGeometry, &points);
switch (direction_)
{
case Direction::X:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_x, &points);
break;
case Direction::Y:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_y, &points);
break;
case Direction::Z:
zoltan_->Set_Geom_Multi_Fn(rcb1DPartitioning::getGeometryList_z, &points);
break;
}
int numGidEntries_, numLidEntries_;
int rc = zoltan_->LB_Partition(changes_, numGidEntries_, numLidEntries_,
numImport_, importGlobalGids_, importLocalGids_, importProcs_, importToPart_,
numExport_, exportGlobalGids_, exportLocalGids_, exportProcs_, exportToPart_);
if (rc != ZOLTAN_OK)
{
fatalErrorInFunction<< "Zoltan faild to perform partitioning."<<endl;
return false;
}
for(auto& ids:exportIds_)
{
ids.clear();
}
std::vector<int32> thisProc(points.numActivePoints(),-1);
for(auto i =0; i<numExport_; i++)
{
exportIds_[exportProcs_[i]].push_back(exportGlobalGids_[i]);
thisProc[exportGlobalGids_[i]] = exportGlobalGids_[i];
}
for(int i=0; i<thisProc.size(); i++)
{
if(thisProc[i]==-1)
exportIds_[0].push_back(i);
}
validPointers_ = true;
int nDim;
double x0;
double y0;
double z0;
double x1;
double y1;
double z1;
zoltan_->RCB_Box
(
processors::globalRank(),
nDim,
x0, y0, z0,
x1, y1, z1
);
localBox_ = globalBox_;
if(equal(x0, x1))
{
x0 = x0 - 0.00001;
x1 = x1 + 0.00001;
}
switch (direction_)
{
case Direction::X :
localBox_.minPoint().x_ = x0;
localBox_.maxPoint().x_ = x1;
break;
case Direction::Y :
localBox_.minPoint().y_ = x0;
localBox_.maxPoint().y_ = x1;
break;
case Direction::Z :
localBox_.minPoint().z_ = x0;
localBox_.maxPoint().z_ = x1;
break;
}
localBox_.minPoint() = max(localBox_.minPoint(), globalBox_.minPoint());
localBox_.maxPoint() = min(localBox_.maxPoint(), globalBox_.maxPoint());
return true;
}
pFlow::rcb1DPartitioning::rcb1DPartitioning
(
const dictionary &dict,
const box &globalBox
)
:
partitioning(dict, globalBox),
exportIds_(pFlowProcessors())
{
word directionName = dict.getVal<word>("direction");
if(toUpper(directionName)== "X")
{
direction_ = Direction::X;
dirVector_ ={1.0, 0.0, 0.0};
}
else if( toUpper(directionName) == "Y")
{
direction_ = Direction::Y;
dirVector_ ={0.0, 1.0, 0.0};
}
else if( toUpper(directionName) == "Z")
{
direction_ = Direction::Z;
dirVector_ ={0.0, 0.0, 1.0};
}
else
{
fatalErrorInFunction<< "wrong direction in dictionary "<<
dict.globalName()<<". Directions should be one of x, y, or z."<<endl;
fatalError;
}
}
int pFlow::rcb1DPartitioning::getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
int pFlow::rcb1DPartitioning::getNumberOfPoints(void *data, int *ierr)
{
auto *obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
return obj->numActivePoints();
}
void pFlow::rcb1DPartitioning::getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
*ierr = ZOLTAN_OK;
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
globalID[n] = i;
localID[n] = n;
n++;
}
}
}
void pFlow::rcb1DPartitioning::getGeometryList_x
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].x_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_y
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].y_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
void pFlow::rcb1DPartitioning::getGeometryList_z
(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr
)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
auto activeRange = obj->pFlag_.activeRange();
uint32 n = 0;
for (auto i=activeRange.start(); i<activeRange.end(); i++)
{
if( obj->pFlag_.isActive(i) )
{
geom_vec[n] = obj->points_[i].z_;
n++;
}
}
*ierr = ZOLTAN_OK;
return;
}
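The settings consumed by the constructor above reduce to one key; an example decomposition sub-dictionary as it would appear in the settings:

decomposition
{
    direction     x;   // one of x, y, or z
}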


@ -0,0 +1,240 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __rcb1DPartitioning_hpp__
#define __rcb1DPartitioning_hpp__
#include "partitioning.hpp"
#include "procVector.hpp"
namespace pFlow
{
class rcb1DPartitioning
:
public partitioning
{
public:
enum Direction
{
X = 0,
Y = 1,
Z = 2
};
protected:
/// Direction of partitioning
Direction direction_ = Direction::X;
realx3 dirVector_ = {1.0, 0.0, 0.0};
word directionName_ = "x";
MPI::procVector<std::vector<int>> exportIds_;
bool partition(pointCollection& points) override;
public:
rcb1DPartitioning(
const dictionary& dict,
const box& globalBox);
~rcb1DPartitioning() override=default;
span<int32> exportList(int procNo)const override
{
return span<int32>(
const_cast<int32*>(exportIds_[procNo].data()),
exportIds_[procNo].size());
}
pFlow::MPI::procVector<span<int32>> allExportLists()const override
{
pFlow::MPI::procVector<span<int32>> allList(pFlowProcessors());
for(int i=0; i<allList.size(); i++)
allList[i]= exportList(i);
return allList;
}
static
int getNumGeometry(void *data, int *ierr);
static
int getNumberOfPoints(void *data, int *ierr);
static
void getPointList
(
void *data,
int sizeGID,
int sizeLID,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int wgt_dim,
float *obj_wgts,
int *ierr
);
static
void getGeometryList_x(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_y(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
void getGeometryList_z(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
};
/*class RCB_y_partitioning
:
public partitioning
{
public:
RCB_y_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_y_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr)
{
auto* obj = static_cast<pointCollection *>(data);
if ( (sizeGID != 1) || (sizeLID != 1) || (num_dim != 1))
{
*ierr = ZOLTAN_FATAL;
return;
}
*ierr = ZOLTAN_OK;
for (int i=0; i < num_obj ; i++)
{
geom_vec[i] = obj->pointList()[i].y_;
}
return;
}
static
int getNumGeometry(void *data, int *ierr)
{
*ierr = ZOLTAN_OK;
return 1;
}
};
class RCB_x_partitioning
:
public partitioning
{
public:
RCB_x_partitioning(int argc, char *argv[], pointCollection& collection, const box& gBox)
:
partitioning(argc, argv, collection, gBox)
{}
virtual
~RCB_x_partitioning()=default;
bool partition() override;
static
void getGeometryList(
void *data,
int sizeGID,
int sizeLID,
int num_obj,
ZOLTAN_ID_PTR globalID,
ZOLTAN_ID_PTR localID,
int num_dim,
double *geom_vec,
int *ierr);
static
int getNumGeometry(void *data, int *ierr);
};*/
} // pFlow
#endif //__rcb1DPartitioning_hpp__


@ -0,0 +1,284 @@
#include "processorBoundaryField.hpp"
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
template<class T, class MemorySpace>
void
pFlow::MPI::processorBoundaryField<T, MemorySpace>::checkDataRecieved() const
{
if (!dataRecieved_)
{
uint32 nRecv = neighborProcField_.waitBufferForUse();
dataRecieved_ = true;
if (nRecv != this->neighborProcSize())
{
fatalErrorInFunction<<
"number of received data is "<< nRecv <<" and expected number is "<<
this->neighborProcSize()<< " in "<<this->name() <<endl;
fatalExit;
}
//pOutput<<"field data "<< this->name()<<" has recieved with size "<< nRecv<<endl;
}
}
template<class T, class MemorySpace>
bool
pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundary(
int step,
DataDirection direction
)
{
#ifndef BoundaryModel1
if(!this->boundary().performBoundarytUpdate())
return true;
#endif
if (step == 1)
{
// Isend
if (direction == DataDirection::TwoWay ||
( this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
(!this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
{
thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
dataRecieved_ = false;
//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
}
}
else if (step == 2)
{
// Irecv
if (direction == DataDirection::TwoWay ||
(!this->isBoundaryMaster() && direction == DataDirection::MasterToSlave) ||
( this->isBoundaryMaster() && direction == DataDirection::SlaveToMaster))
{
neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
dataRecieved_ = false;
//pOutput<<"request for boundary update "<< this->name()<<" direction "<< (int)direction<<endl;
}
}
else
{
fatalErrorInFunction << "Invalid step number " << step << endl;
return false;
}
return true;
}
template<class T, class MemorySpace>
pFlow::MPI::processorBoundaryField<T, MemorySpace>::processorBoundaryField(
const boundaryBase& boundary,
const pointStructure& pStruct,
InternalFieldType& internal
)
: BoundaryFieldType(boundary, pStruct, internal),
thisFieldInNeighbor_(
groupNames("sendBuffer", this->name()),
boundary.neighborProcessorNo(),
boundary.thisBoundaryIndex()
),
neighborProcField_(
groupNames("recieveBuffer", boundary.name()),
boundary.neighborProcessorNo(),
boundary.mirrorBoundaryIndex()
)
{
this->addEvent(message::BNDR_PROCTRANSFER_SEND).
addEvent(message::BNDR_PROCTRANSFER_RECIEVE).
addEvent(message::BNDR_PROCTRANSFER_WAITFILL).
addEvent(message::BNDR_PROC_SIZE_CHANGED);
}
template<class T, class MemorySpace>
typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField()
{
checkDataRecieved();
return neighborProcField_.buffer();
}
template<class T, class MemorySpace>
const typename pFlow::MPI::processorBoundaryField<T, MemorySpace>::
ProcVectorType&
pFlow::MPI::processorBoundaryField<T, MemorySpace>::neighborProcField() const
{
checkDataRecieved();
return neighborProcField_.buffer();
}
template<class T, class MemorySpace>
bool pFlow::MPI::processorBoundaryField<T, MemorySpace>::hearChanges(
real t,
real dt,
uint32 iter,
const message& msg,
const anyList& varList
)
{
BoundaryFieldType::hearChanges(t,dt,iter, msg,varList);
if(msg.equivalentTo(message::BNDR_PROC_SIZE_CHANGED))
{
auto newProcSize = varList.getObject<uint32>("size");
neighborProcField_.resize(newProcSize);
}
if(msg.equivalentTo(message::BNDR_PROCTRANSFER_SEND))
{
const auto& indices = varList.getObject<uint32Vector_D>(
message::eventName(message::BNDR_PROCTRANSFER_SEND)
);
if constexpr( isDeviceAccessible<execution_space>())
{
FieldAccessType transferData(
indices.size(),
indices.deviceViewAll(),
this->internal().deviceViewAll()
);
thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
}
else
{
FieldAccessType transferData(
indices.size(),
indices.hostViewAll(),
this->internal().deviceViewAll()
);
thisFieldInNeighbor_.sendData(pFlowProcessors(),transferData);
}
}
else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_RECIEVE))
{
uint32 numRecieved = varList.getObject<uint32>(
message::eventName(message::BNDR_PROCTRANSFER_RECIEVE)
);
neighborProcField_.recieveData(pFlowProcessors(), numRecieved);
}
else if(msg.equivalentTo(message::BNDR_PROCTRANSFER_WAITFILL))
{
uint32 numRecieved = neighborProcField_.waitBufferForUse();
if(msg.equivalentTo(message::CAP_CHANGED))
{
auto newCap = varList.getObject<uint32>(
message::eventName(message::CAP_CHANGED));
this->internal().field().reserve(newCap);
}
if(msg.equivalentTo(message::SIZE_CHANGED))
{
auto newSize = varList.getObject<uint32>(
message::eventName(message::SIZE_CHANGED));
this->internal().field().resize(newSize);
}
const auto& indices = varList.getObject<uint32IndexContainer>(
message::eventName(message::ITEM_INSERT));
this->internal().field().insertSetElement(indices, neighborProcField_.buffer().deviceView());
return true;
}
return true;
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::sendBackData() const
{
neighborProcField_.sendBackData(pFlowProcessors());
dataRecieved_ = false;
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::recieveBackData() const
{
thisFieldInNeighbor_.recieveBackData(pFlowProcessors(), this->size());
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::addBufferToInternalField()const
{
using RPolicy = Kokkos::RangePolicy<
execution_space,
Kokkos::Schedule<Kokkos::Static>,
Kokkos::IndexType<pFlow::uint32>>;
//pOutput<<"waiting for buffer to be recived in addBufferToInternalField "<<this->name()<<endl;
thisFieldInNeighbor_.waitBufferForUse();
const auto& buffView = thisFieldInNeighbor_.buffer().deviceViewAll();
const auto& field = this->internal().deviceViewAll();
if constexpr( isDeviceAccessible<execution_space> )
{
const auto& indices = this->indexList().deviceViewAll();
Kokkos::parallel_for(
"recieveBackData::"+this->name(),
RPolicy(0,this->size()),
LAMBDA_HD(uint32 i)
{
field[indices[i]] += buffView[i];
}
);
Kokkos::fence();
}
else
{
const auto& indices = this->boundary().indexListHost().deviceViewAll();
Kokkos::parallel_for(
"recieveBackData::"+this->name(),
RPolicy(0,this->size()),
LAMBDA_HD(uint32 i)
{
field[indices[i]] += buffView[i];
}
);
Kokkos::fence();
}
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryToMaster()const
{
if (!this->isBoundaryMaster() )
{
thisFieldInNeighbor_.sendData(pFlowProcessors(), this->thisField(), this->name());
dataRecieved_ = false;
}
}
template <class T, class MemorySpace>
void pFlow::MPI::processorBoundaryField<T, MemorySpace>::updateBoundaryFromSlave()const
{
if( this->isBoundaryMaster() )
{
neighborProcField_.recieveData(pFlowProcessors(), this->neighborProcSize(), this->name());
dataRecieved_ = false;
}
}
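A conceptual sketch of the two-step protocol implemented by updateBoundary above (updateBoundary is private and is driven by the boundary-update machinery, not by user code; shown here only to make the ordering explicit):

// step 1: post non-blocking sends of this side's field values
field.updateBoundary(1, DataDirection::TwoWay);
// step 2: post non-blocking receives for the neighbor's values
field.updateBoundary(2, DataDirection::TwoWay);
// first access blocks in checkDataRecieved() until the buffer is filled
const auto& neighborVals = field.neighborProcField();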


@ -0,0 +1,117 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __processorBoundaryField_hpp__
#define __processorBoundaryField_hpp__
#include "boundaryField.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryProcessor.hpp"
namespace pFlow::MPI
{
template< class T, class MemorySpace = void>
class processorBoundaryField
:
public boundaryField<T, MemorySpace>
{
public:
using processorBoundaryFieldType = processorBoundaryField<T, MemorySpace>;
using BoundaryFieldType = boundaryField<T, MemorySpace>;
using InternalFieldType = typename BoundaryFieldType::InternalFieldType;
using memory_space = typename BoundaryFieldType::memory_space;
using execution_space = typename BoundaryFieldType::execution_space;
using FieldAccessType = typename BoundaryFieldType::FieldAccessType;
using ProcVectorType = typename BoundaryFieldType::ProcVectorType;
private:
mutable dataSender<T, MemorySpace> thisFieldInNeighbor_;
mutable dataReciever<T, MemorySpace> neighborProcField_;
mutable bool dataRecieved_ = true;
void checkDataRecieved()const;
bool updateBoundary(int step, DataDirection direction);
public:
TypeInfoTemplate211("boundaryField","processor", T, memory_space::name());
processorBoundaryField(
const boundaryBase& boundary,
const pointStructure& pStruct,
InternalFieldType& internal);
~processorBoundaryField()override = default;
add_vCtor
(
BoundaryFieldType,
processorBoundaryFieldType,
boundaryBase
);
ProcVectorType& neighborProcField() override;
const ProcVectorType& neighborProcField()const override;
void fill(const T& val)override
{
neighborProcField_.fill(val);
}
bool hearChanges(
real t,
real dt,
uint32 iter,
const message& msg,
const anyList& varList
) override;
void sendBackData()const;
void recieveBackData()const;
void addBufferToInternalField()const;
void updateBoundaryToMaster()const;
void updateBoundaryFromSlave()const;
};
}
#include "processorBoundaryField.cpp"
#endif //__processorBoundaryField_hpp__


@ -0,0 +1,24 @@
//#include "Field.hpp"
#include "processorBoundaryField.hpp"
template class pFlow::MPI::processorBoundaryField<pFlow::uint8>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint8, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint32>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint32, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint64>;
template class pFlow::MPI::processorBoundaryField<pFlow::uint64, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::real>;
template class pFlow::MPI::processorBoundaryField<pFlow::real, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx3>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx3, pFlow::HostSpace>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx4>;
template class pFlow::MPI::processorBoundaryField<pFlow::realx4, pFlow::HostSpace>;


@ -0,0 +1,432 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "boundaryProcessor.hpp"
#include "boundaryProcessorKernels.hpp"
#include "dictionary.hpp"
#include "mpiCommunication.hpp"
#include "boundaryBaseKernels.hpp"
#include "internalPoints.hpp"
#include "Time.hpp"
#include "anyList.hpp"
void
pFlow::MPI::boundaryProcessor::checkDataRecieved() const
{
if (!dataRecieved_)
{
uint32 nRecv = neighborProcPoints_.waitBufferForUse();
dataRecieved_ = true;
if (nRecv != neighborProcSize())
{
fatalErrorInFunction<<"In boundary "<<this->name()<<
" ,number of recieved data is "<< nRecv<<
" and neighborProcSize is "<<neighborProcSize()<<endl;
fatalExit;
}
}
}
pFlow::MPI::boundaryProcessor::boundaryProcessor(
const dictionary& dict,
const plane& bplane,
internalPoints& internal,
boundaryList& bndrs,
uint32 thisIndex
)
: boundaryBase(dict, bplane, internal, bndrs, thisIndex),
thisPointsInNeighbor_(
groupNames("sendBuffer", name()),
neighborProcessorNo(),
thisBoundaryIndex()
),
neighborProcPoints_(
groupNames("neighborProcPoints", name()),
neighborProcessorNo(),
mirrorBoundaryIndex()
)
{
}
bool
pFlow::MPI::boundaryProcessor::beforeIteration(
uint32 step,
const timeInfo& ti,
bool updateIter,
bool iterBeforeUpdate ,
bool& callAgain
)
{
if(step == 1)
{
boundaryBase::beforeIteration(step, ti, updateIter, iterBeforeUpdate, callAgain);
callAgain = true;
}
else if(step == 2 )
{
#ifdef BoundaryModel1
callAgain = true;
#else
if(!performBoundarytUpdate())
{
callAgain = false;
return true;
}
#endif
thisNumPoints_ = size();
MPI_Isend(
&thisNumPoints_,
1,
MPI_UNSIGNED,
neighborProcessorNo(),
thisBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numPointsRequest0_);
MPI_Irecv(
&neighborProcNumPoints_,
1,
MPI_UNSIGNED,
neighborProcessorNo(),
mirrorBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numPointsRequest_
);
}
else if(step == 3 )
{
callAgain = true;
if(numPointsRequest_ != RequestNull)
{
MPI_Wait(&numPointsRequest_, MPI_STATUS_IGNORE);
if(numPointsRequest0_!= RequestNull)
{
MPI_Wait(&numPointsRequest0_, MPI_STATUS_IGNORE);
}
}
// Size has not been changed. Notification is not required.
if(neighborProcNumPoints_ == neighborProcPoints_.size()) return true;
anyList varList;
message msg;
varList.emplaceBack(msg.addAndName(message::BNDR_PROC_SIZE_CHANGED), neighborProcNumPoints_);
if( !notify(ti.iter(), ti.t(), ti.dt(), msg, varList) )
{
fatalErrorInFunction;
callAgain = false;
return false;
}
}
else if(step == 4)
{
dataRecieved_ = false;
if ( !isBoundaryMaster())
{
thisPointsInNeighbor_.sendData(pFlowProcessors(), thisPoints(),"positions");
}
else if (isBoundaryMaster())
{
neighborProcPoints_.recieveData(pFlowProcessors(), neighborProcSize(), "positions");
}
callAgain = false;
}
return true;
}
pFlow::uint32
pFlow::MPI::boundaryProcessor::neighborProcSize() const
{
return neighborProcNumPoints_;
}
pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints()
{
checkDataRecieved();
return neighborProcPoints_.buffer();
}
const pFlow::realx3Vector_D&
pFlow::MPI::boundaryProcessor::neighborProcPoints() const
{
checkDataRecieved();
return neighborProcPoints_.buffer();
}
bool
pFlow::MPI::boundaryProcessor::updataBoundaryData(int step)
{
return true;
}
bool pFlow::MPI::boundaryProcessor::transferData(
uint32 iter,
int step,
bool& callAgain
)
{
if( !iterBeforeBoundaryUpdate() )
{
callAgain = false;
return true;
}
if(step == 1)
{
uint32Vector_D transferFlags("transferFlags"+this->name());
numToTransfer_ = markInNegativeSide(
"transferData::markToTransfer"+this->name(),
transferFlags);
uint32Vector_D keepIndices("keepIndices");
if(numToTransfer_ != 0u)
{
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numToTransfer_,
transferFlags,
transferIndices_,
keepIndices,
false
);
// delete transferred points from this processor
if( !setRemoveKeepIndices(transferIndices_, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
}
else
{
transferIndices_.clear();
}
CheckMPI( Isend(
numToTransfer_,
neighborProcessorNo(),
thisBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numTransferRequest_), true );
CheckMPI(Irecv(
numToRecieve_,
neighborProcessorNo(),
mirrorBoundaryIndex(),
pFlowProcessors().localCommunicator(),
&numRecieveRequest_), true);
callAgain = true;
return true;
}
else if(step == 2) // transfer data to the neighbor
{
if(numTransferRequest_!= RequestNull)
{
Wait(&numTransferRequest_, StatusIgnore);
}
if( numToTransfer_ == 0u)
{
callAgain = true;
return true;
}
pointFieldAccessType transferPoints(
transferIndices_.size(),
transferIndices_.deviceViewAll(),
internal().pointPositionDevice()
);
// this buffer is used temporarily
thisPointsInNeighbor_.sendData(pFlowProcessors(), transferPoints);
message msg;
anyList varList;
varList.emplaceBack(
msg.addAndName(message::BNDR_PROCTRANSFER_SEND),
transferIndices_);
const auto ti = internal().time().TimeInfo();
if(!notify(ti, msg, varList)
)
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = true;
return true;
}
else if(step == 3) // receive data
{
if(numRecieveRequest_ != RequestNull)
{
Wait(&numRecieveRequest_, StatusIgnore);
}
if(numToRecieve_ == 0u)
{
callAgain = false;
return true;
}
// this buffer is being used temporarily
neighborProcPoints_.recieveData(pFlowProcessors(), numToRecieve_);
message msg;
anyList varList;
varList.emplaceBack(
msg.addAndName(message::BNDR_PROCTRANSFER_RECIEVE),
numToRecieve_);
const auto ti = internal().time().TimeInfo();
if(!notify( ti, msg, varList))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = true;
return true;
}
else if(step == 4) // insert the received points
{
if(numToRecieve_ == 0u)
{
callAgain = false;
return true;
}
// points should be inserted first
message msg(message::BNDR_PROCTRANSFER_WAITFILL);
anyList varList;
neighborProcPoints_.waitBufferForUse();
internal().insertPointsOnly(neighborProcPoints_.buffer(), msg, varList);
const auto& indices = varList.getObject<uint32IndexContainer>(message::eventName(message::ITEM_INSERT));
auto indView = deviceViewType1D<uint32>(indices.deviceView().data(), indices.deviceView().size());
uint32Vector_D newIndices("newIndices", indView);
if(! appendNewIndices(newIndices))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
const auto ti = internal().time().TimeInfo();
if(!notify(ti, msg, varList))
{
fatalErrorInFunction;
callAgain = false;
return false;
}
callAgain = false;
return true;
}
return true;
}
bool
pFlow::MPI::boundaryProcessor::iterate(const timeInfo& ti)
{
return true;
}
bool
pFlow::MPI::boundaryProcessor::afterIteration(const timeInfo& ti)
{
uint32 s = size();
pOutput<<"size of boundary is "<< s <<endl;
uint32Vector_D transferFlags("transferFlags",s+1, s+1, RESERVE());
transferFlags.fill(0u);
const auto& transferD = transferFlags.deviceViewAll();
auto points = thisPoints();
auto p = boundaryPlane().infPlane();
uint32 numTransfer = 0;
Kokkos::parallel_reduce
(
"boundaryProcessor::afterIteration",
deviceRPolicyStatic(0,s),
LAMBDA_HD(uint32 i, uint32& transferToUpdate)
{
if(p.pointInNegativeSide(points(i)))
{
transferD(i)=1;
transferToUpdate++;
}
},
numTransfer
);
pOutput<<"Numebr to be transfered "<< numTransfer<<endl;
uint32Vector_D transferIndices("transferIndices");
uint32Vector_D keepIndices("keepIndices");
pFlow::boundaryBaseKernels::createRemoveKeepIndices
(
indexList(),
numTransfer,
transferFlags,
transferIndices,
keepIndices
);
// delete transferred points from this processor
if( !setRemoveKeepIndices(transferIndices, keepIndices))
{
fatalErrorInFunction<<
"error in setting transfer and keep points in boundary "<< name()<<endl;
return false;
}
return true;
}
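The step/callAgain protocol above implies a driver that sweeps all processor boundaries through the four transfer phases in lock step, so the non-blocking messages posted in one phase can complete while other boundaries do their work. Below is a minimal sketch of such a driver loop; the boundary struct is a hypothetical stand-in for the real boundaryBase/boundaryList machinery, which is not part of this diff.

#include <cstdint>
#include <vector>

// Hypothetical stand-in; the real interface in this diff is
// boundaryBase::transferData(uint32 iter, int step, bool& callAgain).
struct boundary
{
    bool transferData(std::uint32_t iter, int step, bool& callAgain);
};

// Sweeps all boundaries through the transfer phases
// (1: exchange counts, 2: pack and send, 3: receive, 4: insert),
// stopping early once no boundary asks to be called again.
bool transferAll(std::vector<boundary>& boundaries, std::uint32_t iter)
{
    for(int step = 1; step <= 4; ++step)
    {
        bool anyCallAgain = false;
        for(auto& b : boundaries)
        {
            bool callAgain = false;
            if(!b.transferData(iter, step, callAgain))
                return false; // a boundary reported a fatal error
            anyCallAgain = anyCallAgain || callAgain;
        }
        if(!anyCallAgain)
            break; // all boundaries are done with this transfer
    }
    return true;
}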

View File

@@ -0,0 +1,137 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __boundaryProcessor_hpp__
#define __boundaryProcessor_hpp__
#include "boundaryBase.hpp"
#include "timeInfo.hpp"
#include "mpiTypes.hpp"
#include "dataSender.hpp"
#include "dataReciever.hpp"
#include "boundaryConfigs.hpp"
namespace pFlow::MPI
{
class boundaryProcessor
: public boundaryBase
{
public:
using pointFieldAccessType = typename boundaryBase::pointFieldAccessType;
private:
uint32 neighborProcNumPoints_ = 0;
uint32 thisNumPoints_ = 0;
Request numPointsRequest_ = RequestNull;
Request numPointsRequest0_ = RequestNull;
dataSender<realx3> thisPointsInNeighbor_;
dataReciever<realx3> neighborProcPoints_;
mutable bool dataRecieved_ = true;
uint32 numToTransfer_ = 0;
uint32 numToRecieve_ = 0;
uint32Vector_D transferIndices_{"transferIndices"};
Request numTransferRequest_ = RequestNull;
Request numRecieveRequest_ = RequestNull;
void checkDataRecieved() const;
/// @brief Update processor boundary data for this processor
/// @param step Either 1 or 2, indicating the update step
/// @return true if successful
/// @details This method is called by boundaryList two times to
/// allow processor boundaries to exchange data in two steps.
/// The first step is a buffered non-blocking send and the second
/// step is a non-blocking receive to get the data.
bool updataBoundaryData(int step) override;
bool transferData(uint32 iter, int step, bool& callAgain) override;
public:
TypeInfo("boundary<processor>");
boundaryProcessor(
const dictionary &dict,
const plane &bplane,
internalPoints &internal,
boundaryList &bndrs,
uint32 thisIndex);
~boundaryProcessor() override = default;
add_vCtor(
boundaryBase,
boundaryProcessor,
dictionary);
bool beforeIteration(
uint32 step,
const timeInfo& ti,
bool updateIter,
bool iterBeforeUpdate,
bool& callAgain
) override;
bool iterate(const timeInfo& ti) override;
bool afterIteration(const timeInfo& ti) override;
/// @brief Return the number of points in the neighbor processor boundary.
/// This is overridden from boundaryBase.
uint32 neighborProcSize() const override;
/// @brief Return a reference to point positions in the neighbor
/// processor boundary.
realx3Vector_D &neighborProcPoints() override;
/// @brief Return a const reference to point positions in the
/// neighbor processor boundary.
const realx3Vector_D &neighborProcPoints() const override;
uint32 numToTransfer()const override
{
return numToTransfer_;
}
uint32 numToRecieve()const override
{
return numToRecieve_;
}
};
} // namespace pFlow::MPI
#endif //__boundaryProcessor_hpp__
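The two-step update described in the doc comment above is the classic non-blocking MPI pattern: post MPI_Isend/MPI_Irecv in the first call and complete them with MPI_Wait in a later call, leaving room for other work in between. A bare-bones sketch of that pattern with plain MPI calls follows; it sits outside any phasicFlow types and all names are illustrative.

#include <mpi.h>

// Step 1: post the non-blocking exchange of point counts with the neighbor
// rank; nothing blocks here. Both count variables must stay alive (and
// unmodified on the send side) until finishCountExchange() has returned.
void postCountExchange(const unsigned& thisCount, unsigned& neighborCount,
                       int neighborRank, int sendTag, int recvTag,
                       MPI_Comm comm, MPI_Request& sendReq, MPI_Request& recvReq)
{
    MPI_Isend(&thisCount, 1, MPI_UNSIGNED, neighborRank, sendTag, comm, &sendReq);
    MPI_Irecv(&neighborCount, 1, MPI_UNSIGNED, neighborRank, recvTag, comm, &recvReq);
}

// Step 2 (a later call): complete both requests before the counts are used.
void finishCountExchange(MPI_Request& sendReq, MPI_Request& recvReq)
{
    MPI_Wait(&recvReq, MPI_STATUS_IGNORE);
    MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
}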

View File

@@ -0,0 +1,56 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#include "phasicFlowKokkos.hpp"
#include "infinitePlane.hpp"
#include "scatteredFieldAccess.hpp"
namespace pFlow::boundaryProcessorKernels
{
struct markNegative
{
markNegative(const infinitePlane& pl,
const deviceViewType1D<uint32>& f,
const deviceScatteredFieldAccess<realx3>& p
)
:
plane_(pl),
flags_(f),
points_(p)
{}
infinitePlane plane_;
deviceViewType1D<uint32> flags_;
deviceScatteredFieldAccess<realx3> points_;
INLINE_FUNCTION_HD
void operator()(uint32 i, uint32& transferToUpdate)const
{
if(plane_.pointInNegativeSide(points_(i)))
{
flags_(i)=1;
transferToUpdate++;
}
}
};
}
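The functor above mirrors the lambda used in boundaryProcessor::afterIteration: it flags points on the negative side of the boundary plane and tallies them through the reduction argument. A sketch of how it would plug into Kokkos::parallel_reduce, assuming the same setup variables as in afterIteration (s, transferD, points, p):

// Assumed to be prepared exactly as in boundaryProcessor::afterIteration:
//   s         - number of boundary points
//   transferD - deviceViewType1D<uint32> of zero-filled flags
//   points    - deviceScatteredFieldAccess<realx3> of boundary point positions
//   p         - this boundary's infinitePlane
uint32 numTransfer = 0;
Kokkos::parallel_reduce(
    "boundaryProcessor::markNegative",
    deviceRPolicyStatic(0, s),
    pFlow::boundaryProcessorKernels::markNegative(p, transferD, points),
    numTransfer
);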

View File

@@ -0,0 +1,135 @@
#ifndef __dataReciever_hpp__
#define __dataReciever_hpp__
#include "span.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
template<typename T, typename MemorySpace=void>
class dataReciever
{
public:
using BufferVectorType = VectorSingle<T, MemorySpace>;
using BufferVectorTypeHost = VectorSingle<T, HostSpace>;
using memory_space = typename BufferVectorType::memory_space;
using execution_space = typename BufferVectorType::execution_space;
private:
BufferVectorType buffer_;
int fromProc_;
int tag_;
mutable Request recvRequest_ = RequestNull;
public:
dataReciever(const word& name, int from, int tag)
:
buffer_(name),
fromProc_(from),
tag_(tag)
{}
~dataReciever()=default;
uint32 waitBufferForUse()const
{
if(recvRequest_ != RequestNull)
{
Status status;
MPI_Wait(&recvRequest_, &status);
int count;
CheckMPI(getCount<T>(&status, count), true);
return static_cast<uint32>(count);
}
else
return buffer_.size();
}
void sendBackData(
const localProcessors& processors)const
{
CheckMPI(
Isend(
buffer_.getSpan(),
fromProc_,
tag_,
processors.localCommunicator(),
&recvRequest_
),
true
);
}
void recieveData(
const localProcessors& processors,
uint32 numToRecv,
const word& name = "dataReciver"
)
{
resize(numToRecv);
CheckMPI(
Irecv(
buffer_.getSpan(),
fromProc_,
tag_,
processors.localCommunicator(),
&recvRequest_
),
true
);
}
inline
auto& buffer()
{
return buffer_;
}
inline
const auto& buffer()const
{
return buffer_;
}
inline
void fill(const T& val)
{
waitBufferForUse();
buffer_.fill(val);
}
inline
uint32 size()const
{
return buffer_.size();
}
inline
void resize(uint32 newSize)
{
waitBufferForUse();
buffer_.clear();
buffer_.resize(newSize);
}
};
}
#endif //__dataReciever_hpp__
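Using the receiver is a two-call affair: post the receive, then block on waitBufferForUse() before touching the buffer. A minimal usage sketch follows; the source rank, tag, and element count n are placeholders, not values taken from the code above.

// Post a non-blocking receive of n realx3 values from the paired rank.
pFlow::MPI::dataReciever<pFlow::realx3> reciever("neighborPoints", /*from*/ 1, /*tag*/ 0);
reciever.recieveData(pFlowProcessors(), n);
// ... overlap other work here ...
uint32 received = reciever.waitBufferForUse(); // completes the request, returns the count
auto& pts = reciever.buffer();                 // safe to read from now on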

View File

@@ -0,0 +1,202 @@
/*------------------------------- phasicFlow ---------------------------------
O C enter of
O O E ngineering and
O O M ultiscale modeling of
OOOOOOO F luid flow
------------------------------------------------------------------------------
Copyright (C): www.cemf.ir
email: hamid.r.norouzi AT gmail.com
------------------------------------------------------------------------------
Licence:
This file is part of phasicFlow code. It is a free software for simulating
granular and multiphase flows. You can redistribute it and/or modify it under
the terms of GNU General Public License v3 or any other later versions.
phasicFlow is distributed to help others in their research in the field of
granular and multiphase flows, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
#ifndef __dataSender_hpp__
#define __dataSender_hpp__
#include "VectorSingles.hpp"
#include "localProcessors.hpp"
#include "mpiCommunication.hpp"
namespace pFlow::MPI
{
template<typename T, typename MemorySpace=void>
class dataSender
{
public:
using BufferVectorType = VectorSingle<T, MemorySpace>;
using BufferVectorTypeHost = VectorSingle<T, HostSpace>;
using memory_space = typename BufferVectorType::memory_space;
using execution_space = typename BufferVectorType::execution_space;
// the buffer is a device vector
private:
mutable BufferVectorType buffer_;
int toProc_;
int tag_;
mutable Request sendRequest_ = RequestNull;
public:
dataSender(const word& name, int toProc, int tag)
:
toProc_(toProc),
tag_(tag)
{}
~dataSender()
{
if(sendRequest_ != RequestNull)
{
MPI_Request_free(&sendRequest_);
}
}
bool waitBufferForUse()const
{
if(sendRequest_ != RequestNull)
{
MPI_Wait(&sendRequest_, StatusesIgnore);
}
return true;
}
void sendData(
const localProcessors& processors,
const scatteredFieldAccess<T, memory_space>& scatterField,
const word& name = "dataSender::sendData"
)
{
using RPolicy = Kokkos::RangePolicy<
execution_space,
Kokkos::Schedule<Kokkos::Static>,
Kokkos::IndexType<pFlow::uint32>>;
uint32 n = scatterField.size();
// make sure the buffer is ready to be used and free
// the previous request (if any).
waitBufferForUse();
// clear the buffer to prevent data copy if capacity increases
buffer_.clear();
buffer_.resize(n);
const auto& buffView = buffer_.deviceViewAll();
Kokkos::parallel_for(
"packDataForSend::"+name,
RPolicy(0,n),
LAMBDA_HD(uint32 i)
{
buffView[i] = scatterField[i];
}
);
Kokkos::fence();
CheckMPI(
Isend(buffer_.getSpan(),
toProc_,
tag_,
processors.localCommunicator(),
&sendRequest_
),
true
);
}
bool recieveBackData(
const localProcessors& processors,
uint32 numToRecieve
)const
{
// make sure the buffer is ready to be used and free
// the previous request (if any).
waitBufferForUse();
// clear the buffer to prevent data copy if capacity increases
buffer_.clear();
buffer_.resize(numToRecieve);
CheckMPI(
Irecv(
buffer_.getSpan(),
toProc_,
tag_,
processors.localCommunicator(),
&sendRequest_
),
true
);
return true;
}
auto& buffer()
{
return buffer_;
}
const auto& buffer()const
{
return buffer_;
}
inline
void fill(const T& val)
{
waitBufferForUse();
buffer_.fill(val);
}
uint32 size()const
{
return buffer_.size();
}
bool sendComplete()
{
int test;
if(sendRequest_ != RequestNull)
{
MPI_Test(&sendRequest_, &test, StatusIgnore);
return test;
}
else
{
return true;
}
}
inline
void resize(uint32 newSize)
{
waitBufferForUse();
buffer_.clear();
buffer_.resize(newSize);
}
};
}
#endif //__dataSender_hpp__
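The sender is the mirror image: sendData() packs a scattered field into the device buffer with a Kokkos kernel and posts an Isend, while sendComplete() and waitBufferForUse() guard against reusing the buffer before the transfer finishes. A minimal usage sketch, with the destination rank and tag as placeholders:

// Pack this boundary's point positions into the device buffer and post
// a non-blocking send to the paired rank.
pFlow::MPI::dataSender<pFlow::realx3> sender("thisPoints", /*to*/ 1, /*tag*/ 0);
sender.sendData(pFlowProcessors(), thisPoints());
// ... overlap other work here ...
if(!sender.sendComplete())
{
    sender.waitBufferForUse(); // block until the buffer can be reused
}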

View File

@@ -55,7 +55,7 @@ private:
 	mutable T* data_ = nullptr;
-	index size_ = 0;
+	IndexType size_ = 0;
 public:
@@ -104,7 +104,7 @@ public:
 	/// Returns the number of elements in the span
 	INLINE_FUNCTION_HD
-	index size() const
+	IndexType size() const
 	{
 		return size_;
 	}
@@ -139,14 +139,14 @@ public:
 	/// Returns a reference to the element at the specified index
 	INLINE_FUNCTION_HD
-	T& operator[](index i)
+	T& operator[](IndexType i)
 	{
 		return data_[i];
 	}
 	/// Returns a const reference to the element at the specified index
 	INLINE_FUNCTION_HD
-	T& operator[](index i)const
+	T& operator[](IndexType i)const
 	{
 		return data_[i];
 	}

View File

@@ -23,21 +23,21 @@ Licence:
 void pFlow::baseTimeControl::setTimeControl
 (
-    timeValue startTime,
-    timeValue endTime,
-    timeValue interval,
+    TimeValueType startTime,
+    TimeValueType endTime,
+    TimeValueType interval,
     const word &intervalPrefix
 )
 {
     isTimeStep_ = false;
     intervalPrefix_ = intervalPrefix;
-    rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
+    rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
 }
 pFlow::baseTimeControl::baseTimeControl(
     const dictionary &dict,
     const word &intervalPrefix,
-    timeValue defStartTime)
+    TimeValueType defStartTime)
     : intervalPrefix_(intervalPrefix)
 {
     auto tControl = dict.getVal<word>("timeControl");
@@ -59,10 +59,10 @@ pFlow::baseTimeControl::baseTimeControl(
     if(!isTimeStep_)
     {
-        auto startTime = (dict.getValOrSet<timeValue>("startTime", defStartTime));
-        auto endTime = (dict.getValOrSet<timeValue>("endTime", largeValue));
-        auto interval = dict.getVal<timeValue>(intervalWord);
-        rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
+        auto startTime = (dict.getValOrSet<TimeValueType>("startTime", defStartTime));
+        auto endTime = (dict.getValOrSet<TimeValueType>("endTime", largeValue));
+        auto interval = dict.getVal<TimeValueType>(intervalWord);
+        rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
     }
     else
@@ -78,9 +78,9 @@ pFlow::baseTimeControl::baseTimeControl(
 pFlow::baseTimeControl::baseTimeControl
 (
     const dictionary& dict,
-    const timeValue defInterval,
+    const TimeValueType defInterval,
     const word& intervalPrefix,
-    const timeValue defStartTime
+    const TimeValueType defStartTime
 )
 :
     intervalPrefix_(intervalPrefix)
@@ -104,10 +104,10 @@ pFlow::baseTimeControl::baseTimeControl
     if(!isTimeStep_)
     {
-        auto startTime = (dict.getValOrSet<timeValue>("startTime", defStartTime));
-        auto endTime = (dict.getValOrSet<timeValue>("endTime", largeValue));
-        auto interval = dict.getValOrSet<timeValue>(intervalWord, defInterval);
-        rRange_ = stridedRange<timeValue>(startTime, endTime, interval);
+        auto startTime = (dict.getValOrSet<TimeValueType>("startTime", defStartTime));
+        auto endTime = (dict.getValOrSet<TimeValueType>("endTime", largeValue));
+        auto interval = dict.getValOrSet<TimeValueType>(intervalWord, defInterval);
+        rRange_ = stridedRange<TimeValueType>(startTime, endTime, interval);
     }
     else
@@ -131,9 +131,9 @@ pFlow::baseTimeControl::baseTimeControl(int32 start, int32 end, int32 stride, co
 pFlow::baseTimeControl::baseTimeControl
 (
-    timeValue start,
-    timeValue end,
-    timeValue stride,
+    TimeValueType start,
+    TimeValueType end,
+    TimeValueType stride,
     const word &intervalPrefix
 )
 :
@@ -147,7 +147,7 @@ pFlow::baseTimeControl::baseTimeControl
 {
 }
-bool pFlow::baseTimeControl::eventTime(uint32 iter, timeValue t, timeValue dt) const
+bool pFlow::baseTimeControl::eventTime(uint32 iter, TimeValueType t, TimeValueType dt) const
 {
     if(isTimeStep_)
     {
@@ -166,7 +166,7 @@ bool pFlow::baseTimeControl::eventTime(const timeInfo &ti) const
 }
 bool
-pFlow::baseTimeControl::isInRange(uint32 iter, timeValue t, timeValue dt) const
+pFlow::baseTimeControl::isInRange(uint32 iter, TimeValueType t, TimeValueType dt) const
 {
     if(isTimeStep_)
     {
@@ -178,7 +178,7 @@ pFlow::baseTimeControl::isInRange(uint32 iter, timeValue t, timeValue dt) const
     }
 }
-pFlow::timeValue
+pFlow::TimeValueType
 pFlow::baseTimeControl::startTime() const
 {
     if(!isTimeStep_)
@@ -191,7 +191,7 @@ pFlow::baseTimeControl::startTime() const
     return 0;
 }
-pFlow::timeValue
+pFlow::TimeValueType
 pFlow::baseTimeControl::endTime() const
 {
     if(!isTimeStep_)
@@ -204,7 +204,7 @@ pFlow::baseTimeControl::endTime() const
     return 0;
 }
-pFlow::timeValue
+pFlow::TimeValueType
 pFlow::baseTimeControl::rInterval() const
 {
     if(!isTimeStep_)

View File

@@ -36,16 +36,16 @@ private:
     int32StridedRagne iRange_;
-    stridedRange<timeValue> rRange_;
+    stridedRange<TimeValueType> rRange_;
     word intervalPrefix_;
 protected:
     void setTimeControl(
-        timeValue startTime,
-        timeValue endTime,
-        timeValue interval,
+        TimeValueType startTime,
+        TimeValueType endTime,
+        TimeValueType interval,
         const word& intervalPrefix);
 public:
@@ -53,14 +53,14 @@ public:
     baseTimeControl(
         const dictionary& dict,
         const word& intervalPrefix = "",
-        timeValue defStartTime = 0.0
+        TimeValueType defStartTime = 0.0
     );
     baseTimeControl(
         const dictionary& dict,
-        const timeValue defInterval,
+        const TimeValueType defInterval,
         const word& intervalPrefix="",
-        const timeValue defStartTime=0.0);
+        const TimeValueType defStartTime=0.0);
     baseTimeControl(
         int32 start,
@@ -70,9 +70,9 @@ public:
     );
     baseTimeControl(
-        timeValue start,
-        timeValue end,
-        timeValue stride,
+        TimeValueType start,
+        TimeValueType end,
+        TimeValueType stride,
         const word& intervalPrefix = ""
     );
@@ -99,17 +99,17 @@ public:
         return isTimeStep_;
     }
-    bool eventTime(uint32 iter, timeValue t, timeValue dt) const;
+    bool eventTime(uint32 iter, TimeValueType t, TimeValueType dt) const;
     bool eventTime(const timeInfo& ti)const;
-    bool isInRange(uint32 iter, timeValue t, timeValue dt) const;
+    bool isInRange(uint32 iter, TimeValueType t, TimeValueType dt) const;
-    timeValue startTime() const;
+    TimeValueType startTime() const;
-    timeValue endTime() const;
+    TimeValueType endTime() const;
-    timeValue rInterval() const;
+    TimeValueType rInterval() const;
     int32 startIter() const;

View File

@@ -36,16 +36,16 @@ pFlow::timeControl::timeControl
     ti_(dict),
     startTime_
     (
-        dict.getVal<timeValue>("startTime")
+        dict.getVal<TimeValueType>("startTime")
     ),
     endTime_
     (
-        dict.getVal<timeValue>("endTime")
+        dict.getVal<TimeValueType>("endTime")
     ),
     stopAt_(endTime_),
     saveInterval_
     (
-        dict.getVal<timeValue>("saveInterval")
+        dict.getVal<TimeValueType>("saveInterval")
     ),
     lastSaved_(startTime_),
     performSorting_
@@ -65,9 +65,9 @@ pFlow::timeControl::timeControl
 pFlow::timeControl::timeControl(
     dictionary& dict,
-    timeValue startTime,
-    timeValue endTime,
-    timeValue saveInterval,
+    TimeValueType startTime,
+    TimeValueType endTime,
+    TimeValueType saveInterval,
     word startTimeName)
 :
     ti_(startTime, dict),
@@ -95,9 +95,9 @@ pFlow::timeControl::timeControl(
     checkForOutputToFile();
 }
-pFlow::timeValue pFlow::timeControl::setTime(timeValue t)
+pFlow::TimeValueType pFlow::timeControl::setTime(TimeValueType t)
 {
-    timeValue tmp = ti_.currentTime();
+    TimeValueType tmp = ti_.currentTime();
     ti_.currentTime_ = t;
     lastSaved_ = t;
     checkForOutputToFile();

View File

@@ -37,7 +37,7 @@ class dictionary;
 class timeControl
 {
 public:
-    using timeStridedRange = stridedRange<timeValue>;
+    using timeStridedRange = stridedRange<TimeValueType>;
 private:
@@ -47,19 +47,19 @@ private:
     timeInfo ti_;
     // - start time of simulation
-    timeValue startTime_;
+    TimeValueType startTime_;
     // - end time of simulation
-    timeValue endTime_;
+    TimeValueType endTime_;
     // - stopAt
-    timeValue stopAt_;
+    TimeValueType stopAt_;
     // - time interval for time folder output
-    timeValue saveInterval_;
+    TimeValueType saveInterval_;
     // - the last time folder that was saved
-    timeValue lastSaved_;
+    TimeValueType lastSaved_;
     bool managedExternaly_ = false;
@@ -80,7 +80,7 @@ private:
     inline word timeName_ = "wrongSettings"; // for managedExternamly
     static
-    inline timeValue writeTime_ = 0; // for managedExternamly
+    inline TimeValueType writeTime_ = 0; // for managedExternamly
     void checkForOutputToFile();
@@ -92,22 +92,22 @@ public:
     timeControl(
         dictionary& dict,
-        timeValue startTime,
-        timeValue endTime,
-        timeValue saveInterval,
+        TimeValueType startTime,
+        TimeValueType endTime,
+        TimeValueType saveInterval,
         word startTimeName);
     virtual ~timeControl() = default;
-    timeValue dt()const
+    TimeValueType dt()const
     {
         return ti_.dt();
     }
-    timeValue setTime(timeValue t);
+    TimeValueType setTime(TimeValueType t);
-    void setStopAt(timeValue sT)
+    void setStopAt(TimeValueType sT)
     {
         if(managedExternaly_)
         {
@@ -115,24 +115,24 @@ public:
         }
     }
-    timeValue startTime()const
+    TimeValueType startTime()const
     {
         return startTime_;
     }
-    timeValue endTime()const
+    TimeValueType endTime()const
     {
         return endTime_;
     }
-    timeValue saveInterval()const
+    TimeValueType saveInterval()const
     {
         return saveInterval_;
     }
     word timeName()const;
-    timeValue currentTime() const
+    TimeValueType currentTime() const
     {
         return ti_.currentTime();
     }

View File

@@ -36,16 +36,16 @@ private:
     uint32 currentIter_;
     // - current time of simulation
-    timeValue currentTime_;
+    TimeValueType currentTime_;
     // - integration time step
-    timeValue dt_;
+    TimeValueType dt_;
     inline static uint32 presicion_ = 5;
 public:
-    timeInfo(uint32 cIter, timeValue cTime, timeValue dt)
+    timeInfo(uint32 cIter, TimeValueType cTime, TimeValueType dt)
     : currentIter_(cIter),
       currentTime_(cTime),
       dt_(dt)
@@ -55,31 +55,31 @@ public:
     timeInfo(const dictionary& dict)
     :
       currentIter_(0),
-      currentTime_(dict.getVal<timeValue>("startTime")),
-      dt_( dict.getVal<timeValue>("dt"))
+      currentTime_(dict.getVal<TimeValueType>("startTime")),
+      dt_( dict.getVal<TimeValueType>("dt"))
     {
         presicion_ = dict.getValOrSet<uint32>("timePrecision",5);
     }
-    timeInfo(timeValue currentTime, const dictionary& dict)
+    timeInfo(TimeValueType currentTime, const dictionary& dict)
     :
       currentIter_(0),
       currentTime_(currentTime),
-      dt_( dict.getVal<timeValue>("dt"))
+      dt_( dict.getVal<TimeValueType>("dt"))
     {
         presicion_ = dict.getValOrSet<int32>("timePrecision",5);
     }
-    inline const timeValue& currentTime()const
+    inline const TimeValueType& currentTime()const
     {
         return currentTime_;
     }
-    inline const timeValue& t() const
+    inline const TimeValueType& t() const
     {
         return currentTime_;
     }
-    inline const timeValue& dt() const
+    inline const TimeValueType& dt() const
     {
         return dt_;
     }
@@ -109,7 +109,7 @@ public:
     inline
     word prevTimeName()const
     {
-        return real2FixedStripZeros( max(currentTime_-dt_, timeValue(0)), presicion_);
+        return real2FixedStripZeros( max(currentTime_-dt_, TimeValueType(0)), presicion_);
     }
     static

View File

@@ -0,0 +1,55 @@
/* -------------------------------*- C++ -*--------------------------------- *\
| phasicFlow File |
| copyright: www.cemf.ir |
\* ------------------------------------------------------------------------- */
objectName settingsDict;
objectType dictionary;
fileFormat ASCII;
/*---------------------------------------------------------------------------*/
// list of libraries that should be loaded during runtime;
// for example, "libPostprocessData.so" (optional)
libs ();
// Auxiliary function to be run during simulation (optional)
auxFunctions postprocessData;
// time step for integration in seconds, (mandatory)
dt 0.00001;
// start time of the simulation; it can be any valid value. The simulation
// starts from the time folder with the same name
startTime 0;
// end time for simulation (mandatory)
endTime 10;
// time interval between each data save on disk, (mandatory)
saveInterval 0.01;
// maximum number of digits for time folder, (optional, default is 5)
timePrecision 5;
// gravitational acceleration vector (m/s2), (mandatory)
g (0 -9.8 0);
// a list of objects to be added to the default save list on disk, (optional)
includeObjects (diameter);
// exclude a list of objects from saving on the disk, (optional)
excludeObjects ();
// integration method for position and velocity, (mandatory)
integrationMethod AdamsBashforth2;
// whether to keep the integration history under timeFolder/integration, (optional, default is no)
integrationHistory Yes;
// data writing format (ascii or binary), (mandatory)
writeFormat ascii;
// report timers (Yes or No)?, (optional, default is Yes)
timersReport Yes;
// time interval between each timer report, (optional, default is 0.04)
timersReportInterval 0.01;

View File

@@ -27,11 +27,11 @@ Licence:
 namespace pFlow
 {
-Map<timeValue, fileSystem> getTimeFolders(const fileSystem& path);
+Map<TimeValueType, fileSystem> getTimeFolders(const fileSystem& path);
 class timeFolder
 {
-    using timeList = Map<timeValue, fileSystem>;
+    using timeList = Map<TimeValueType, fileSystem>;
 protected:
@@ -61,14 +61,14 @@ public:
     }
     inline
-    timeValue currentTime()const
+    TimeValueType currentTime()const
     {
         if(folders_.empty()) return -1;
         return currentFolder_->first;
     }
     inline
-    timeValue nextTime()const
+    TimeValueType nextTime()const
     {
         auto next = currentFolder_;
         next++;
@@ -98,7 +98,7 @@ public:
         return !finished();
     }
-    bool setTime(timeValue upto)
+    bool setTime(TimeValueType upto)
     {
         timeList::iterator orgFolder = currentFolder_;
@@ -140,14 +140,14 @@ public:
         return false;
     }
-    timeValue startTime()const
+    TimeValueType startTime()const
     {
         if(folders_.empty()) return -1;
         auto [t,f] = *folders_.begin();
         return t;
     }
-    timeValue endTime()const
+    TimeValueType endTime()const
     {
         if(folders_.empty()) return -1;
         auto [t,f] = *(--folders_.end());
@@ -162,16 +162,16 @@ public:
 };
 inline
-Map<timeValue, fileSystem> getTimeFolders(const fileSystem& path)
+Map<TimeValueType, fileSystem> getTimeFolders(const fileSystem& path)
 {
-    Map<timeValue, fileSystem> tFolders;
+    Map<TimeValueType, fileSystem> tFolders;
     auto subDirs = subDirectories(path);
     for(auto& subD: subDirs)
     {
         auto timeName = tailName(subD.wordPath(), '/');
-        timeValue TIME;
+        TimeValueType TIME;
         if( auto success = readReal(timeName, TIME); success)
         {
             if(!tFolders.insertIf(TIME, subD))

View File

@@ -0,0 +1,64 @@
/* -------------------------------*- C++ -*--------------------------------- *\
| phasicFlow File |
| copyright: www.cemf.ir |
\* ------------------------------------------------------------------------- */
objectName domainDict;
objectType dictionary;
fileFormat ASCII;
/*---------------------------------------------------------------------------*/
// Simulation domain: every particle that goes outside this domain will be deleted
globalBox
{
min (-0.12 -0.12 0.00);
max (0.12 0.12 0.11);
}
pointSorting
{
active Yes; // optional (default: No)
dx 0.01; // optional (default: 0.01)
timeControl simulationTime; // runTime, or timeStep
startTime 0.0;
endTime 100; // optional (default: very large number)
sortingInterval 0.1; // in seconds
}
boundaries
{
left
{
type exit; // other options: periodic, reflective
}
right
{
type exit; // other options: periodic, reflective
}
bottom
{
type exit; // other options: periodic, reflective
}
top
{
type exit; // other options: periodic, reflective
}
rear
{
type exit; // other options: periodic, reflective
}
front
{
type exit; // other options: periodic, reflective
}
}

View File

@@ -32,15 +32,23 @@ pFlow::pointSorting::pointSorting(const dictionary & dict)
     dx_(
         performSorting_()?
         dict.getVal<real>("dx"):
-        1.0
+        0.01
     )
 {
     if( performSorting_() )
-        REPORT(2)<<"Point sorting is "<<Yellow_Text("active")<<" in simulation"<<END_REPORT;
-    else
-        REPORT(2)<<"Point sorting is "<<Yellow_Text("inactive")<<" in simulation"<<END_REPORT;
+    {
+        REPORT(2)<<"Point sorting is "<<Yellow_Text("active")<<" in the simulation"<<END_REPORT;
+        dictionary dict2("pointSorting");
+        dict2.add("active", performSorting_);
+        dict2.add("dx", dx_);
+        timeControl_.write(dict2);
+        output.incrIndent();
+        output<<dict2<<endl;
+        output.decrIndent();
+    }
 }
 pFlow::uint32IndexContainer
 pFlow::pointSorting::getSortedIndices(
     const box& boundingBox,

Some files were not shown because too many files have changed in this diff.