Compare commits

37 Commits

| Author | SHA1 | Date |
|---|---|---|
| | cc06e30404 | |
| | 25c79dc7cd | |
| | a725d34dd3 | |
| | 2791d2e225 | |
| | 72ce153e48 | |
| | 5b7e05cd32 | |
| | 85afe00fc5 | |
| | 5c1790277b | |
| | e09ae438a2 | |
| | d06d5b4db8 | |
| | 50e2a845f8 | |
| | 738498cb28 | |
| | 42b9cf1ad9 | |
| | e9d321fd00 | |
| | ed1d86ade9 | |
| | 471baa5065 | |
| | 4bb6c03013 | |
| | b8e41b2b39 | |
| | 133e4f13a2 | |
| | 914c4f4791 | |
| | f345b0e520 | |
| | f5ed23d687 | |
| | 03d501db04 | |
| | 09ffdb553d | |
| | 699e443c7a | |
| | 24bfa44911 | |
| | 6738854a9d | |
| | 223ec17a54 | |
| | 79af79d471 | |
| | 26c81d8e81 | |
| | 9deeda9831 | |
| | 3a7bce3af2 | |
| | c6945bb095 | |
| | 0d24f1503c | |
| | cb252f5ea2 | |
| | 7a76cbaafd | |
| | 57a7376044 | |
.gitignore (vendored): +4 lines

```diff
@@ -1,2 +1,6 @@
 __pycache__
 GW150914
+GW150914-origin
+docs
+*.tmp
+
```
```diff
@@ -16,7 +16,7 @@ import numpy
 File_directory = "GW150914" ## output file directory
 Output_directory = "binary_output" ## binary data file directory
 ## The file directory name should not be too long
-MPI_processes = 8 ## number of mpi processes used in the simulation
+MPI_processes = 64 ## number of mpi processes used in the simulation
 
 GPU_Calculation = "no" ## Use GPU or not
 ## (prefer "no" in the current version, because the GPU part may have bugs when integrated in this Python interface)
```
```diff
@@ -8,6 +8,14 @@
 ##
 ##################################################################
 
+## Guard against re-execution by multiprocessing child processes.
+## Without this, using 'spawn' or 'forkserver' context would cause every
+## worker to re-run the entire script, spawning exponentially more
+## workers (fork bomb).
+if __name__ != '__main__':
+    import sys as _sys
+    _sys.exit(0)
+
 ##################################################################
 
```
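For comparison, the effect of this early-exit guard is the same as the more common idiom of placing the driver code under the standard main check; a minimal sketch of that equivalent layout (illustrative only, not how this repository is organized):

```python
# Equivalent protection: children spawned by multiprocessing re-import the
# module, but only the parent process (run as a script) executes the driver.
def run_simulation_and_plots():
    ...  # set up the run, then generate plots

if __name__ == '__main__':
    run_simulation_and_plots()
```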
```diff
@@ -424,26 +432,31 @@ print(
 
 import plot_xiaoqu
 import plot_GW_strain_amplitude_xiaoqu
+from parallel_plot_helper import run_plot_tasks_parallel
+
+plot_tasks = []
 
 ## Plot black hole trajectory
-plot_xiaoqu.generate_puncture_orbit_plot( binary_results_directory, figure_directory )
-plot_xiaoqu.generate_puncture_orbit_plot3D( binary_results_directory, figure_directory )
+plot_tasks.append( ( plot_xiaoqu.generate_puncture_orbit_plot, (binary_results_directory, figure_directory) ) )
+plot_tasks.append( ( plot_xiaoqu.generate_puncture_orbit_plot3D, (binary_results_directory, figure_directory) ) )
 
 ## Plot black hole separation vs. time
-plot_xiaoqu.generate_puncture_distence_plot( binary_results_directory, figure_directory )
+plot_tasks.append( ( plot_xiaoqu.generate_puncture_distence_plot, (binary_results_directory, figure_directory) ) )
 
 ## Plot gravitational waveforms (psi4 and strain amplitude)
 for i in range(input_data.Detector_Number):
-    plot_xiaoqu.generate_gravitational_wave_psi4_plot( binary_results_directory, figure_directory, i )
-    plot_GW_strain_amplitude_xiaoqu.generate_gravitational_wave_amplitude_plot( binary_results_directory, figure_directory, i )
+    plot_tasks.append( ( plot_xiaoqu.generate_gravitational_wave_psi4_plot, (binary_results_directory, figure_directory, i) ) )
+    plot_tasks.append( ( plot_GW_strain_amplitude_xiaoqu.generate_gravitational_wave_amplitude_plot, (binary_results_directory, figure_directory, i) ) )
 
 ## Plot ADM mass evolution
 for i in range(input_data.Detector_Number):
-    plot_xiaoqu.generate_ADMmass_plot( binary_results_directory, figure_directory, i )
+    plot_tasks.append( ( plot_xiaoqu.generate_ADMmass_plot, (binary_results_directory, figure_directory, i) ) )
 
 ## Plot Hamiltonian constraint violation over time
 for i in range(input_data.grid_level):
-    plot_xiaoqu.generate_constraint_check_plot( binary_results_directory, figure_directory, i )
+    plot_tasks.append( ( plot_xiaoqu.generate_constraint_check_plot, (binary_results_directory, figure_directory, i) ) )
+
+run_plot_tasks_parallel(plot_tasks)
 
 ## Plot stored binary data
 plot_xiaoqu.generate_binary_data_plot( binary_results_directory, figure_directory )
```
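The `parallel_plot_helper` module itself is not shown in this compare view. A minimal sketch of what `run_plot_tasks_parallel` could look like, assuming each task is a `(callable, args)` tuple and the plotting functions are safe to run in separate processes (module-level functions, matplotlib with a non-interactive backend):

```python
# parallel_plot_helper.py (hypothetical sketch, not the repository's actual code)
import multiprocessing as mp

def _run_one(task):
    func, args = task          # each task is a (callable, args-tuple) pair
    func(*args)                # execute the plotting function with its arguments

def run_plot_tasks_parallel(plot_tasks, max_workers=None):
    """Run independent plotting tasks in separate processes."""
    if not plot_tasks:
        return
    # 'spawn' avoids forking the large simulation state into every worker;
    # this is also why the driver script guards against re-execution above.
    ctx = mp.get_context("spawn")
    workers = max_workers or min(len(plot_tasks), mp.cpu_count())
    with ctx.Pool(processes=workers) as pool:
        pool.map(_run_one, plot_tasks)
```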
AMSS_NCKU_Verify_ASC26.py (new file, 279 lines, @@ -0,0 +1,279 @@)

```python
#!/usr/bin/env python3
"""
AMSS-NCKU GW150914 Simulation Regression Test Script

Verification Requirements:
1. XY-plane trajectory RMS error < 1% (Optimized vs. baseline, max of BH1 and BH2)
2. ADM constraint violation < 2 (Grid Level 0)

RMS Calculation Method:
- Computes trajectory deviation on the XY plane independently for BH1 and BH2
- For each black hole: RMS = sqrt((1/M) * sum((Δr_i / r_i^max)^2)) × 100%
- Final RMS = max(RMS_BH1, RMS_BH2)

Usage: python3 AMSS_NCKU_Verify_ASC26.py [output_dir]
Default: output_dir = GW150914/AMSS_NCKU_output
Reference: GW150914-origin (baseline simulation)
"""

import numpy as np
import sys
import os

# ANSI Color Codes
class Color:
    GREEN = '\033[92m'
    RED = '\033[91m'
    YELLOW = '\033[93m'
    BLUE = '\033[94m'
    BOLD = '\033[1m'
    RESET = '\033[0m'


def get_status_text(passed):
    if passed:
        return f"{Color.GREEN}{Color.BOLD}PASS{Color.RESET}"
    else:
        return f"{Color.RED}{Color.BOLD}FAIL{Color.RESET}"


def load_bh_trajectory(filepath):
    """Load black hole trajectory data"""
    data = np.loadtxt(filepath)
    return {
        'time': data[:, 0],
        'x1': data[:, 1], 'y1': data[:, 2], 'z1': data[:, 3],
        'x2': data[:, 4], 'y2': data[:, 5], 'z2': data[:, 6]
    }


def load_constraint_data(filepath):
    """Load constraint violation data"""
    data = []
    with open(filepath, 'r') as f:
        for line in f:
            if line.startswith('#'):
                continue
            parts = line.split()
            if len(parts) >= 8:
                data.append([float(x) for x in parts[:8]])
    return np.array(data)


def calculate_rms_error(bh_data_ref, bh_data_target):
    """
    Calculate trajectory-based RMS error on the XY plane between baseline and optimized simulations.

    This function computes the RMS error independently for BH1 and BH2 trajectories,
    then returns the maximum of the two as the final RMS error metric.

    For each black hole, the RMS is calculated as:
        RMS = sqrt( (1/M) * sum( (Δr_i / r_i^max)^2 ) ) × 100%

    where:
        Δr_i = sqrt((x_ref,i - x_new,i)^2 + (y_ref,i - y_new,i)^2)
        r_i^max = max(sqrt(x_ref,i^2 + y_ref,i^2), sqrt(x_new,i^2 + y_new,i^2))

    Args:
        bh_data_ref: Reference (baseline) trajectory data
        bh_data_target: Target (optimized) trajectory data

    Returns:
        rms_value: Final RMS error as a percentage (max of BH1 and BH2)
        error: Error message if any
    """
    # Align data: truncate to the length of the shorter dataset
    M = min(len(bh_data_ref['time']), len(bh_data_target['time']))

    if M < 10:
        return None, "Insufficient data points for comparison"

    # Extract XY coordinates for both black holes
    x1_ref = bh_data_ref['x1'][:M]
    y1_ref = bh_data_ref['y1'][:M]
    x2_ref = bh_data_ref['x2'][:M]
    y2_ref = bh_data_ref['y2'][:M]

    x1_new = bh_data_target['x1'][:M]
    y1_new = bh_data_target['y1'][:M]
    x2_new = bh_data_target['x2'][:M]
    y2_new = bh_data_target['y2'][:M]

    # Calculate RMS for BH1
    delta_r1 = np.sqrt((x1_ref - x1_new)**2 + (y1_ref - y1_new)**2)
    r1_ref = np.sqrt(x1_ref**2 + y1_ref**2)
    r1_new = np.sqrt(x1_new**2 + y1_new**2)
    r1_max = np.maximum(r1_ref, r1_new)

    # Calculate RMS for BH2
    delta_r2 = np.sqrt((x2_ref - x2_new)**2 + (y2_ref - y2_new)**2)
    r2_ref = np.sqrt(x2_ref**2 + y2_ref**2)
    r2_new = np.sqrt(x2_new**2 + y2_new**2)
    r2_max = np.maximum(r2_ref, r2_new)

    # Avoid division by zero for BH1
    valid_mask1 = r1_max > 1e-15
    if np.sum(valid_mask1) < 10:
        return None, "Insufficient valid data points for BH1"

    terms1 = (delta_r1[valid_mask1] / r1_max[valid_mask1])**2
    rms_bh1 = np.sqrt(np.mean(terms1)) * 100

    # Avoid division by zero for BH2
    valid_mask2 = r2_max > 1e-15
    if np.sum(valid_mask2) < 10:
        return None, "Insufficient valid data points for BH2"

    terms2 = (delta_r2[valid_mask2] / r2_max[valid_mask2])**2
    rms_bh2 = np.sqrt(np.mean(terms2)) * 100

    # Final RMS is the maximum of BH1 and BH2
    rms_final = max(rms_bh1, rms_bh2)

    return rms_final, None


def analyze_constraint_violation(constraint_data, n_levels=9):
    """
    Analyze ADM constraint violation
    Return maximum constraint violation for Grid Level 0
    """
    # Extract Grid Level 0 data (first entry for each time step)
    level0_data = constraint_data[::n_levels]

    # Calculate maximum absolute value for each constraint
    results = {
        'Ham': np.max(np.abs(level0_data[:, 1])),
        'Px': np.max(np.abs(level0_data[:, 2])),
        'Py': np.max(np.abs(level0_data[:, 3])),
        'Pz': np.max(np.abs(level0_data[:, 4])),
        'Gx': np.max(np.abs(level0_data[:, 5])),
        'Gy': np.max(np.abs(level0_data[:, 6])),
        'Gz': np.max(np.abs(level0_data[:, 7]))
    }

    results['max_violation'] = max(results.values())
    return results


def print_header():
    """Print report header"""
    print("\n" + Color.BLUE + Color.BOLD + "=" * 65 + Color.RESET)
    print(Color.BOLD + " AMSS-NCKU GW150914 Simulation Regression Test Report" + Color.RESET)
    print(Color.BLUE + Color.BOLD + "=" * 65 + Color.RESET)


def print_rms_results(rms_rel, error, threshold=1.0):
    """Print RMS error results"""
    print(f"\n{Color.BOLD}1. RMS Error Analysis (Baseline vs Optimized){Color.RESET}")
    print("-" * 45)

    if error:
        print(f" {Color.RED}Error: {error}{Color.RESET}")
        return False

    passed = rms_rel < threshold

    print(f" RMS relative error: {rms_rel:.4f}%")
    print(f" Requirement: < {threshold}%")
    print(f" Status: {get_status_text(passed)}")

    return passed


def print_constraint_results(results, threshold=2.0):
    """Print constraint violation results"""
    print(f"\n{Color.BOLD}2. ADM Constraint Violation Analysis (Grid Level 0){Color.RESET}")
    print("-" * 45)

    names = ['Ham', 'Px', 'Py', 'Pz', 'Gx', 'Gy', 'Gz']
    for i, name in enumerate(names):
        print(f" Max |{name:3}|: {results[name]:.6f}", end=" ")
        if (i + 1) % 2 == 0: print()
    if len(names) % 2 != 0: print()

    passed = results['max_violation'] < threshold

    print(f"\n Maximum violation: {results['max_violation']:.6f}")
    print(f" Requirement: < {threshold}")
    print(f" Status: {get_status_text(passed)}")

    return passed


def print_summary(rms_passed, constraint_passed):
    """Print summary"""
    print("\n" + Color.BLUE + Color.BOLD + "=" * 65 + Color.RESET)
    print(Color.BOLD + "Verification Summary" + Color.RESET)
    print(Color.BLUE + Color.BOLD + "=" * 65 + Color.RESET)

    all_passed = rms_passed and constraint_passed

    res_rms = get_status_text(rms_passed)
    res_con = get_status_text(constraint_passed)

    print(f" [1] RMS trajectory check: {res_rms}")
    print(f" [2] ADM constraint check: {res_con}")

    final_status = f"{Color.GREEN}{Color.BOLD}ALL CHECKS PASSED{Color.RESET}" if all_passed else f"{Color.RED}{Color.BOLD}SOME CHECKS FAILED{Color.RESET}"
    print(f"\n Overall result: {final_status}")
    print(Color.BLUE + Color.BOLD + "=" * 65 + Color.RESET + "\n")

    return all_passed


def main():
    # Determine target (optimized) output directory
    if len(sys.argv) > 1:
        target_dir = sys.argv[1]
    else:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        target_dir = os.path.join(script_dir, "GW150914/AMSS_NCKU_output")

    # Determine reference (baseline) directory
    script_dir = os.path.dirname(os.path.abspath(__file__))
    reference_dir = os.path.join(script_dir, "GW150914-origin/AMSS_NCKU_output")

    # Data file paths
    bh_file_ref = os.path.join(reference_dir, "bssn_BH.dat")
    bh_file_target = os.path.join(target_dir, "bssn_BH.dat")
    constraint_file = os.path.join(target_dir, "bssn_constraint.dat")

    # Check if files exist
    if not os.path.exists(bh_file_ref):
        print(f"{Color.RED}{Color.BOLD}Error:{Color.RESET} Baseline trajectory file not found: {bh_file_ref}")
        sys.exit(1)

    if not os.path.exists(bh_file_target):
        print(f"{Color.RED}{Color.BOLD}Error:{Color.RESET} Target trajectory file not found: {bh_file_target}")
        sys.exit(1)

    if not os.path.exists(constraint_file):
        print(f"{Color.RED}{Color.BOLD}Error:{Color.RESET} Constraint data file not found: {constraint_file}")
        sys.exit(1)

    # Print header
    print_header()
    print(f"\n{Color.BOLD}Reference (Baseline):{Color.RESET} {Color.BLUE}{reference_dir}{Color.RESET}")
    print(f"{Color.BOLD}Target (Optimized): {Color.RESET} {Color.BLUE}{target_dir}{Color.RESET}")

    # Load data
    bh_data_ref = load_bh_trajectory(bh_file_ref)
    bh_data_target = load_bh_trajectory(bh_file_target)
    constraint_data = load_constraint_data(constraint_file)

    # Calculate RMS error
    rms_rel, error = calculate_rms_error(bh_data_ref, bh_data_target)
    rms_passed = print_rms_results(rms_rel, error)

    # Analyze constraint violation
    constraint_results = analyze_constraint_violation(constraint_data)
    constraint_passed = print_constraint_results(constraint_results)

    # Print summary
    all_passed = print_summary(rms_passed, constraint_passed)

    # Return exit code
    sys.exit(0 if all_passed else 1)


if __name__ == "__main__":
    main()
```
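The RMS metric can also be exercised on its own with synthetic trajectories; a small illustrative check (the dictionary layout mirrors `load_bh_trajectory` above, the numbers are made up):

```python
import numpy as np
from AMSS_NCKU_Verify_ASC26 import calculate_rms_error

t = np.linspace(0.0, 100.0, 200)
ref = {'time': t,
       'x1':  np.cos(t), 'y1':  np.sin(t), 'z1': np.zeros_like(t),
       'x2': -np.cos(t), 'y2': -np.sin(t), 'z2': np.zeros_like(t)}
# "Optimized" run with a 0.5% radial perturbation of the same orbit
tgt = {k: (v * 1.005 if k != 'time' else v) for k, v in ref.items()}

rms, err = calculate_rms_error(ref, tgt)
print(rms, err)   # roughly 0.5 (percent), err is None, so this would pass the < 1% requirement
```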
```diff
@@ -37,57 +37,51 @@ close(77)
 end program checkFFT
 #endif
 
+!-------------
+! Optimized FFT using Intel oneMKL DFTI
+! Mathematical equivalence: Standard DFT definition
+! Forward (isign=1): X[k] = sum_{n=0}^{N-1} x[n] * exp(-2*pi*i*k*n/N)
+! Backward (isign=-1): X[k] = sum_{n=0}^{N-1} x[n] * exp(+2*pi*i*k*n/N)
+! Input/Output: dataa is interleaved complex array [Re(0),Im(0),Re(1),Im(1),...]
 !-------------
 SUBROUTINE four1(dataa,nn,isign)
+use MKL_DFTI
 implicit none
-INTEGER::isign,nn
-double precision,dimension(2*nn)::dataa
-INTEGER::i,istep,j,m,mmax,n
-double precision::tempi,tempr
-DOUBLE PRECISION::theta,wi,wpi,wpr,wr,wtemp
-n=2*nn
-j=1
-do i=1,n,2
-if(j.gt.i)then
-tempr=dataa(j)
-tempi=dataa(j+1)
-dataa(j)=dataa(i)
-dataa(j+1)=dataa(i+1)
-dataa(i)=tempr
-dataa(i+1)=tempi
-endif
-m=nn
-1 if ((m.ge.2).and.(j.gt.m)) then
-j=j-m
-m=m/2
-goto 1
-endif
-j=j+m
-enddo
-mmax=2
-2 if (n.gt.mmax) then
-istep=2*mmax
-theta=6.28318530717959d0/(isign*mmax)
-wpr=-2.d0*sin(0.5d0*theta)**2
-wpi=sin(theta)
-wr=1.d0
-wi=0.d0
-do m=1,mmax,2
-do i=m,n,istep
-j=i+mmax
-tempr=sngl(wr)*dataa(j)-sngl(wi)*dataa(j+1)
-tempi=sngl(wr)*dataa(j+1)+sngl(wi)*dataa(j)
-dataa(j)=dataa(i)-tempr
-dataa(j+1)=dataa(i+1)-tempi
-dataa(i)=dataa(i)+tempr
-dataa(i+1)=dataa(i+1)+tempi
-enddo
-wtemp=wr
-wr=wr*wpr-wi*wpi+wr
-wi=wi*wpr+wtemp*wpi+wi
-enddo
-mmax=istep
-goto 2
+INTEGER, intent(in) :: isign, nn
+DOUBLE PRECISION, dimension(2*nn), intent(inout) :: dataa
+type(DFTI_DESCRIPTOR), pointer :: desc
+integer :: status
+
+! Create DFTI descriptor for 1D complex-to-complex transform
+status = DftiCreateDescriptor(desc, DFTI_DOUBLE, DFTI_COMPLEX, 1, nn)
+if (status /= 0) return
+
+! Set input/output storage as interleaved complex (default)
+status = DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
+if (status /= 0) then
+status = DftiFreeDescriptor(desc)
+return
 endif
+
+! Commit the descriptor
+status = DftiCommitDescriptor(desc)
+if (status /= 0) then
+status = DftiFreeDescriptor(desc)
+return
+endif
+
+! Execute FFT based on direction
+if (isign == 1) then
+! Forward FFT: exp(-2*pi*i*k*n/N)
+status = DftiComputeForward(desc, dataa)
+else
+! Backward FFT: exp(+2*pi*i*k*n/N)
+status = DftiComputeBackward(desc, dataa)
+endif
+
+! Free descriptor
+status = DftiFreeDescriptor(desc)
+
 return
 END SUBROUTINE four1
```
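One point worth checking at the call sites (not shown in this hunk): like the Danielson-Lanczos routine it replaces, the MKL path applies no 1/N scaling in either direction, since DFTI's default forward and backward scale factors are 1.0, so any normalization the existing callers perform after the inverse transform should carry over unchanged.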
```diff
@@ -341,8 +341,9 @@ void Patch::Interp_Points(MyList<var> *VarList,
                           double *Shellf, int Symmetry)
 {
   // NOTE: we do not Synchnize variables here, make sure of that before calling this routine
-  int myrank;
+  int myrank, nprocs;
   MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+
   int ordn = 2 * ghost_width;
   MyList<var> *varl;
@@ -354,24 +355,18 @@ void Patch::Interp_Points(MyList<var> *VarList,
     varl = varl->next;
   }
 
-  double *shellf;
-  shellf = new double[NN * num_var];
-  memset(shellf, 0, sizeof(double) * NN * num_var);
+  memset(Shellf, 0, sizeof(double) * NN * num_var);
 
-  // we use weight to monitor code, later some day we can move it for optimization
-  int *weight;
-  weight = new int[NN];
-  memset(weight, 0, sizeof(int) * NN);
-
-  double *DH, *llb, *uub;
-  DH = new double[dim];
+  // owner_rank[j] records which MPI rank owns point j
+  // All ranks traverse the same block list so they all agree on ownership
+  int *owner_rank;
+  owner_rank = new int[NN];
+  for (int j = 0; j < NN; j++)
+    owner_rank[j] = -1;
+
+  double DH[dim], llb[dim], uub[dim];
   for (int i = 0; i < dim; i++)
-  {
     DH[i] = getdX(i);
-  }
-  llb = new double[dim];
-  uub = new double[dim];
 
   for (int j = 0; j < NN; j++) // run along points
   {
@@ -403,12 +398,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
       bool flag = true;
       for (int i = 0; i < dim; i++)
       {
-        // NOTE: our dividing structure is (exclude ghost)
-        //       -1  0
-        //        1  2
-        // so (0,1) does not belong to any part for vertex structure
-        // here we put (0,0.5) to left part and (0.5,1) to right part
-        // BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
 #ifdef Vertex
 #ifdef Cell
 #error Both Cell and Vertex are defined
@@ -433,6 +422,7 @@ void Patch::Interp_Points(MyList<var> *VarList,
       if (flag)
       {
         notfind = false;
+        owner_rank[j] = BP->rank;
         if (myrank == BP->rank)
         {
           //---> interpolation
@@ -440,14 +430,11 @@ void Patch::Interp_Points(MyList<var> *VarList,
           int k = 0;
           while (varl) // run along variables
           {
-            // shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
-            //                                               pox,ordn,varl->data->SoA,Symmetry);
-            f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
+            f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
                             pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
             varl = varl->next;
             k++;
           }
-          weight[j] = 1;
         }
       }
       if (Bp == ble)
```
```diff
@@ -456,103 +443,327 @@ void Patch::Interp_Points(MyList<var> *VarList,
     }
   }
 
-  MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
-  int *Weight;
-  Weight = new int[NN];
-  MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-
-  // misc::tillherecheck("print me");
-
-  for (int i = 0; i < NN; i++)
+  // Replace MPI_Allreduce with per-owner MPI_Bcast:
+  // Group consecutive points by owner rank and broadcast each group.
+  // Since each point's data is non-zero only on the owner rank,
+  // Bcast from owner is equivalent to Allreduce(MPI_SUM) but much cheaper.
   {
-    if (Weight[i] > 1)
+    int j = 0;
+    while (j < NN)
     {
-      if (myrank == 0)
-        cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
-      for (int j = 0; j < num_var; j++)
-        Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
+      int cur_owner = owner_rank[j];
+      if (cur_owner < 0)
+      {
+        if (myrank == 0)
+        {
+          cout << "ERROR: Patch::Interp_Points fails to find point (";
+          for (int d = 0; d < dim; d++)
+          {
+            cout << XX[d][j];
+            if (d < dim - 1)
+              cout << ",";
+            else
+              cout << ")";
+          }
+          cout << " on Patch (";
+          for (int d = 0; d < dim; d++)
+          {
+            cout << bbox[d] << "+" << lli[d] * DH[d];
+            if (d < dim - 1)
+              cout << ",";
+            else
+              cout << ")--";
+          }
+          cout << "(";
+          for (int d = 0; d < dim; d++)
+          {
+            cout << bbox[dim + d] << "-" << uui[d] * DH[d];
+            if (d < dim - 1)
+              cout << ",";
+            else
+              cout << ")" << endl;
+          }
+          MPI_Abort(MPI_COMM_WORLD, 1);
+        }
+        j++;
+        continue;
+      }
+      // Find contiguous run of points with the same owner
+      int jstart = j;
+      while (j < NN && owner_rank[j] == cur_owner)
+        j++;
+      int count = (j - jstart) * num_var;
+      MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
     }
-    else if (Weight[i] == 0 && myrank == 0)
+  }
+
+  delete[] owner_rank;
+}
+void Patch::Interp_Points(MyList<var> *VarList,
+                          int NN, double **XX,
+                          double *Shellf, int Symmetry,
+                          int Nmin_consumer, int Nmax_consumer)
+{
+  // Targeted point-to-point overload: each owner sends each point only to
+  // the one rank that needs it for integration (consumer), reducing
+  // communication volume by ~nprocs times compared to the Bcast version.
+  int myrank, nprocs;
+  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+
+  int ordn = 2 * ghost_width;
+  MyList<var> *varl;
+  int num_var = 0;
+  varl = VarList;
+  while (varl)
+  {
+    num_var++;
+    varl = varl->next;
+  }
+
+  memset(Shellf, 0, sizeof(double) * NN * num_var);
+
+  // owner_rank[j] records which MPI rank owns point j
+  int *owner_rank;
+  owner_rank = new int[NN];
+  for (int j = 0; j < NN; j++)
+    owner_rank[j] = -1;
+
+  double DH[dim], llb[dim], uub[dim];
+  for (int i = 0; i < dim; i++)
+    DH[i] = getdX(i);
+
+  // --- Interpolation phase (identical to original) ---
+  for (int j = 0; j < NN; j++)
+  {
+    double pox[dim];
+    for (int i = 0; i < dim; i++)
+    {
+      pox[i] = XX[i][j];
+      if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
+      {
+        cout << "Patch::Interp_Points: point (";
+        for (int k = 0; k < dim; k++)
+        {
+          cout << XX[k][j];
+          if (k < dim - 1)
+            cout << ",";
+          else
+            cout << ") is out of current Patch." << endl;
+        }
+        MPI_Abort(MPI_COMM_WORLD, 1);
+      }
+    }
+
+    MyList<Block> *Bp = blb;
+    bool notfind = true;
+    while (notfind && Bp)
+    {
+      Block *BP = Bp->data;
+
+      bool flag = true;
+      for (int i = 0; i < dim; i++)
+      {
+#ifdef Vertex
+#ifdef Cell
+#error Both Cell and Vertex are defined
+#endif
+        llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
+        uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
+#else
+#ifdef Cell
+        llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
+        uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
+#else
+#error Not define Vertex nor Cell
+#endif
+#endif
+        if (XX[i][j] - llb[i] < -DH[i] / 2 || XX[i][j] - uub[i] > DH[i] / 2)
+        {
+          flag = false;
+          break;
+        }
+      }
+
+      if (flag)
+      {
+        notfind = false;
+        owner_rank[j] = BP->rank;
+        if (myrank == BP->rank)
+        {
+          varl = VarList;
+          int k = 0;
+          while (varl)
+          {
+            f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
+                            pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
+            varl = varl->next;
+            k++;
+          }
+        }
+      }
+      if (Bp == ble)
+        break;
+      Bp = Bp->next;
+    }
+  }
+
+  // --- Error check for unfound points ---
+  for (int j = 0; j < NN; j++)
+  {
+    if (owner_rank[j] < 0 && myrank == 0)
     {
       cout << "ERROR: Patch::Interp_Points fails to find point (";
-      for (int j = 0; j < dim; j++)
+      for (int d = 0; d < dim; d++)
       {
-        cout << XX[j][i];
-        if (j < dim - 1)
+        cout << XX[d][j];
+        if (d < dim - 1)
           cout << ",";
         else
           cout << ")";
       }
       cout << " on Patch (";
-      for (int j = 0; j < dim; j++)
+      for (int d = 0; d < dim; d++)
       {
-        cout << bbox[j] << "+" << lli[j] * getdX(j);
-        if (j < dim - 1)
+        cout << bbox[d] << "+" << lli[d] * DH[d];
+        if (d < dim - 1)
           cout << ",";
         else
           cout << ")--";
       }
       cout << "(";
-      for (int j = 0; j < dim; j++)
+      for (int d = 0; d < dim; d++)
       {
-        cout << bbox[dim + j] << "-" << uui[j] * getdX(j);
-        if (j < dim - 1)
+        cout << bbox[dim + d] << "-" << uui[d] * DH[d];
+        if (d < dim - 1)
           cout << ",";
         else
           cout << ")" << endl;
       }
-#if 0
-      checkBlock();
-#else
-      cout << "splited domains:" << endl;
-      {
-        MyList<Block> *Bp = blb;
-        while (Bp)
-        {
-          Block *BP = Bp->data;
-
-          for (int i = 0; i < dim; i++)
-          {
-#ifdef Vertex
-#ifdef Cell
-#error Both Cell and Vertex are defined
-#endif
-            llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
-            uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
-#else
-#ifdef Cell
-            llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
-            uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
-#else
-#error Not define Vertex nor Cell
-#endif
-#endif
-          }
-          cout << "(";
-          for (int j = 0; j < dim; j++)
-          {
-            cout << llb[j] << ":" << uub[j];
-            if (j < dim - 1)
-              cout << ",";
-            else
-              cout << ")" << endl;
-          }
-          if (Bp == ble)
-            break;
-          Bp = Bp->next;
-        }
-      }
-#endif
       MPI_Abort(MPI_COMM_WORLD, 1);
     }
   }
 
-  delete[] shellf;
-  delete[] weight;
-  delete[] Weight;
-  delete[] DH;
-  delete[] llb;
-  delete[] uub;
+  // --- Targeted point-to-point communication phase ---
+  // Compute consumer_rank[j] using the same deterministic formula as surface_integral
+  int *consumer_rank = new int[NN];
+  {
+    int mp = NN / nprocs;
+    int Lp = NN - nprocs * mp;
+    for (int j = 0; j < NN; j++)
+    {
+      if (j < Lp * (mp + 1))
+        consumer_rank[j] = j / (mp + 1);
+      else
+        consumer_rank[j] = Lp + (j - Lp * (mp + 1)) / mp;
+    }
+  }
+
+  // Count sends and recvs per rank
+  int *send_count = new int[nprocs];
+  int *recv_count = new int[nprocs];
+  memset(send_count, 0, sizeof(int) * nprocs);
+  memset(recv_count, 0, sizeof(int) * nprocs);
+
+  for (int j = 0; j < NN; j++)
+  {
+    int own = owner_rank[j];
+    int con = consumer_rank[j];
+    if (own == con)
+      continue; // local — no communication needed
+    if (own == myrank)
+      send_count[con]++;
+    if (con == myrank)
+      recv_count[own]++;
+  }
+
+  // Build send buffers: for each destination rank, pack (index, data) pairs
+  // Each entry: 1 int (point index j) + num_var doubles
+  int total_send = 0, total_recv = 0;
+  int *send_offset = new int[nprocs];
+  int *recv_offset = new int[nprocs];
+  for (int r = 0; r < nprocs; r++)
+  {
+    send_offset[r] = total_send;
+    total_send += send_count[r];
+    recv_offset[r] = total_recv;
+    total_recv += recv_count[r];
+  }
+
+  // Pack send buffers: each message contains (j, data[0..num_var-1]) per point
+  int stride = 1 + num_var; // 1 double for index + num_var doubles for data
+  double *sendbuf = new double[total_send * stride];
+  double *recvbuf = new double[total_recv * stride];
+
+  // Temporary counters for packing
+  int *pack_pos = new int[nprocs];
+  memset(pack_pos, 0, sizeof(int) * nprocs);
+
+  for (int j = 0; j < NN; j++)
+  {
+    int own = owner_rank[j];
+    int con = consumer_rank[j];
+    if (own != myrank || con == myrank)
+      continue;
+    int pos = (send_offset[con] + pack_pos[con]) * stride;
+    sendbuf[pos] = (double)j; // point index
+    for (int v = 0; v < num_var; v++)
+      sendbuf[pos + 1 + v] = Shellf[j * num_var + v];
+    pack_pos[con]++;
+  }
+
+  // Post non-blocking recvs and sends
+  int n_req = 0;
+  for (int r = 0; r < nprocs; r++)
+  {
+    if (recv_count[r] > 0) n_req++;
+    if (send_count[r] > 0) n_req++;
+  }
+
+  MPI_Request *reqs = new MPI_Request[n_req];
+  int req_idx = 0;
+
+  for (int r = 0; r < nprocs; r++)
+  {
+    if (recv_count[r] > 0)
+    {
+      MPI_Irecv(recvbuf + recv_offset[r] * stride,
+                recv_count[r] * stride, MPI_DOUBLE,
+                r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
+    }
+  }
+  for (int r = 0; r < nprocs; r++)
+  {
+    if (send_count[r] > 0)
+    {
+      MPI_Isend(sendbuf + send_offset[r] * stride,
+                send_count[r] * stride, MPI_DOUBLE,
+                r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
+    }
+  }
+
+  if (n_req > 0)
+    MPI_Waitall(n_req, reqs, MPI_STATUSES_IGNORE);
+
+  // Unpack recv buffers into Shellf
+  for (int i = 0; i < total_recv; i++)
+  {
+    int pos = i * stride;
+    int j = (int)recvbuf[pos];
+    for (int v = 0; v < num_var; v++)
+      Shellf[j * num_var + v] = recvbuf[pos + 1 + v];
+  }
+
+  delete[] reqs;
+  delete[] sendbuf;
+  delete[] recvbuf;
+  delete[] pack_pos;
+  delete[] send_offset;
+  delete[] recv_offset;
+  delete[] send_count;
+  delete[] recv_count;
+  delete[] consumer_rank;
+  delete[] owner_rank;
 }
 void Patch::Interp_Points(MyList<var> *VarList,
                           int NN, double **XX,
```
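The equivalence claimed in the comments (a broadcast from each point's owner reproduces the old `Allreduce(MPI_SUM)` result because every non-owner contributes zeros) can be checked with a small standalone script; this sketch uses mpi4py, which is not part of the project, purely as an illustration:

```python
# owner_bcast_demo.py: run with e.g. `mpiexec -n 4 python owner_bcast_demo.py`
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

NN = 12
owner = np.repeat(np.arange(size), int(np.ceil(NN / size)))[:NN]  # contiguous runs of owners
local = np.zeros(NN)
local[owner == rank] = rank + 1.0        # only the owner fills its points

# Old scheme: sum-reduce the sparse contributions from all ranks
summed = np.zeros(NN)
comm.Allreduce(local, summed, op=MPI.SUM)

# New scheme: broadcast each owner's contiguous run of points
result = local.copy()
j = 0
while j < NN:
    root = int(owner[j])
    jstart = j
    while j < NN and owner[j] == root:
        j += 1
    chunk = np.ascontiguousarray(result[jstart:j])
    comm.Bcast(chunk, root=root)
    result[jstart:j] = chunk

assert np.allclose(summed, result)       # identical answers with far less traffic on large runs
```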
```diff
@@ -573,24 +784,22 @@ void Patch::Interp_Points(MyList<var> *VarList,
     varl = varl->next;
   }
 
-  double *shellf;
-  shellf = new double[NN * num_var];
-  memset(shellf, 0, sizeof(double) * NN * num_var);
+  memset(Shellf, 0, sizeof(double) * NN * num_var);
 
-  // we use weight to monitor code, later some day we can move it for optimization
-  int *weight;
-  weight = new int[NN];
-  memset(weight, 0, sizeof(int) * NN);
-
-  double *DH, *llb, *uub;
-  DH = new double[dim];
+  // owner_rank[j] stores the global rank that owns point j
+  int *owner_rank;
+  owner_rank = new int[NN];
+  for (int j = 0; j < NN; j++)
+    owner_rank[j] = -1;
+
+  // Build global-to-local rank translation for Comm_here
+  MPI_Group world_group, local_group;
+  MPI_Comm_group(MPI_COMM_WORLD, &world_group);
+  MPI_Comm_group(Comm_here, &local_group);
+
+  double DH[dim], llb[dim], uub[dim];
   for (int i = 0; i < dim; i++)
-  {
     DH[i] = getdX(i);
-  }
-  llb = new double[dim];
-  uub = new double[dim];
 
   for (int j = 0; j < NN; j++) // run along points
   {
@@ -622,12 +831,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
       bool flag = true;
       for (int i = 0; i < dim; i++)
      {
-        // NOTE: our dividing structure is (exclude ghost)
-        //       -1  0
-        //        1  2
-        // so (0,1) does not belong to any part for vertex structure
-        // here we put (0,0.5) to left part and (0.5,1) to right part
-        // BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
 #ifdef Vertex
 #ifdef Cell
 #error Both Cell and Vertex are defined
@@ -652,6 +855,7 @@ void Patch::Interp_Points(MyList<var> *VarList,
       if (flag)
       {
         notfind = false;
+        owner_rank[j] = BP->rank;
         if (myrank == BP->rank)
         {
           //---> interpolation
@@ -659,14 +863,11 @@ void Patch::Interp_Points(MyList<var> *VarList,
           int k = 0;
           while (varl) // run along variables
           {
-            // shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
-            //                                               pox,ordn,varl->data->SoA,Symmetry);
-            f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
+            f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
                             pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
             varl = varl->next;
             k++;
           }
-          weight[j] = 1;
         }
       }
       if (Bp == ble)
@@ -675,97 +876,35 @@ void Patch::Interp_Points(MyList<var> *VarList,
     }
   }
 
-  MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, Comm_here);
-  int *Weight;
-  Weight = new int[NN];
-  MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, Comm_here);
-
-  // misc::tillherecheck("print me");
-  // if(lmyrank == 0) cout<<"myrank = "<<myrank<<"print me"<<endl;
-
-  for (int i = 0; i < NN; i++)
+  // Collect unique global owner ranks and translate to local ranks in Comm_here
+  // Then broadcast each owner's points via MPI_Bcast on Comm_here
   {
-    if (Weight[i] > 1)
+    int j = 0;
+    while (j < NN)
     {
-      if (lmyrank == 0)
-        cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
-      for (int j = 0; j < num_var; j++)
-        Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
+      int cur_owner_global = owner_rank[j];
+      if (cur_owner_global < 0)
+      {
+        // Point not found — skip (error check disabled for sub-communicator levels)
+        j++;
+        continue;
+      }
+      // Translate global rank to local rank in Comm_here
+      int cur_owner_local;
+      MPI_Group_translate_ranks(world_group, 1, &cur_owner_global, local_group, &cur_owner_local);
+
+      // Find contiguous run of points with the same owner
+      int jstart = j;
+      while (j < NN && owner_rank[j] == cur_owner_global)
+        j++;
+      int count = (j - jstart) * num_var;
+      MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner_local, Comm_here);
     }
-#if 0 // for not involved levels, this may fail
-    else if(Weight[i] == 0 && lmyrank == 0)
-    {
-      cout<<"ERROR: Patch::Interp_Points fails to find point (";
-      for(int j=0;j<dim;j++)
-      {
-        cout<<XX[j][i];
-        if(j<dim-1) cout<<",";
-        else cout<<")";
-      }
-      cout<<" on Patch (";
-      for(int j=0;j<dim;j++)
-      {
-        cout<<bbox[j]<<"+"<<lli[j]*getdX(j);
-        if(j<dim-1) cout<<",";
-        else cout<<")--";
-      }
-      cout<<"(";
-      for(int j=0;j<dim;j++)
-      {
-        cout<<bbox[dim+j]<<"-"<<uui[j]*getdX(j);
-        if(j<dim-1) cout<<",";
-        else cout<<")"<<endl;
-      }
-#if 0
-      checkBlock();
-#else
-      cout<<"splited domains:"<<endl;
-      {
-        MyList<Block> *Bp=blb;
-        while(Bp)
-        {
-          Block *BP=Bp->data;
-
-          for(int i=0;i<dim;i++)
-          {
-#ifdef Vertex
-#ifdef Cell
-#error Both Cell and Vertex are defined
-#endif
-            llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +(ghost_width-0.5)*DH[i];
-            uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-(ghost_width-0.5)*DH[i];
-#else
-#ifdef Cell
-            llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +ghost_width*DH[i];
-            uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-ghost_width*DH[i];
-#else
-#error Not define Vertex nor Cell
-#endif
-#endif
-          }
-          cout<<"(";
-          for(int j=0;j<dim;j++)
-          {
-            cout<<llb[j]<<":"<<uub[j];
-            if(j<dim-1) cout<<",";
-            else cout<<")"<<endl;
-          }
-          if(Bp == ble) break;
-          Bp=Bp->next;
-        }
-      }
-#endif
-      MPI_Abort(MPI_COMM_WORLD,1);
-    }
-#endif
   }
 
-  delete[] shellf;
-  delete[] weight;
-  delete[] Weight;
-  delete[] DH;
-  delete[] llb;
-  delete[] uub;
+  MPI_Group_free(&world_group);
+  MPI_Group_free(&local_group);
+  delete[] owner_rank;
 }
 void Patch::checkBlock()
 {
```
```diff
@@ -39,6 +39,10 @@ public:
 
   bool Find_Point(double *XX);
 
+  void Interp_Points(MyList<var> *VarList,
+                     int NN, double **XX,
+                     double *Shellf, int Symmetry,
+                     int Nmin_consumer, int Nmax_consumer);
   void Interp_Points(MyList<var> *VarList,
                      int NN, double **XX,
                      double *Shellf, int Symmetry, MPI_Comm Comm_here);
```
@@ -3756,6 +3756,502 @@ void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
|
|||||||
delete[] transfer_src;
|
delete[] transfer_src;
|
||||||
delete[] transfer_dst;
|
delete[] transfer_dst;
|
||||||
}
|
}
|
||||||
|
// Merged Sync: collect all intra-patch and inter-patch grid segment lists,
|
||||||
|
// then issue a single transfer() call instead of N+1 separate ones.
|
||||||
|
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
|
||||||
|
{
|
||||||
|
int cpusize;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
|
||||||
|
|
||||||
|
MyList<Parallel::gridseg> **combined_src = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
MyList<Parallel::gridseg> **combined_dst = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
combined_src[node] = combined_dst[node] = 0;
|
||||||
|
|
||||||
|
// Phase A: Intra-patch ghost exchange segments
|
||||||
|
MyList<Patch> *Pp = PatL;
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
|
Patch *Pat = Pp->data;
|
||||||
|
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
|
||||||
|
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
|
||||||
|
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
|
||||||
|
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
|
||||||
|
|
||||||
|
if (tsrc)
|
||||||
|
{
|
||||||
|
if (combined_src[node])
|
||||||
|
combined_src[node]->catList(tsrc);
|
||||||
|
else
|
||||||
|
combined_src[node] = tsrc;
|
||||||
|
}
|
||||||
|
if (tdst)
|
||||||
|
{
|
||||||
|
if (combined_dst[node])
|
||||||
|
combined_dst[node]->catList(tdst);
|
||||||
|
else
|
||||||
|
combined_dst[node] = tdst;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (src_owned)
|
||||||
|
src_owned->destroyList();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dst_ghost)
|
||||||
|
dst_ghost->destroyList();
|
||||||
|
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase B: Inter-patch buffer exchange segments
|
||||||
|
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
|
||||||
|
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
|
||||||
|
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
|
||||||
|
|
||||||
|
if (tsrc)
|
||||||
|
{
|
||||||
|
if (combined_src[node])
|
||||||
|
combined_src[node]->catList(tsrc);
|
||||||
|
else
|
||||||
|
combined_src[node] = tsrc;
|
||||||
|
}
|
||||||
|
if (tdst)
|
||||||
|
{
|
||||||
|
if (combined_dst[node])
|
||||||
|
combined_dst[node]->catList(tdst);
|
||||||
|
else
|
||||||
|
combined_dst[node] = tdst;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (src_owned)
|
||||||
|
src_owned->destroyList();
|
||||||
|
}
|
||||||
|
if (dst_buffer)
|
||||||
|
dst_buffer->destroyList();
|
||||||
|
|
||||||
|
// Phase C: Single transfer
|
||||||
|
transfer(combined_src, combined_dst, VarList, VarList, Symmetry);
|
||||||
|
|
||||||
|
// Phase D: Cleanup
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
if (combined_src[node])
|
||||||
|
combined_src[node]->destroyList();
|
||||||
|
if (combined_dst[node])
|
||||||
|
combined_dst[node]->destroyList();
|
||||||
|
}
|
||||||
|
delete[] combined_src;
|
||||||
|
delete[] combined_dst;
|
||||||
|
}
|
||||||
|
// SyncCache constructor
|
||||||
|
Parallel::SyncCache::SyncCache()
|
||||||
|
: valid(false), cpusize(0), combined_src(0), combined_dst(0),
|
||||||
|
send_lengths(0), recv_lengths(0), send_bufs(0), recv_bufs(0),
|
||||||
|
send_buf_caps(0), recv_buf_caps(0), reqs(0), stats(0), max_reqs(0),
|
||||||
|
lengths_valid(false)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
// SyncCache invalidate: free grid segment lists but keep buffers
|
||||||
|
void Parallel::SyncCache::invalidate()
|
||||||
|
{
|
||||||
|
if (!valid)
|
||||||
|
return;
|
||||||
|
for (int i = 0; i < cpusize; i++)
|
||||||
|
{
|
||||||
|
if (combined_src[i])
|
||||||
|
combined_src[i]->destroyList();
|
||||||
|
if (combined_dst[i])
|
||||||
|
combined_dst[i]->destroyList();
|
||||||
|
combined_src[i] = combined_dst[i] = 0;
|
||||||
|
send_lengths[i] = recv_lengths[i] = 0;
|
||||||
|
}
|
||||||
|
valid = false;
|
||||||
|
lengths_valid = false;
|
||||||
|
}
|
||||||
|
// SyncCache destroy: free everything
|
||||||
|
void Parallel::SyncCache::destroy()
|
||||||
|
{
|
||||||
|
invalidate();
|
||||||
|
if (combined_src) delete[] combined_src;
|
||||||
|
if (combined_dst) delete[] combined_dst;
|
||||||
|
if (send_lengths) delete[] send_lengths;
|
||||||
|
if (recv_lengths) delete[] recv_lengths;
|
||||||
|
if (send_buf_caps) delete[] send_buf_caps;
|
||||||
|
if (recv_buf_caps) delete[] recv_buf_caps;
|
||||||
|
for (int i = 0; i < cpusize; i++)
|
||||||
|
{
|
||||||
|
if (send_bufs && send_bufs[i]) delete[] send_bufs[i];
|
||||||
|
if (recv_bufs && recv_bufs[i]) delete[] recv_bufs[i];
|
||||||
|
}
|
||||||
|
if (send_bufs) delete[] send_bufs;
|
||||||
|
if (recv_bufs) delete[] recv_bufs;
|
||||||
|
if (reqs) delete[] reqs;
|
||||||
|
if (stats) delete[] stats;
|
||||||
|
combined_src = combined_dst = 0;
|
||||||
|
send_lengths = recv_lengths = 0;
|
||||||
|
send_buf_caps = recv_buf_caps = 0;
|
||||||
|
send_bufs = recv_bufs = 0;
|
||||||
|
reqs = 0; stats = 0;
|
||||||
|
cpusize = 0; max_reqs = 0;
|
||||||
|
}
|
||||||
|
// transfer_cached: reuse pre-allocated buffers from SyncCache
|
||||||
|
void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel::gridseg> **dst,
|
||||||
|
MyList<var> *VarList1, MyList<var> *VarList2,
|
||||||
|
int Symmetry, SyncCache &cache)
|
||||||
|
{
|
||||||
|
int myrank;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cache.cpusize);
|
||||||
|
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
|
||||||
|
int cpusize = cache.cpusize;
|
||||||
|
|
||||||
|
int req_no = 0;
|
||||||
|
int node;
|
||||||
|
|
||||||
|
for (node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
if (node == myrank)
|
||||||
|
{
|
||||||
|
int length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.recv_lengths[node] = length;
|
||||||
|
if (length > 0)
|
||||||
|
{
|
||||||
|
if (length > cache.recv_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
|
||||||
|
cache.recv_bufs[node] = new double[length];
|
||||||
|
cache.recv_buf_caps[node] = length;
|
||||||
|
}
|
||||||
|
data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// send
|
||||||
|
int slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.send_lengths[node] = slength;
|
||||||
|
if (slength > 0)
|
||||||
|
{
|
||||||
|
if (slength > cache.send_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
|
||||||
|
cache.send_bufs[node] = new double[slength];
|
||||||
|
cache.send_buf_caps[node] = slength;
|
||||||
|
}
|
||||||
|
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
|
||||||
|
}
|
||||||
|
// recv
|
||||||
|
int rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.recv_lengths[node] = rlength;
|
||||||
|
if (rlength > 0)
|
||||||
|
{
|
||||||
|
if (rlength > cache.recv_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
|
||||||
|
cache.recv_bufs[node] = new double[rlength];
|
||||||
|
cache.recv_buf_caps[node] = rlength;
|
||||||
|
}
|
||||||
|
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MPI_Waitall(req_no, cache.reqs, cache.stats);
|
||||||
|
|
||||||
|
for (node = 0; node < cpusize; node++)
|
||||||
|
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
|
||||||
|
data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
|
||||||
|
}
|
||||||
|
// Sync_cached: build grid segment lists on first call, reuse on subsequent calls
void Parallel::Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache)
{
    if (!cache.valid)
    {
        int cpusize;
        MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
        cache.cpusize = cpusize;

        // Allocate cache arrays if needed
        if (!cache.combined_src)
        {
            cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
            cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
            cache.send_lengths = new int[cpusize];
            cache.recv_lengths = new int[cpusize];
            cache.send_bufs = new double *[cpusize];
            cache.recv_bufs = new double *[cpusize];
            cache.send_buf_caps = new int[cpusize];
            cache.recv_buf_caps = new int[cpusize];
            for (int i = 0; i < cpusize; i++)
            {
                cache.send_bufs[i] = cache.recv_bufs[i] = 0;
                cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
            }
            cache.max_reqs = 2 * cpusize;
            cache.reqs = new MPI_Request[cache.max_reqs];
            cache.stats = new MPI_Status[cache.max_reqs];
        }

        for (int node = 0; node < cpusize; node++)
        {
            cache.combined_src[node] = cache.combined_dst[node] = 0;
            cache.send_lengths[node] = cache.recv_lengths[node] = 0;
        }

        // Build intra-patch segments (same as Sync_merged Phase A)
        MyList<Patch> *Pp = PatL;
        while (Pp)
        {
            Patch *Pat = Pp->data;
            MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
            for (int node = 0; node < cpusize; node++)
            {
                MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
                MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
                build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
                if (tsrc)
                {
                    if (cache.combined_src[node])
                        cache.combined_src[node]->catList(tsrc);
                    else
                        cache.combined_src[node] = tsrc;
                }
                if (tdst)
                {
                    if (cache.combined_dst[node])
                        cache.combined_dst[node]->catList(tdst);
                    else
                        cache.combined_dst[node] = tdst;
                }
                if (src_owned) src_owned->destroyList();
            }
            if (dst_ghost) dst_ghost->destroyList();
            Pp = Pp->next;
        }

        // Build inter-patch segments (same as Sync_merged Phase B)
        MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
        for (int node = 0; node < cpusize; node++)
        {
            MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
            MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
            build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
            if (tsrc)
            {
                if (cache.combined_src[node])
                    cache.combined_src[node]->catList(tsrc);
                else
                    cache.combined_src[node] = tsrc;
            }
            if (tdst)
            {
                if (cache.combined_dst[node])
                    cache.combined_dst[node]->catList(tdst);
                else
                    cache.combined_dst[node] = tdst;
            }
            if (src_owned) src_owned->destroyList();
        }
        if (dst_buffer) dst_buffer->destroyList();

        cache.valid = true;
    }

    // Use cached lists with buffer-reusing transfer
    transfer_cached(cache.combined_src, cache.combined_dst, VarList, VarList, Symmetry, cache);
}
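// Intended call pattern for Sync_cached: one persistent SyncCache per
// (patch list, variable list) pair, invalidated whenever the grid changes.
// This driver is only a sketch; the loop body and the surrounding setup are
// illustrative and not taken from the original sources.
static void evolve_with_cached_sync(MyList<Patch> *PatL, MyList<var> *StateList,
                                    int Symmetry, int nsteps)
{
    static Parallel::SyncCache ghost_cache;   // persists across steps
    for (int step = 0; step < nsteps; step++)
    {
        // ... update owned points of every patch in PatL (not shown) ...
        // The first call builds the combined segment lists; later calls
        // reuse both the lists and the message buffers.
        Parallel::Sync_cached(PatL, StateList, Symmetry, ghost_cache);
    }
    // After a regrid the cached segment lists no longer match patch
    // ownership, so the cache must be rebuilt on the next call:
    // ghost_cache.invalidate();
}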
// Sync_start: pack and post MPI_Isend/Irecv, return immediately
void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
                          SyncCache &cache, AsyncSyncState &state)
{
    // Ensure cache is built
    if (!cache.valid)
    {
        // Build cache (same logic as Sync_cached)
        int cpusize;
        MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
        cache.cpusize = cpusize;

        if (!cache.combined_src)
        {
            cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
            cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
            cache.send_lengths = new int[cpusize];
            cache.recv_lengths = new int[cpusize];
            cache.send_bufs = new double *[cpusize];
            cache.recv_bufs = new double *[cpusize];
            cache.send_buf_caps = new int[cpusize];
            cache.recv_buf_caps = new int[cpusize];
            for (int i = 0; i < cpusize; i++)
            {
                cache.send_bufs[i] = cache.recv_bufs[i] = 0;
                cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
            }
            cache.max_reqs = 2 * cpusize;
            cache.reqs = new MPI_Request[cache.max_reqs];
            cache.stats = new MPI_Status[cache.max_reqs];
        }

        for (int node = 0; node < cpusize; node++)
        {
            cache.combined_src[node] = cache.combined_dst[node] = 0;
            cache.send_lengths[node] = cache.recv_lengths[node] = 0;
        }

        MyList<Patch> *Pp = PatL;
        while (Pp)
        {
            Patch *Pat = Pp->data;
            MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
            for (int node = 0; node < cpusize; node++)
            {
                MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
                MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
                build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
                if (tsrc)
                {
                    if (cache.combined_src[node])
                        cache.combined_src[node]->catList(tsrc);
                    else
                        cache.combined_src[node] = tsrc;
                }
                if (tdst)
                {
                    if (cache.combined_dst[node])
                        cache.combined_dst[node]->catList(tdst);
                    else
                        cache.combined_dst[node] = tdst;
                }
                if (src_owned) src_owned->destroyList();
            }
            if (dst_ghost) dst_ghost->destroyList();
            Pp = Pp->next;
        }

        MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
        for (int node = 0; node < cpusize; node++)
        {
            MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
            MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
            build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
            if (tsrc)
            {
                if (cache.combined_src[node])
                    cache.combined_src[node]->catList(tsrc);
                else
                    cache.combined_src[node] = tsrc;
            }
            if (tdst)
            {
                if (cache.combined_dst[node])
                    cache.combined_dst[node]->catList(tdst);
                else
                    cache.combined_dst[node] = tdst;
            }
            if (src_owned) src_owned->destroyList();
        }
        if (dst_buffer) dst_buffer->destroyList();
        cache.valid = true;
    }

    // Now pack and post async MPI operations
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    int cpusize = cache.cpusize;
    state.req_no = 0;
    state.active = true;

    MyList<Parallel::gridseg> **src = cache.combined_src;
    MyList<Parallel::gridseg> **dst = cache.combined_dst;

    for (int node = 0; node < cpusize; node++)
    {
        if (node == myrank)
        {
            int length;
            if (!cache.lengths_valid) {
                length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
                cache.recv_lengths[node] = length;
            } else {
                length = cache.recv_lengths[node];
            }
            if (length > 0)
            {
                if (length > cache.recv_buf_caps[node])
                {
                    if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
                    cache.recv_bufs[node] = new double[length];
                    cache.recv_buf_caps[node] = length;
                }
                data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
            }
        }
        else
        {
            int slength;
            if (!cache.lengths_valid) {
                slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
                cache.send_lengths[node] = slength;
            } else {
                slength = cache.send_lengths[node];
            }
            if (slength > 0)
            {
                if (slength > cache.send_buf_caps[node])
                {
                    if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
                    cache.send_bufs[node] = new double[slength];
                    cache.send_buf_caps[node] = slength;
                }
                data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
                MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
            }
            int rlength;
            if (!cache.lengths_valid) {
                rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);
                cache.recv_lengths[node] = rlength;
            } else {
                rlength = cache.recv_lengths[node];
            }
            if (rlength > 0)
            {
                if (rlength > cache.recv_buf_caps[node])
                {
                    if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
                    cache.recv_bufs[node] = new double[rlength];
                    cache.recv_buf_caps[node] = rlength;
                }
                MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
            }
        }
    }
    cache.lengths_valid = true;
}
// Sync_finish: wait for async MPI operations and unpack
void Parallel::Sync_finish(SyncCache &cache, AsyncSyncState &state,
                           MyList<var> *VarList, int Symmetry)
{
    if (!state.active)
        return;

    MPI_Waitall(state.req_no, cache.reqs, cache.stats);

    int cpusize = cache.cpusize;
    MyList<Parallel::gridseg> **src = cache.combined_src;
    MyList<Parallel::gridseg> **dst = cache.combined_dst;

    for (int node = 0; node < cpusize; node++)
        if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
            data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);

    state.active = false;
}
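// The split Sync_start/Sync_finish pair exists so that work which does not
// touch ghost zones can be overlapped with the MPI traffic.  A sketch of the
// intended overlap, using only the functions declared in this changeset
// (the interior-update step is illustrative only):
static void overlapped_sync_step(MyList<Patch> *PatL, MyList<var> *VarList,
                                 int Symmetry, Parallel::SyncCache &cache)
{
    Parallel::AsyncSyncState st;

    // Pack ghost data and post MPI_Isend/MPI_Irecv, then return immediately.
    Parallel::Sync_start(PatL, VarList, Symmetry, cache, st);

    // ... compute anything that only reads/writes interior (owned) points ...

    // Wait on the posted requests and unpack into the ghost zones.
    Parallel::Sync_finish(cache, st, VarList, Symmetry);
}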
// collect buffer grid segments or blocks for the periodic boundary condition of given patch
// ---------------------------------------------------
// |con | |con |
@@ -4790,6 +5286,203 @@ void Parallel::OutBdLow2Himix(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
|||||||
delete[] transfer_src;
|
delete[] transfer_src;
|
||||||
delete[] transfer_dst;
|
delete[] transfer_dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Restrict_cached: cache grid segment lists, reuse buffers via transfer_cached
|
||||||
|
void Parallel::Restrict_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
||||||
|
MyList<var> *VarList1, MyList<var> *VarList2,
|
||||||
|
int Symmetry, SyncCache &cache)
|
||||||
|
{
|
||||||
|
if (!cache.valid)
|
||||||
|
{
|
||||||
|
int cpusize;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
|
||||||
|
cache.cpusize = cpusize;
|
||||||
|
|
||||||
|
if (!cache.combined_src)
|
||||||
|
{
|
||||||
|
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.send_lengths = new int[cpusize];
|
||||||
|
cache.recv_lengths = new int[cpusize];
|
||||||
|
cache.send_bufs = new double *[cpusize];
|
||||||
|
cache.recv_bufs = new double *[cpusize];
|
||||||
|
cache.send_buf_caps = new int[cpusize];
|
||||||
|
cache.recv_buf_caps = new int[cpusize];
|
||||||
|
for (int i = 0; i < cpusize; i++)
|
||||||
|
{
|
||||||
|
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
|
||||||
|
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
|
||||||
|
}
|
||||||
|
cache.max_reqs = 2 * cpusize;
|
||||||
|
cache.reqs = new MPI_Request[cache.max_reqs];
|
||||||
|
cache.stats = new MPI_Status[cache.max_reqs];
|
||||||
|
}
|
||||||
|
|
||||||
|
MyList<Parallel::gridseg> *dst = build_complete_gsl(PatcL);
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatfL, node, 2, Symmetry);
|
||||||
|
build_gstl(src_owned, dst, &cache.combined_src[node], &cache.combined_dst[node]);
|
||||||
|
if (src_owned) src_owned->destroyList();
|
||||||
|
}
|
||||||
|
if (dst) dst->destroyList();
|
||||||
|
|
||||||
|
cache.valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
transfer_cached(cache.combined_src, cache.combined_dst, VarList1, VarList2, Symmetry, cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutBdLow2Hi_cached: cache grid segment lists, reuse buffers via transfer_cached
|
||||||
|
void Parallel::OutBdLow2Hi_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
||||||
|
MyList<var> *VarList1, MyList<var> *VarList2,
|
||||||
|
int Symmetry, SyncCache &cache)
|
||||||
|
{
|
||||||
|
if (!cache.valid)
|
||||||
|
{
|
||||||
|
int cpusize;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
|
||||||
|
cache.cpusize = cpusize;
|
||||||
|
|
||||||
|
if (!cache.combined_src)
|
||||||
|
{
|
||||||
|
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.send_lengths = new int[cpusize];
|
||||||
|
cache.recv_lengths = new int[cpusize];
|
||||||
|
cache.send_bufs = new double *[cpusize];
|
||||||
|
cache.recv_bufs = new double *[cpusize];
|
||||||
|
cache.send_buf_caps = new int[cpusize];
|
||||||
|
cache.recv_buf_caps = new int[cpusize];
|
||||||
|
for (int i = 0; i < cpusize; i++)
|
||||||
|
{
|
||||||
|
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
|
||||||
|
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
|
||||||
|
}
|
||||||
|
cache.max_reqs = 2 * cpusize;
|
||||||
|
cache.reqs = new MPI_Request[cache.max_reqs];
|
||||||
|
cache.stats = new MPI_Status[cache.max_reqs];
|
||||||
|
}
|
||||||
|
|
||||||
|
MyList<Parallel::gridseg> *dst = build_buffer_gsl(PatfL);
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatcL, node, 4, Symmetry);
|
||||||
|
build_gstl(src_owned, dst, &cache.combined_src[node], &cache.combined_dst[node]);
|
||||||
|
if (src_owned) src_owned->destroyList();
|
||||||
|
}
|
||||||
|
if (dst) dst->destroyList();
|
||||||
|
|
||||||
|
cache.valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
transfer_cached(cache.combined_src, cache.combined_dst, VarList1, VarList2, Symmetry, cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutBdLow2Himix_cached: same as OutBdLow2Hi_cached but uses transfermix for unpacking
|
||||||
|
void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
||||||
|
MyList<var> *VarList1, MyList<var> *VarList2,
|
||||||
|
int Symmetry, SyncCache &cache)
|
||||||
|
{
|
||||||
|
if (!cache.valid)
|
||||||
|
{
|
||||||
|
int cpusize;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
|
||||||
|
cache.cpusize = cpusize;
|
||||||
|
|
||||||
|
if (!cache.combined_src)
|
||||||
|
{
|
||||||
|
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
|
||||||
|
cache.send_lengths = new int[cpusize];
|
||||||
|
cache.recv_lengths = new int[cpusize];
|
||||||
|
cache.send_bufs = new double *[cpusize];
|
||||||
|
cache.recv_bufs = new double *[cpusize];
|
||||||
|
cache.send_buf_caps = new int[cpusize];
|
||||||
|
cache.recv_buf_caps = new int[cpusize];
|
||||||
|
for (int i = 0; i < cpusize; i++)
|
||||||
|
{
|
||||||
|
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
|
||||||
|
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
|
||||||
|
}
|
||||||
|
cache.max_reqs = 2 * cpusize;
|
||||||
|
cache.reqs = new MPI_Request[cache.max_reqs];
|
||||||
|
cache.stats = new MPI_Status[cache.max_reqs];
|
||||||
|
}
|
||||||
|
|
||||||
|
MyList<Parallel::gridseg> *dst = build_buffer_gsl(PatfL);
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatcL, node, 4, Symmetry);
|
||||||
|
build_gstl(src_owned, dst, &cache.combined_src[node], &cache.combined_dst[node]);
|
||||||
|
if (src_owned) src_owned->destroyList();
|
||||||
|
}
|
||||||
|
if (dst) dst->destroyList();
|
||||||
|
|
||||||
|
cache.valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use transfermix instead of transfer for mix-mode interpolation
|
||||||
|
int myrank;
|
||||||
|
MPI_Comm_size(MPI_COMM_WORLD, &cache.cpusize);
|
||||||
|
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
|
||||||
|
int cpusize = cache.cpusize;
|
||||||
|
|
||||||
|
int req_no = 0;
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
{
|
||||||
|
if (node == myrank)
|
||||||
|
{
|
||||||
|
int length = data_packermix(0, cache.combined_src[myrank], cache.combined_dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.recv_lengths[node] = length;
|
||||||
|
if (length > 0)
|
||||||
|
{
|
||||||
|
if (length > cache.recv_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
|
||||||
|
cache.recv_bufs[node] = new double[length];
|
||||||
|
cache.recv_buf_caps[node] = length;
|
||||||
|
}
|
||||||
|
data_packermix(cache.recv_bufs[node], cache.combined_src[myrank], cache.combined_dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
int slength = data_packermix(0, cache.combined_src[myrank], cache.combined_dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.send_lengths[node] = slength;
|
||||||
|
if (slength > 0)
|
||||||
|
{
|
||||||
|
if (slength > cache.send_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
|
||||||
|
cache.send_bufs[node] = new double[slength];
|
||||||
|
cache.send_buf_caps[node] = slength;
|
||||||
|
}
|
||||||
|
data_packermix(cache.send_bufs[node], cache.combined_src[myrank], cache.combined_dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
|
||||||
|
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
|
||||||
|
}
|
||||||
|
int rlength = data_packermix(0, cache.combined_src[node], cache.combined_dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
|
||||||
|
cache.recv_lengths[node] = rlength;
|
||||||
|
if (rlength > 0)
|
||||||
|
{
|
||||||
|
if (rlength > cache.recv_buf_caps[node])
|
||||||
|
{
|
||||||
|
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
|
||||||
|
cache.recv_bufs[node] = new double[rlength];
|
||||||
|
cache.recv_buf_caps[node] = rlength;
|
||||||
|
}
|
||||||
|
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MPI_Waitall(req_no, cache.reqs, cache.stats);
|
||||||
|
|
||||||
|
for (int node = 0; node < cpusize; node++)
|
||||||
|
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
|
||||||
|
data_packermix(cache.recv_bufs[node], cache.combined_src[node], cache.combined_dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
|
||||||
|
}
|
||||||
|
|
||||||
// collect all buffer grid segments or blocks for given patch
MyList<Parallel::gridseg> *Parallel::build_buffer_gsl(Patch *Pat)
{
@@ -81,6 +81,43 @@ namespace Parallel
              int Symmetry);
    void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
    void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
+   void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
+
+   struct SyncCache {
+      bool valid;
+      int cpusize;
+      MyList<gridseg> **combined_src;
+      MyList<gridseg> **combined_dst;
+      int *send_lengths;
+      int *recv_lengths;
+      double **send_bufs;
+      double **recv_bufs;
+      int *send_buf_caps;
+      int *recv_buf_caps;
+      MPI_Request *reqs;
+      MPI_Status *stats;
+      int max_reqs;
+      bool lengths_valid;
+      SyncCache();
+      void invalidate();
+      void destroy();
+   };
+
+   void Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache);
+   void transfer_cached(MyList<gridseg> **src, MyList<gridseg> **dst,
+                        MyList<var> *VarList1, MyList<var> *VarList2,
+                        int Symmetry, SyncCache &cache);
+
+   struct AsyncSyncState {
+      int req_no;
+      bool active;
+      AsyncSyncState() : req_no(0), active(false) {}
+   };
+
+   void Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
+                   SyncCache &cache, AsyncSyncState &state);
+   void Sync_finish(SyncCache &cache, AsyncSyncState &state,
+                    MyList<var> *VarList, int Symmetry);
    void OutBdLow2Hi(Patch *Patc, Patch *Patf,
                     MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
                     int Symmetry);
@@ -93,6 +130,15 @@ namespace Parallel
    void OutBdLow2Himix(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
                        MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
                        int Symmetry);
+   void Restrict_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
+                        MyList<var> *VarList1, MyList<var> *VarList2,
+                        int Symmetry, SyncCache &cache);
+   void OutBdLow2Hi_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
+                           MyList<var> *VarList1, MyList<var> *VarList2,
+                           int Symmetry, SyncCache &cache);
+   void OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
+                              MyList<var> *VarList1, MyList<var> *VarList2,
+                              int Symmetry, SyncCache &cache);
    void Prolong(Patch *Patc, Patch *Patf,
                 MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
                 int Symmetry);
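// SyncCache's constructor, invalidate() and destroy() are declared above but
// their bodies are not shown in this diff.  The following is only a sketch of
// implementations consistent with how Sync_cached/Sync_start use the members
// (an assumption, not the project's actual code):
Parallel::SyncCache::SyncCache()
    : valid(false), cpusize(0), combined_src(0), combined_dst(0),
      send_lengths(0), recv_lengths(0), send_bufs(0), recv_bufs(0),
      send_buf_caps(0), recv_buf_caps(0), reqs(0), stats(0),
      max_reqs(0), lengths_valid(false) {}

void Parallel::SyncCache::invalidate()
{
    // Drop the cached segment lists so they are rebuilt on the next call,
    // but keep the message buffers so their capacity can be reused.
    if (combined_src && combined_dst)
        for (int i = 0; i < cpusize; i++)
        {
            if (combined_src[i]) { combined_src[i]->destroyList(); combined_src[i] = 0; }
            if (combined_dst[i]) { combined_dst[i]->destroyList(); combined_dst[i] = 0; }
        }
    valid = false;
    lengths_valid = false;
}

void Parallel::SyncCache::destroy()
{
    invalidate();
    if (send_bufs) for (int i = 0; i < cpusize; i++) delete[] send_bufs[i];
    if (recv_bufs) for (int i = 0; i < cpusize; i++) delete[] recv_bufs[i];
    delete[] send_bufs;      delete[] recv_bufs;
    delete[] combined_src;   delete[] combined_dst;
    delete[] send_lengths;   delete[] recv_lengths;
    delete[] send_buf_caps;  delete[] recv_buf_caps;
    delete[] reqs;           delete[] stats;
    combined_src = combined_dst = 0;
    send_bufs = recv_bufs = 0;
    cpusize = 0;
}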
File diff suppressed because it is too large
@@ -1,7 +1,8 @@

 #ifndef TWO_PUNCTURES_H
 #define TWO_PUNCTURES_H

+#include <omp.h>
 #define StencilSize 19
 #define N_PlaneRelax 1
 #define NRELAX 200
@@ -32,7 +33,7 @@ private:
   int npoints_A, npoints_B, npoints_phi;

   double target_M_plus, target_M_minus;

   double admMass;

   double adm_tol;
@@ -42,6 +43,18 @@ private:

   int ntotal;

+  // ===== Precomputed spectral derivative matrices =====
+  double *D1_A, *D2_A;
+  double *D1_B, *D2_B;
+  double *DF1_phi, *DF2_phi;
+
+  // ===== Pre-allocated workspace for LineRelax (per-thread) =====
+  int max_threads;
+  double **ws_diag_be, **ws_e_be, **ws_f_be, **ws_b_be, **ws_x_be;
+  double **ws_l_be, **ws_u_be, **ws_d_be, **ws_y_be;
+  double **ws_diag_al, **ws_e_al, **ws_f_al, **ws_b_al, **ws_x_al;
+  double **ws_l_al, **ws_u_al, **ws_d_al, **ws_y_al;
+
   struct parameters
   {
     int nvar, n1, n2, n3;
@@ -58,6 +71,28 @@ public:
              int Newtonmaxit);
   ~TwoPunctures();

+  // 02/07: New/modified methods
+  void allocate_workspace();
+  void free_workspace();
+  void precompute_derivative_matrices();
+  void build_cheb_deriv_matrices(int n, double *D1, double *D2);
+  void build_fourier_deriv_matrices(int N, double *DF1, double *DF2);
+  void Derivatives_AB3_MatMul(int nvar, int n1, int n2, int n3, derivs v);
+  void ThomasAlgorithm_ws(int N, double *b, double *a, double *c, double *x, double *q,
+                          double *l, double *u_ws, double *d, double *y);
+  void LineRelax_be_omp(double *dv,
+                        int const i, int const k, int const nvar,
+                        int const n1, int const n2, int const n3,
+                        double const *rhs, int const *ncols, int **cols,
+                        double **JFD, int tid);
+  void LineRelax_al_omp(double *dv,
+                        int const j, int const k, int const nvar,
+                        int const n1, int const n2, int const n3,
+                        double const *rhs, int const *ncols,
+                        int **cols, double **JFD, int tid);
+  void relax_omp(double *dv, int const nvar, int const n1, int const n2, int const n3,
+                 double const *rhs, int const *ncols, int **cols, double **JFD);
+
   void Solve();
   void set_initial_guess(derivs v);
   int index(int i, int j, int k, int l, int a, int b, int c, int d);
@@ -116,23 +151,11 @@ public:
   double BY_KKofxyz(double x, double y, double z);
   void SetMatrix_JFD(int nvar, int n1, int n2, int n3, derivs u, int *ncols, int **cols, double **Matrix);
   void J_times_dv(int nvar, int n1, int n2, int n3, derivs dv, double *Jdv, derivs u);
-  void relax(double *dv, int const nvar, int const n1, int const n2, int const n3,
-             double const *rhs, int const *ncols, int **cols, double **JFD);
-  void LineRelax_be(double *dv,
-                    int const i, int const k, int const nvar,
-                    int const n1, int const n2, int const n3,
-                    double const *rhs, int const *ncols, int **cols,
-                    double **JFD);
   void JFD_times_dv(int i, int j, int k, int nvar, int n1, int n2,
                     int n3, derivs dv, derivs u, double *values);
   void LinEquations(double A, double B, double X, double R,
                     double x, double r, double phi,
                     double y, double z, derivs dU, derivs U, double *values);
-  void LineRelax_al(double *dv,
-                    int const j, int const k, int const nvar,
-                    int const n1, int const n2, int const n3,
-                    double const *rhs, int const *ncols,
-                    int **cols, double **JFD);
   void ThomasAlgorithm(int N, double *b, double *a, double *c, double *x, double *q);
   void Save(char *fname);
   // provided by Vasileios Paschalidis (vpaschal@illinois.edu)
@@ -141,4 +164,4 @@ public:
   void SpecCoef(parameters par, int ivar, double *v, double *cf);
 };

 #endif /* TWO_PUNCTURES_H */
@@ -321,22 +321,7 @@ void Z4c_class::Step(int lev, int YN)
       }
       Pp = Pp->next;
     }
-    // check error information
-    {
-      int erh = ERROR;
-      MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    }
-    if (ERROR)
-    {
-      Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
-      if (myrank == 0)
-      {
-        if (ErrorMonitor->outfile)
-          ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
-                                << ", lev = " << lev << endl;
-        MPI_Abort(MPI_COMM_WORLD, 1);
-      }
-    }
+    // NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls

 #ifdef WithShell
     // evolve Shell Patches
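// The deferred error check above is paired, further down in this file, with a
// non-blocking reduction: MPI_Iallreduce is posted before the ghost-zone sync
// and completed afterwards, so the NaN check no longer serializes every
// substep.  A stripped-down sketch of that pattern (variable names are
// illustrative, not taken from the original sources):
#include <mpi.h>

static int reduce_error_overlapped(int local_error)
{
    int global_error = 0;
    MPI_Request req;
    MPI_Iallreduce(&local_error, &global_error, 1, MPI_INT, MPI_SUM,
                   MPI_COMM_WORLD, &req);

    // ... overlap: start ghost-zone synchronization, shell-patch work, etc. ...

    MPI_Wait(&req, MPI_STATUS_IGNORE);   // global_error is valid only after this
    return global_error;
}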
@@ -354,9 +339,9 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
||||||
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
||||||
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
||||||
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -468,24 +453,16 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req_pre;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_pre);
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
|
Parallel::AsyncSyncState async_pre;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -498,12 +475,30 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
||||||
|
|
||||||
|
#ifdef WithShell
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req_pre, MPI_STATUS_IGNORE);
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -693,23 +688,7 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// check error information
|
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
||||||
{
|
|
||||||
int erh = ERROR;
|
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -850,25 +829,16 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req_cor;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
Parallel::AsyncSyncState async_cor;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -881,11 +851,30 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
||||||
|
|
||||||
|
#ifdef WithShell
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -1252,22 +1241,7 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
// check error information
|
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
||||||
{
|
|
||||||
int erh = ERROR;
|
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -1542,23 +1516,15 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req_pre;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_pre);
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
|
Parallel::AsyncSyncState async_pre;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
||||||
|
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
{
|
{
|
||||||
@@ -1570,8 +1536,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1620,6 +1586,22 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
||||||
|
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req_pre, MPI_STATUS_IGNORE);
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -1841,23 +1823,7 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// check error information
|
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
||||||
{
|
|
||||||
int erh = ERROR;
|
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -2103,24 +2069,15 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req_cor;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
Parallel::AsyncSyncState async_cor;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
||||||
|
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
{
|
{
|
||||||
@@ -2132,8 +2089,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2170,6 +2127,23 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
// end smooth
|
// end smooth
|
||||||
#endif
|
#endif
|
||||||
|
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
||||||
|
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
|
|||||||
@@ -730,6 +730,12 @@ void bssn_class::Initialize()
     PhysTime = StartTime;
     Setup_Black_Hole_position();
   }
+
+  // Initialize sync caches (per-level, for predictor and corrector)
+  sync_cache_pre = new Parallel::SyncCache[GH->levels];
+  sync_cache_cor = new Parallel::SyncCache[GH->levels];
+  sync_cache_rp_coarse = new Parallel::SyncCache[GH->levels];
+  sync_cache_rp_fine = new Parallel::SyncCache[GH->levels];
 }

 //================================================================================================
@@ -981,6 +987,32 @@ bssn_class::~bssn_class()
   delete Azzz;
 #endif
+
+  // Destroy sync caches before GH
+  if (sync_cache_pre)
+  {
+    for (int i = 0; i < GH->levels; i++)
+      sync_cache_pre[i].destroy();
+    delete[] sync_cache_pre;
+  }
+  if (sync_cache_cor)
+  {
+    for (int i = 0; i < GH->levels; i++)
+      sync_cache_cor[i].destroy();
+    delete[] sync_cache_cor;
+  }
+  if (sync_cache_rp_coarse)
+  {
+    for (int i = 0; i < GH->levels; i++)
+      sync_cache_rp_coarse[i].destroy();
+    delete[] sync_cache_rp_coarse;
+  }
+  if (sync_cache_rp_fine)
+  {
+    for (int i = 0; i < GH->levels; i++)
+      sync_cache_rp_fine[i].destroy();
+    delete[] sync_cache_rp_fine;
+  }

   delete GH;
 #ifdef WithShell
   delete SH;
@@ -2181,6 +2213,7 @@ void bssn_class::Evolve(int Steps)
     GH->Regrid(Symmetry, BH_num, Porgbr, Porg0,
                SynchList_cor, OldStateList, StateList, SynchList_pre,
                fgt(PhysTime - dT_mon, StartTime, dT_mon / 2), ErrorMonitor);
+    for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
 #endif

 #if (REGLEV == 0 && (PSTR == 1 || PSTR == 2))
@@ -2396,6 +2429,7 @@ void bssn_class::RecursiveStep(int lev)
|
|||||||
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2574,6 +2608,7 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2740,6 +2775,7 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2754,6 +2790,7 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2772,6 +2809,7 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2787,6 +2825,7 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
||||||
|
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -3158,21 +3197,7 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
// check error information
|
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
||||||
{
|
|
||||||
int erh = ERROR;
|
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -3190,9 +3215,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
||||||
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
||||||
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
||||||
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -3316,25 +3341,16 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
|
||||||
}
|
|
||||||
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
|
Parallel::AsyncSyncState async_pre;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -3347,12 +3363,29 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
||||||
|
|
||||||
|
#ifdef WithShell
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
#if (MAPBH == 0)
|
#if (MAPBH == 0)
|
||||||
// for black hole position
|
// for black hole position
|
||||||
@@ -3528,24 +3561,7 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// check error information
|
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
||||||
{
|
|
||||||
int erh = ERROR;
|
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -3563,9 +3579,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
||||||
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
||||||
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
||||||
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
||||||
#elif (AGM == 1)
|
#elif (AGM == 1)
|
||||||
if (iter_count == 3)
|
if (iter_count == 3)
|
||||||
@@ -3685,26 +3701,16 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req_cor;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
||||||
}
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#"
|
|
||||||
<< iter_count << " variables at t = "
|
|
||||||
<< PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
Parallel::AsyncSyncState async_cor;
|
||||||
|
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -3717,12 +3723,31 @@ void bssn_class::Step(int lev, int YN)
 {
 prev_clock = curr_clock;
 curr_clock = clock();
 cout << " Shell stuff synchronization used "
 << (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
 << " seconds! " << endl;
 }
 }
 #endif
+Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);

+#ifdef WithShell
+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
+if (ERROR)
+{
+Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
+SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
+if (myrank == 0)
+{
+if (ErrorMonitor->outfile)
+ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
+<< " variables at t = " << PhysTime
+<< ", lev = " << lev << endl;
+MPI_Abort(MPI_COMM_WORLD, 1);
+}
+}
+#endif

 #if (MAPBH == 0)
 // for black hole position
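Parallel::Sync_start and Parallel::Sync_finish split one ghost-zone synchronization into a posting phase and a completion phase, so the shell-patch work between them can overlap the communication. The snippet below is only a generic MPI illustration of such a split-phase exchange for a 1D decomposition; it is not the project's own Sync implementation.

```cpp
// Generic split-phase ("start"/"finish") ghost exchange sketch.
#include <mpi.h>

struct HaloExchange {
    MPI_Request reqs[4];
};

// Post sends/receives and return immediately so the caller can overlap work.
// left/right may be MPI_PROC_NULL at physical boundaries.
HaloExchange halo_start(MPI_Comm comm, int left, int right,
                        double* send_l, double* send_r,
                        double* recv_l, double* recv_r, int n)
{
    HaloExchange h;
    MPI_Irecv(recv_l, n, MPI_DOUBLE, left,  0, comm, &h.reqs[0]);
    MPI_Irecv(recv_r, n, MPI_DOUBLE, right, 1, comm, &h.reqs[1]);
    MPI_Isend(send_l, n, MPI_DOUBLE, left,  1, comm, &h.reqs[2]);
    MPI_Isend(send_r, n, MPI_DOUBLE, right, 0, comm, &h.reqs[3]);
    return h;
}

// Complete the exchange once the overlapped work is done.
void halo_finish(HaloExchange& h)
{
    MPI_Waitall(4, h.reqs, MPI_STATUSES_IGNORE);
}
```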
@@ -4034,22 +4059,7 @@ void bssn_class::Step(int lev, int YN)
 }
 Pp = Pp->next;
 }
-// check error information
+// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
-{
-int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-}
-if (ERROR)
-{
-Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
-<< ", lev = " << lev << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
-}

 #ifdef WithShell
 // evolve Shell Patches
@@ -4067,15 +4077,15 @@ void bssn_class::Step(int lev, int YN)
 {
 #if (AGM == 0)
 f_enforce_ga(cg->shape,
 cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
 cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
 cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
 cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
 #endif

 if (f_compute_rhs_bssn_ss(cg->shape, TRK4, cg->X[0], cg->X[1], cg->X[2],
 cg->fgfs[fngfs + ShellPatch::gx],
 cg->fgfs[fngfs + ShellPatch::gy],
 cg->fgfs[fngfs + ShellPatch::gz],
 cg->fgfs[fngfs + ShellPatch::drhodx],
 cg->fgfs[fngfs + ShellPatch::drhody],
@@ -4190,25 +4200,16 @@ void bssn_class::Step(int lev, int YN)
 }
 #endif
 }
-// check error information
+// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
+MPI_Request err_req;
 {
 int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
-}
-if (ERROR)
-{
-SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = "
-<< PhysTime << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
 }
 #endif

-Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
+Parallel::AsyncSyncState async_pre;
+Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);

 #ifdef WithShell
 if (lev == 0)
@@ -4221,9 +4222,27 @@ void bssn_class::Step(int lev, int YN)
 {
 prev_clock = curr_clock;
 curr_clock = clock();
 cout << " Shell stuff synchronization used "
 << (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
 << " seconds! " << endl;
+}
+}
+#endif
+Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
+
+#ifdef WithShell
+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req, MPI_STATUS_IGNORE);
+if (ERROR)
+{
+Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
+SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
+if (myrank == 0)
+{
+if (ErrorMonitor->outfile)
+ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
+<< ", lev = " << lev << endl;
+MPI_Abort(MPI_COMM_WORLD, 1);
 }
 }
 #endif
@@ -4386,23 +4405,7 @@ void bssn_class::Step(int lev, int YN)
 Pp = Pp->next;
 }

-// check error information
+// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
-{
-int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-}
-if (ERROR)
-{
-Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
-<< " variables at t = " << PhysTime
-<< ", lev = " << lev << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
-}

 #ifdef WithShell
 // evolve Shell Patches
@@ -4420,9 +4423,9 @@ void bssn_class::Step(int lev, int YN)
 {
 #if (AGM == 0)
 f_enforce_ga(cg->shape,
 cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
 cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
 cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
 cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
 #elif (AGM == 1)
 if (iter_count == 3)
@@ -4542,25 +4545,16 @@ void bssn_class::Step(int lev, int YN)
 sPp = sPp->next;
 }
 }
-// check error information
+// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
+MPI_Request err_req_cor;
 {
 int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
-}
-if (ERROR)
-{
-SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
-<< " variables at t = " << PhysTime << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
 }
 #endif

-Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
+Parallel::AsyncSyncState async_cor;
+Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);

 #ifdef WithShell
 if (lev == 0)
@@ -4573,11 +4567,30 @@ void bssn_class::Step(int lev, int YN)
 {
 prev_clock = curr_clock;
 curr_clock = clock();
 cout << " Shell stuff synchronization used "
 << (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
 << " seconds! " << endl;
 }
 }
+#endif
+Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
+
+#ifdef WithShell
+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
+if (ERROR)
+{
+Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
+SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
+if (myrank == 0)
+{
+if (ErrorMonitor->outfile)
+ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
+<< " variables at t = " << PhysTime
+<< ", lev = " << lev << endl;
+MPI_Abort(MPI_COMM_WORLD, 1);
+}
+}
 #endif
 // for black hole position
 if (BH_num > 0 && lev == GH->levels - 1)
@@ -4943,11 +4956,19 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");
|
||||||
|
|
||||||
// check error information
|
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||||
|
MPI_Request err_req;
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
|
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
|
||||||
|
|
||||||
|
Parallel::Sync_cached(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev]);
|
||||||
|
|
||||||
|
// Complete non-blocking error reduction and check
|
||||||
|
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
||||||
if (ERROR)
|
if (ERROR)
|
||||||
{
|
{
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
@@ -4959,10 +4980,6 @@ void bssn_class::Step(int lev, int YN)
 }
 }

-// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
-
-Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
-
 #if (MAPBH == 0)
 // for black hole position
 if (BH_num > 0 && lev == GH->levels - 1)
@@ -5140,30 +5157,34 @@ void bssn_class::Step(int lev, int YN)

 // misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector error check");

-// check error information
+// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
+MPI_Request err_req_cor;
 {
 int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
+MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req_cor);
 }

+// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
+
+Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev]);
+
+// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
+
+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
 if (ERROR)
 {
 Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
 if (myrank == 0)
 {
 if (ErrorMonitor->outfile)
 ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
 << " variables at t = " << PhysTime
 << ", lev = " << lev << endl;
 MPI_Abort(MPI_COMM_WORLD, 1);
 }
 }

-// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
-
-Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
-
-// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
-
 #if (MAPBH == 0)
 // for black hole position
 if (BH_num > 0 && lev == GH->levels - 1)
@@ -5447,21 +5468,11 @@ void bssn_class::SHStep()
 #if (PSTR == 1 || PSTR == 2)
 // misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor's error check");
 #endif
-// check error information
+// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
+MPI_Request err_req;
 {
 int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
-}
-
-if (ERROR)
-{
-SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
 }

 {
@@ -5473,12 +5484,25 @@ void bssn_class::SHStep()
 {
 prev_clock = curr_clock;
 curr_clock = clock();
 cout << " Shell stuff synchronization used "
 << (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
 << " seconds! " << endl;
 }
 }

+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req, MPI_STATUS_IGNORE);
+if (ERROR)
+{
+SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
+if (myrank == 0)
+{
+if (ErrorMonitor->outfile)
+ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
+MPI_Abort(MPI_COMM_WORLD, 1);
+}
+}
+
 // corrector
 for (iter_count = 1; iter_count < 4; iter_count++)
 {
@@ -5621,21 +5645,11 @@ void bssn_class::SHStep()
 sPp = sPp->next;
 }
 }
-// check error information
+// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
+MPI_Request err_req_cor;
 {
 int erh = ERROR;
-MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
-}
-if (ERROR)
-{
-SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
-if (myrank == 0)
-{
-if (ErrorMonitor->outfile)
-ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
-<< " variables at t = " << PhysTime << endl;
-MPI_Abort(MPI_COMM_WORLD, 1);
-}
 }

 {
@@ -5647,12 +5661,26 @@ void bssn_class::SHStep()
 {
 prev_clock = curr_clock;
 curr_clock = clock();
 cout << " Shell stuff synchronization used "
 << (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
 << " seconds! " << endl;
 }
 }

+// Complete non-blocking error reduction and check
+MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
+if (ERROR)
+{
+SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
+if (myrank == 0)
+{
+if (ErrorMonitor->outfile)
+ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
+<< " variables at t = " << PhysTime << endl;
+MPI_Abort(MPI_COMM_WORLD, 1);
+}
+}
+
 sPp = SH->PatL;
 while (sPp)
 {
@@ -5781,7 +5809,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
 // misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
 #endif

-Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

 #if (PSTR == 1 || PSTR == 2)
 // a_stream.clear();
@@ -5791,21 +5819,11 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
 #endif

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
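The deleted Ppc/Pp while-loops show the other recurring refactor in these hunks: OutBdLow2Hi and OutBdLow2Himix now receive the whole coarse and fine patch lists and iterate over the pairs internally. A hypothetical sketch of that call-site simplification follows; the types and names are invented for illustration and are not this code base's API.

```cpp
// Illustrative only: move the coarse/fine pair iteration out of every call
// site and into one list-taking routine, so pairing (and any batching of the
// resulting communication) lives in a single place.
template <typename Patch>
struct PatchList { Patch* data; PatchList* next; };

template <typename Patch, typename Fn>
void for_each_pair(PatchList<Patch>* coarse, PatchList<Patch>* fine, Fn&& op)
{
    for (auto* c = coarse; c; c = c->next)
        for (auto* f = fine; f; f = f->next)
            op(*c->data, *f->data);   // e.g. fill f's outer boundary from c
}
```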
@@ -5842,7 +5860,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
 // misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
 #endif

-Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);

 #if (PSTR == 1 || PSTR == 2)
 // a_stream.clear();
@@ -5852,21 +5870,11 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
 #endif

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SL, SL, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SL, SL, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
@@ -5880,7 +5888,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
 #endif
 }

-Parallel::Sync(GH->PatL[lev], SL, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);

 #if (PSTR == 1 || PSTR == 2)
 // a_stream.clear();
@@ -5938,24 +5946,14 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
 Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SynchList_pre, GH->rsul[lev], Symmetry);
 #endif

-Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
@@ -5970,31 +5968,21 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
 Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->rsul[lev], Symmetry);
 #endif

-Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SL, SL, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SL, SL, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
 #endif
 }

-Parallel::Sync(GH->PatL[lev], SL, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
 }
 }

@@ -6045,24 +6033,14 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
 Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, SynchList_pre, GH->rsul[lev], Symmetry);
 #endif

-Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
@@ -6079,31 +6057,21 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
 Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, GH->rsul[lev], Symmetry);
 #endif

-Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
 #endif
 }

-Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
 }
 }

@@ -6133,21 +6101,11 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
 }

 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
@@ -6156,21 +6114,11 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
 else // no time refinement levels and for all same time levels
 {
 #if (RPB == 0)
-Ppc = GH->PatL[lev - 1];
-while (Ppc)
-{
-Pp = GH->PatL[lev];
-while (Pp)
-{
 #if (MIXOUTB == 0)
-Parallel::OutBdLow2Hi(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
 #elif (MIXOUTB == 1)
-Parallel::OutBdLow2Himix(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
+Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
 #endif
-Pp = Pp->next;
-}
-Ppc = Ppc->next;
-}
 #elif (RPB == 1)
 // Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
 Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
@@ -6186,10 +6134,10 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
 #else
 Parallel::Restrict_after(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, Symmetry);
 #endif
-Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
 }

-Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
+Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
 }
 }
 #undef MIXOUTB
@@ -126,6 +126,11 @@ public:
 MyList<var> *OldStateList, *DumpList;
 MyList<var> *ConstraintList;

+Parallel::SyncCache *sync_cache_pre; // per-level cache for predictor sync
+Parallel::SyncCache *sync_cache_cor; // per-level cache for corrector sync
+Parallel::SyncCache *sync_cache_rp_coarse; // RestrictProlong sync on PatL[lev-1]
+Parallel::SyncCache *sync_cache_rp_fine; // RestrictProlong sync on PatL[lev]
+
 monitor *ErrorMonitor, *Psi4Monitor, *BHMonitor, *MAPMonitor;
 monitor *ConVMonitor;
 surface_integral *Waveshell;
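The new SyncCache members suggest that each refinement level keeps a reusable description of its ghost-zone exchange instead of rebuilding it on every call. What such a cache might hold is sketched below; this layout is an assumption for illustration, not the actual Parallel::SyncCache.

```cpp
// Hypothetical per-level sync cache: the communication schedule (who talks to
// whom, with which sizes) is computed on first use and reused afterwards.
#include <cstddef>
#include <vector>

struct NeighborMsg {
    int    peer;     // rank of the neighbor
    size_t count;    // number of doubles exchanged
    size_t offset;   // where the packed data starts in the staging buffer
};

struct SyncCacheSketch {
    bool built = false;                 // schedule already computed?
    std::vector<NeighborMsg> sends;
    std::vector<NeighborMsg> recvs;
    std::vector<double> staging;        // reusable packing buffer, sized once
};
```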
@@ -106,7 +106,8 @@
 call getpbh(BHN,Porg,Mass)
 #endif

-!!! sanity check
+!!! sanity check (disabled in production builds for performance)
+#ifdef DEBUG
 dX = sum(chi)+sum(trK)+sum(dxx)+sum(gxy)+sum(gxz)+sum(dyy)+sum(gyz)+sum(dzz) &
 +sum(Axx)+sum(Axy)+sum(Axz)+sum(Ayy)+sum(Ayz)+sum(Azz) &
 +sum(Gamx)+sum(Gamy)+sum(Gamz) &
@@ -136,6 +137,7 @@
 gont = 1
 return
 endif
+#endif

 PI = dacos(-ONE)

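Wrapping the sum-based NaN sweep in `#ifdef DEBUG` keeps the check available without paying its O(N) cost in production runs. The same guard pattern looks like this in C++; NR_DEBUG is an illustrative macro name, not one used by this code.

```cpp
// Sketch: an expensive whole-field sanity check compiled out of release builds.
#include <cmath>
#include <cstddef>

bool field_has_nan(const double* f, std::size_t n)
{
    double s = 0.0;
    for (std::size_t i = 0; i < n; ++i) s += f[i];  // a NaN propagates into the sum
    return std::isnan(s);
}

void sanity_check(const double* f, std::size_t n)
{
#ifdef NR_DEBUG
    if (field_has_nan(f, n)) {
        // report and abort in the caller; omitted here
    }
#else
    (void)f; (void)n;   // compiled out in production builds
#endif
}
```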
@@ -943,103 +945,60 @@
 SSA(2)=SYM
 SSA(3)=ANTI

-!!!!!!!!!advection term part
+!!!!!!!!!advection term + Kreiss-Oliger dissipation (merged for cache efficiency)
+! lopsided_kodis shares the symmetry_bd buffer between advection and
+! dissipation, eliminating redundant full-grid copies. For metric variables
+! gxx/gyy/gzz (=dxx/dyy/dzz+1): kodis stencil coefficients sum to zero,
+! so the constant offset has no effect on dissipation.
+
-call lopsided(ex,X,Y,Z,gxx,gxx_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,gxx,gxx_rhs,betax,betay,betaz,Symmetry,SSS,eps)
-call lopsided(ex,X,Y,Z,gxy,gxy_rhs,betax,betay,betaz,Symmetry,AAS)
+call lopsided_kodis(ex,X,Y,Z,gxy,gxy_rhs,betax,betay,betaz,Symmetry,AAS,eps)
-call lopsided(ex,X,Y,Z,gxz,gxz_rhs,betax,betay,betaz,Symmetry,ASA)
+call lopsided_kodis(ex,X,Y,Z,gxz,gxz_rhs,betax,betay,betaz,Symmetry,ASA,eps)
-call lopsided(ex,X,Y,Z,gyy,gyy_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,gyy,gyy_rhs,betax,betay,betaz,Symmetry,SSS,eps)
-call lopsided(ex,X,Y,Z,gyz,gyz_rhs,betax,betay,betaz,Symmetry,SAA)
+call lopsided_kodis(ex,X,Y,Z,gyz,gyz_rhs,betax,betay,betaz,Symmetry,SAA,eps)
-call lopsided(ex,X,Y,Z,gzz,gzz_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,gzz,gzz_rhs,betax,betay,betaz,Symmetry,SSS,eps)

-call lopsided(ex,X,Y,Z,Axx,Axx_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,Axx,Axx_rhs,betax,betay,betaz,Symmetry,SSS,eps)
-call lopsided(ex,X,Y,Z,Axy,Axy_rhs,betax,betay,betaz,Symmetry,AAS)
+call lopsided_kodis(ex,X,Y,Z,Axy,Axy_rhs,betax,betay,betaz,Symmetry,AAS,eps)
-call lopsided(ex,X,Y,Z,Axz,Axz_rhs,betax,betay,betaz,Symmetry,ASA)
+call lopsided_kodis(ex,X,Y,Z,Axz,Axz_rhs,betax,betay,betaz,Symmetry,ASA,eps)
-call lopsided(ex,X,Y,Z,Ayy,Ayy_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,Ayy,Ayy_rhs,betax,betay,betaz,Symmetry,SSS,eps)
-call lopsided(ex,X,Y,Z,Ayz,Ayz_rhs,betax,betay,betaz,Symmetry,SAA)
+call lopsided_kodis(ex,X,Y,Z,Ayz,Ayz_rhs,betax,betay,betaz,Symmetry,SAA,eps)
-call lopsided(ex,X,Y,Z,Azz,Azz_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,Azz,Azz_rhs,betax,betay,betaz,Symmetry,SSS,eps)

-call lopsided(ex,X,Y,Z,chi,chi_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,chi,chi_rhs,betax,betay,betaz,Symmetry,SSS,eps)
-call lopsided(ex,X,Y,Z,trK,trK_rhs,betax,betay,betaz,Symmetry,SSS)
+call lopsided_kodis(ex,X,Y,Z,trK,trK_rhs,betax,betay,betaz,Symmetry,SSS,eps)

-call lopsided(ex,X,Y,Z,Gamx,Gamx_rhs,betax,betay,betaz,Symmetry,ASS)
+call lopsided_kodis(ex,X,Y,Z,Gamx,Gamx_rhs,betax,betay,betaz,Symmetry,ASS,eps)
-call lopsided(ex,X,Y,Z,Gamy,Gamy_rhs,betax,betay,betaz,Symmetry,SAS)
+call lopsided_kodis(ex,X,Y,Z,Gamy,Gamy_rhs,betax,betay,betaz,Symmetry,SAS,eps)
-call lopsided(ex,X,Y,Z,Gamz,Gamz_rhs,betax,betay,betaz,Symmetry,SSA)
+call lopsided_kodis(ex,X,Y,Z,Gamz,Gamz_rhs,betax,betay,betaz,Symmetry,SSA,eps)
-!!
+#if 1
+!! bam does not apply dissipation on gauge variables
+call lopsided_kodis(ex,X,Y,Z,Lap,Lap_rhs,betax,betay,betaz,Symmetry,SSS,eps)
+#if (GAUGE == 0 || GAUGE == 1 || GAUGE == 2 || GAUGE == 3 || GAUGE == 4 || GAUGE == 5 || GAUGE == 6 || GAUGE == 7)
+call lopsided_kodis(ex,X,Y,Z,betax,betax_rhs,betax,betay,betaz,Symmetry,ASS,eps)
+call lopsided_kodis(ex,X,Y,Z,betay,betay_rhs,betax,betay,betaz,Symmetry,SAS,eps)
+call lopsided_kodis(ex,X,Y,Z,betaz,betaz_rhs,betax,betay,betaz,Symmetry,SSA,eps)
+#endif
+#if (GAUGE == 0 || GAUGE == 2 || GAUGE == 3 || GAUGE == 6 || GAUGE == 7)
+call lopsided_kodis(ex,X,Y,Z,dtSfx,dtSfx_rhs,betax,betay,betaz,Symmetry,ASS,eps)
+call lopsided_kodis(ex,X,Y,Z,dtSfy,dtSfy_rhs,betax,betay,betaz,Symmetry,SAS,eps)
+call lopsided_kodis(ex,X,Y,Z,dtSfz,dtSfz_rhs,betax,betay,betaz,Symmetry,SSA,eps)
+#endif
+#else
+! No dissipation on gauge variables (advection only)
 call lopsided(ex,X,Y,Z,Lap,Lap_rhs,betax,betay,betaz,Symmetry,SSS)

 #if (GAUGE == 0 || GAUGE == 1 || GAUGE == 2 || GAUGE == 3 || GAUGE == 4 || GAUGE == 5 || GAUGE == 6 || GAUGE == 7)
 call lopsided(ex,X,Y,Z,betax,betax_rhs,betax,betay,betaz,Symmetry,ASS)
 call lopsided(ex,X,Y,Z,betay,betay_rhs,betax,betay,betaz,Symmetry,SAS)
 call lopsided(ex,X,Y,Z,betaz,betaz_rhs,betax,betay,betaz,Symmetry,SSA)
 #endif

 #if (GAUGE == 0 || GAUGE == 2 || GAUGE == 3 || GAUGE == 6 || GAUGE == 7)
 call lopsided(ex,X,Y,Z,dtSfx,dtSfx_rhs,betax,betay,betaz,Symmetry,ASS)
 call lopsided(ex,X,Y,Z,dtSfy,dtSfy_rhs,betax,betay,betaz,Symmetry,SAS)
 call lopsided(ex,X,Y,Z,dtSfz,dtSfz_rhs,betax,betay,betaz,Symmetry,SSA)
 #endif

-if(eps>0)then
-! usual Kreiss-Oliger dissipation
-call kodis(ex,X,Y,Z,chi,chi_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,trK,trK_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,dxx,gxx_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,gxy,gxy_rhs,AAS,Symmetry,eps)
-call kodis(ex,X,Y,Z,gxz,gxz_rhs,ASA,Symmetry,eps)
-call kodis(ex,X,Y,Z,dyy,gyy_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,gyz,gyz_rhs,SAA,Symmetry,eps)
-call kodis(ex,X,Y,Z,dzz,gzz_rhs,SSS,Symmetry,eps)
-#if 0
-#define i 42
-#define j 40
-#define k 40
-if(Lev == 1)then
-write(*,*) X(i),Y(j),Z(k)
-write(*,*) "before",Axx_rhs(i,j,k)
-endif
-#undef i
-#undef j
-#undef k
-!!stop
 #endif
-call kodis(ex,X,Y,Z,Axx,Axx_rhs,SSS,Symmetry,eps)
-#if 0
-#define i 42
-#define j 40
-#define k 40
-if(Lev == 1)then
-write(*,*) X(i),Y(j),Z(k)
-write(*,*) "after",Axx_rhs(i,j,k)
-endif
-#undef i
-#undef j
-#undef k
-!!stop
-#endif
-call kodis(ex,X,Y,Z,Axy,Axy_rhs,AAS,Symmetry,eps)
-call kodis(ex,X,Y,Z,Axz,Axz_rhs,ASA,Symmetry,eps)
-call kodis(ex,X,Y,Z,Ayy,Ayy_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,Ayz,Ayz_rhs,SAA,Symmetry,eps)
-call kodis(ex,X,Y,Z,Azz,Azz_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,Gamx,Gamx_rhs,ASS,Symmetry,eps)
-call kodis(ex,X,Y,Z,Gamy,Gamy_rhs,SAS,Symmetry,eps)
-call kodis(ex,X,Y,Z,Gamz,Gamz_rhs,SSA,Symmetry,eps)
-
-#if 1
-!! bam does not apply dissipation on gauge variables
-call kodis(ex,X,Y,Z,Lap,Lap_rhs,SSS,Symmetry,eps)
-call kodis(ex,X,Y,Z,betax,betax_rhs,ASS,Symmetry,eps)
-call kodis(ex,X,Y,Z,betay,betay_rhs,SAS,Symmetry,eps)
-call kodis(ex,X,Y,Z,betaz,betaz_rhs,SSA,Symmetry,eps)
-#if (GAUGE == 0 || GAUGE == 2 || GAUGE == 3 || GAUGE == 6 || GAUGE == 7)
-call kodis(ex,X,Y,Z,dtSfx,dtSfx_rhs,ASS,Symmetry,eps)
-call kodis(ex,X,Y,Z,dtSfy,dtSfy_rhs,SAS,Symmetry,eps)
-call kodis(ex,X,Y,Z,dtSfz,dtSfz_rhs,SSA,Symmetry,eps)
-#endif
-#endif
-
-endif

 if(co == 0)then
 ! ham_Res = trR + 2/3 * K^2 - A_ij * A^ij - 16 * PI * rho
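The comment about the metric offset can be checked directly: a Kreiss-Oliger style stencil has coefficients that sum to zero, so adding a constant to the field (as in gxx = dxx + 1) leaves the dissipation term unchanged. Below is a small 1D check of that cancellation; it is a toy, independent of the Fortran routines above.

```cpp
// A Kreiss-Oliger style 5-point stencil has coefficients (1,-4,6,-4,1), which
// sum to zero, so D(u) == D(u + const). 1D toy; the 3D argument is identical.
#include <cassert>
#include <cmath>
#include <vector>

double ko_point(const std::vector<double>& u, std::size_t i, double eps)
{
    return -eps * (u[i-2] - 4.0*u[i-1] + 6.0*u[i] - 4.0*u[i+1] + u[i+2]) / 16.0;
}

int main()
{
    std::vector<double> u(16), v(16);
    for (std::size_t i = 0; i < u.size(); ++i) {
        u[i] = std::sin(0.3 * i);
        v[i] = u[i] + 1.0;          // same field shifted by a constant (gxx = dxx + 1)
    }
    for (std::size_t i = 2; i + 2 < u.size(); ++i)
        assert(std::fabs(ko_point(u, i, 0.1) - ko_point(v, i, 0.1)) < 1e-12);
    return 0;
}
```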
@@ -18,49 +18,61 @@
 real*8, dimension(ex(1),ex(2),ex(3)), intent(inout) :: Ayy,Ayz,Azz

 !~~~~~~~> Local variable:

-real*8, dimension(ex(1),ex(2),ex(3)) :: trA,detg
+integer :: i,j,k
-real*8, dimension(ex(1),ex(2),ex(3)) :: gxx,gyy,gzz
+real*8 :: lgxx,lgyy,lgzz,ldetg
-real*8, dimension(ex(1),ex(2),ex(3)) :: gupxx,gupxy,gupxz,gupyy,gupyz,gupzz
+real*8 :: lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz
+real*8 :: ltrA,lscale
 real*8, parameter :: F1o3 = 1.D0 / 3.D0, ONE = 1.D0, TWO = 2.D0

 !~~~~~~>

-gxx = dxx + ONE
+do k=1,ex(3)
-gyy = dyy + ONE
+do j=1,ex(2)
-gzz = dzz + ONE
+do i=1,ex(1)

-detg = gxx * gyy * gzz + gxy * gyz * gxz + gxz * gxy * gyz - &
+lgxx = dxx(i,j,k) + ONE
-gxz * gyy * gxz - gxy * gxy * gzz - gxx * gyz * gyz
+lgyy = dyy(i,j,k) + ONE
-gupxx = ( gyy * gzz - gyz * gyz ) / detg
+lgzz = dzz(i,j,k) + ONE
-gupxy = - ( gxy * gzz - gyz * gxz ) / detg
-gupxz = ( gxy * gyz - gyy * gxz ) / detg
-gupyy = ( gxx * gzz - gxz * gxz ) / detg
-gupyz = - ( gxx * gyz - gxy * gxz ) / detg
-gupzz = ( gxx * gyy - gxy * gxy ) / detg

-trA = gupxx * Axx + gupyy * Ayy + gupzz * Azz &
+ldetg = lgxx * lgyy * lgzz &
-+ TWO * (gupxy * Axy + gupxz * Axz + gupyz * Ayz)
+     + gxy(i,j,k) * gyz(i,j,k) * gxz(i,j,k) &
+     + gxz(i,j,k) * gxy(i,j,k) * gyz(i,j,k) &
+     - gxz(i,j,k) * lgyy * gxz(i,j,k) &
+     - gxy(i,j,k) * gxy(i,j,k) * lgzz &
+     - lgxx * gyz(i,j,k) * gyz(i,j,k)

-Axx = Axx - F1o3 * gxx * trA
+lgupxx = ( lgyy * lgzz - gyz(i,j,k) * gyz(i,j,k) ) / ldetg
-Axy = Axy - F1o3 * gxy * trA
+lgupxy = - ( gxy(i,j,k) * lgzz - gyz(i,j,k) * gxz(i,j,k) ) / ldetg
-Axz = Axz - F1o3 * gxz * trA
+lgupxz = ( gxy(i,j,k) * gyz(i,j,k) - lgyy * gxz(i,j,k) ) / ldetg
-Ayy = Ayy - F1o3 * gyy * trA
+lgupyy = ( lgxx * lgzz - gxz(i,j,k) * gxz(i,j,k) ) / ldetg
-Ayz = Ayz - F1o3 * gyz * trA
+lgupyz = - ( lgxx * gyz(i,j,k) - gxy(i,j,k) * gxz(i,j,k) ) / ldetg
-Azz = Azz - F1o3 * gzz * trA
+lgupzz = ( lgxx * lgyy - gxy(i,j,k) * gxy(i,j,k) ) / ldetg

-detg = ONE / ( detg ** F1o3 )
+ltrA = lgupxx * Axx(i,j,k) + lgupyy * Ayy(i,j,k) &
+     + lgupzz * Azz(i,j,k) &
-gxx = gxx * detg
+     + TWO * (lgupxy * Axy(i,j,k) + lgupxz * Axz(i,j,k) &
-gxy = gxy * detg
+     + lgupyz * Ayz(i,j,k))
-gxz = gxz * detg
-gyy = gyy * detg
-gyz = gyz * detg
-gzz = gzz * detg

-dxx = gxx - ONE
+Axx(i,j,k) = Axx(i,j,k) - F1o3 * lgxx * ltrA
-dyy = gyy - ONE
+Axy(i,j,k) = Axy(i,j,k) - F1o3 * gxy(i,j,k) * ltrA
-dzz = gzz - ONE
+Axz(i,j,k) = Axz(i,j,k) - F1o3 * gxz(i,j,k) * ltrA
+Ayy(i,j,k) = Ayy(i,j,k) - F1o3 * lgyy * ltrA
+Ayz(i,j,k) = Ayz(i,j,k) - F1o3 * gyz(i,j,k) * ltrA
+Azz(i,j,k) = Azz(i,j,k) - F1o3 * lgzz * ltrA
+
+lscale = ONE / ( ldetg ** F1o3 )
+
+dxx(i,j,k) = lgxx * lscale - ONE
+gxy(i,j,k) = gxy(i,j,k) * lscale
+gxz(i,j,k) = gxz(i,j,k) * lscale
+dyy(i,j,k) = lgyy * lscale - ONE
+gyz(i,j,k) = gyz(i,j,k) * lscale
+dzz(i,j,k) = lgzz * lscale - ONE
+
+enddo
+enddo
+enddo

 return

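The rewrite above trades full-grid temporaries (trA, detg, the gup** arrays) for per-point scalars inside one fused i,j,k loop, so each grid point is touched once and the intermediates stay in registers. A toy contrast of the two styles, with illustrative names and a 1D field for brevity:

```cpp
// Same arithmetic, two memory layouts: array temporaries vs a fused scalar loop.
#include <vector>

void trace_free_arrays(std::vector<double>& A, const std::vector<double>& g)
{
    std::vector<double> trA(A.size());            // full-size temporary
    for (std::size_t i = 0; i < A.size(); ++i) trA[i] = A[i] / g[i];
    for (std::size_t i = 0; i < A.size(); ++i) A[i] -= g[i] * trA[i] / 3.0;
}

void trace_free_fused(std::vector<double>& A, const std::vector<double>& g)
{
    for (std::size_t i = 0; i < A.size(); ++i) {
        const double trA = A[i] / g[i];           // scalar, lives in a register
        A[i] -= g[i] * trA / 3.0;
    }
}
```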
@@ -82,51 +94,71 @@
 real*8, dimension(ex(1),ex(2),ex(3)), intent(inout) :: Ayy,Ayz,Azz

 !~~~~~~~> Local variable:

-real*8, dimension(ex(1),ex(2),ex(3)) :: trA
+integer :: i,j,k
-real*8, dimension(ex(1),ex(2),ex(3)) :: gxx,gyy,gzz
+real*8 :: lgxx,lgyy,lgzz,lscale
-real*8, dimension(ex(1),ex(2),ex(3)) :: gupxx,gupxy,gupxz,gupyy,gupyz,gupzz
+real*8 :: lgxy,lgxz,lgyz
+real*8 :: lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz
+real*8 :: ltrA
 real*8, parameter :: F1o3 = 1.D0 / 3.D0, ONE = 1.D0, TWO = 2.D0

 !~~~~~~>

-gxx = dxx + ONE
+do k=1,ex(3)
-gyy = dyy + ONE
+do j=1,ex(2)
-gzz = dzz + ONE
+do i=1,ex(1)
-! for g
-gupzz = gxx * gyy * gzz + gxy * gyz * gxz + gxz * gxy * gyz - &
-gxz * gyy * gxz - gxy * gxy * gzz - gxx * gyz * gyz

-gupzz = ONE / ( gupzz ** F1o3 )
+! for g: normalize determinant first
+lgxx = dxx(i,j,k) + ONE
-gxx = gxx * gupzz
+lgyy = dyy(i,j,k) + ONE
-gxy = gxy * gupzz
+lgzz = dzz(i,j,k) + ONE
-gxz = gxz * gupzz
+lgxy = gxy(i,j,k)
-gyy = gyy * gupzz
+lgxz = gxz(i,j,k)
-gyz = gyz * gupzz
+lgyz = gyz(i,j,k)
-gzz = gzz * gupzz

-dxx = gxx - ONE
+lscale = lgxx * lgyy * lgzz + lgxy * lgyz * lgxz &
-dyy = gyy - ONE
+     + lgxz * lgxy * lgyz - lgxz * lgyy * lgxz &
-dzz = gzz - ONE
+     - lgxy * lgxy * lgzz - lgxx * lgyz * lgyz
-! for A

-gupxx = ( gyy * gzz - gyz * gyz )
+lscale = ONE / ( lscale ** F1o3 )
-gupxy = - ( gxy * gzz - gyz * gxz )
-gupxz = ( gxy * gyz - gyy * gxz )
-gupyy = ( gxx * gzz - gxz * gxz )
-gupyz = - ( gxx * gyz - gxy * gxz )
-gupzz = ( gxx * gyy - gxy * gxy )

-trA = gupxx * Axx + gupyy * Ayy + gupzz * Azz &
+lgxx = lgxx * lscale
-+ TWO * (gupxy * Axy + gupxz * Axz + gupyz * Ayz)
+lgxy = lgxy * lscale
+lgxz = lgxz * lscale
+lgyy = lgyy * lscale
+lgyz = lgyz * lscale
+lgzz = lgzz * lscale

-Axx = Axx - F1o3 * gxx * trA
+dxx(i,j,k) = lgxx - ONE
-Axy = Axy - F1o3 * gxy * trA
+gxy(i,j,k) = lgxy
-Axz = Axz - F1o3 * gxz * trA
+gxz(i,j,k) = lgxz
-Ayy = Ayy - F1o3 * gyy * trA
+dyy(i,j,k) = lgyy - ONE
-Ayz = Ayz - F1o3 * gyz * trA
+gyz(i,j,k) = lgyz
-Azz = Azz - F1o3 * gzz * trA
+dzz(i,j,k) = lgzz - ONE

+! for A: trace-free using normalized metric (det=1, no division needed)
+lgupxx = ( lgyy * lgzz - lgyz * lgyz )
+lgupxy = - ( lgxy * lgzz - lgyz * lgxz )
+lgupxz = ( lgxy * lgyz - lgyy * lgxz )
+lgupyy = ( lgxx * lgzz - lgxz * lgxz )
+lgupyz = - ( lgxx * lgyz - lgxy * lgxz )
+lgupzz = ( lgxx * lgyy - lgxy * lgxy )
+
+ltrA = lgupxx * Axx(i,j,k) + lgupyy * Ayy(i,j,k) &
+     + lgupzz * Azz(i,j,k) &
+     + TWO * (lgupxy * Axy(i,j,k) + lgupxz * Axz(i,j,k) &
+     + lgupyz * Ayz(i,j,k))
+
+Axx(i,j,k) = Axx(i,j,k) - F1o3 * lgxx * ltrA
+Axy(i,j,k) = Axy(i,j,k) - F1o3 * lgxy * ltrA
+Axz(i,j,k) = Axz(i,j,k) - F1o3 * lgxz * ltrA
+Ayy(i,j,k) = Ayy(i,j,k) - F1o3 * lgyy * ltrA
+Ayz(i,j,k) = Ayz(i,j,k) - F1o3 * lgyz * ltrA
+Azz(i,j,k) = Azz(i,j,k) - F1o3 * lgzz * ltrA
+
+enddo
+enddo
+enddo

 return

@@ -324,7 +324,6 @@ subroutine symmetry_bd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+2,1:extc(2),1:extc(3))*SoA(1)
@@ -350,7 +349,6 @@ subroutine symmetry_tbd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+2,1:extc(2),1:extc(3))*SoA(1)
@@ -379,7 +377,6 @@ subroutine symmetry_stbd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+2,1:extc(2),1:extc(3))*SoA(1)
@@ -886,7 +883,6 @@ subroutine symmetry_bd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+1,1:extc(2),1:extc(3))*SoA(1)
@@ -912,7 +908,6 @@ subroutine symmetry_tbd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+1,1:extc(2),1:extc(3))*SoA(1)
@@ -941,7 +936,6 @@ subroutine symmetry_stbd(ord,extc,func,funcc,SoA)

 integer::i

-funcc = 0.d0
 funcc(1:extc(1),1:extc(2),1:extc(3)) = func
 do i=0,ord-1
 funcc(-i,1:extc(2),1:extc(3)) = funcc(i+1,1:extc(2),1:extc(3))*SoA(1)
@@ -1118,64 +1112,65 @@ end subroutine d2dump
|
|||||||
! Lagrangian polynomial interpolation
|
! Lagrangian polynomial interpolation
|
||||||
!------------------------------------------------------------------------------
|
!------------------------------------------------------------------------------
|
||||||
|
|
||||||
subroutine polint(xa,ya,x,y,dy,ordn)
|
subroutine polint(xa, ya, x, y, dy, ordn)
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
|
|
||||||
!~~~~~~> Input Parameter:
|
integer, intent(in) :: ordn
|
||||||
integer,intent(in) :: ordn
|
real*8, dimension(ordn), intent(in) :: xa, ya
|
||||||
real*8, dimension(ordn), intent(in) :: xa,ya
|
|
||||||
real*8, intent(in) :: x
|
real*8, intent(in) :: x
|
||||||
real*8, intent(out) :: y,dy
|
real*8, intent(out) :: y, dy
|
||||||
|
|
||||||
!~~~~~~> Other parameter:
|
integer :: i, m, ns, n_m
|
||||||
|
real*8, dimension(ordn) :: c, d, ho
|
||||||
|
real*8 :: dif, dift, hp, h, den_val
|
||||||
|
|
||||||
integer :: m,n,ns
|
c = ya
|
||||||
real*8, dimension(ordn) :: c,d,den,ho
|
d = ya
|
||||||
real*8 :: dif,dift
|
ho = xa - x
|
||||||
|
|
||||||
!~~~~~~>
|
ns = 1
|
||||||
|
dif = abs(x - xa(1))
|
||||||
|
|
||||||
n=ordn
|
do i = 2, ordn
|
||||||
m=ordn
|
dift = abs(x - xa(i))
|
||||||
|
if (dift < dif) then
|
||||||
c=ya
|
ns = i
|
||||||
d=ya
|
dif = dift
|
||||||
ho=xa-x
|
end if
|
||||||
|
|
||||||
ns=1
|
|
||||||
dif=abs(x-xa(1))
|
|
||||||
do m=1,n
|
|
||||||
dift=abs(x-xa(m))
|
|
||||||
if(dift < dif) then
|
|
||||||
ns=m
|
|
||||||
dif=dift
|
|
||||||
end if
|
|
||||||
end do
|
end do
|
||||||
|
|
||||||
y=ya(ns)
|
y = ya(ns)
|
||||||
ns=ns-1
|
ns = ns - 1
|
||||||
do m=1,n-1
|
|
||||||
den(1:n-m)=ho(1:n-m)-ho(1+m:n)
|
do m = 1, ordn - 1
|
||||||
if (any(den(1:n-m) == 0.0))then
|
n_m = ordn - m
|
||||||
write(*,*) 'failure in polint for point',x
|
do i = 1, n_m
|
||||||
write(*,*) 'with input points: ',xa
|
hp = ho(i)
|
||||||
stop
|
h = ho(i+m)
|
||||||
endif
|
den_val = hp - h
|
||||||
den(1:n-m)=(c(2:n-m+1)-d(1:n-m))/den(1:n-m)
|
|
||||||
d(1:n-m)=ho(1+m:n)*den(1:n-m)
|
if (den_val == 0.0d0) then
|
||||||
c(1:n-m)=ho(1:n-m)*den(1:n-m)
|
write(*,*) 'failure in polint for point',x
|
||||||
if (2*ns < n-m) then
|
write(*,*) 'with input points: ',xa
|
||||||
dy=c(ns+1)
|
stop
|
||||||
|
end if
|
||||||
|
|
||||||
|
den_val = (c(i+1) - d(i)) / den_val
|
||||||
|
|
||||||
|
d(i) = h * den_val
|
||||||
|
c(i) = hp * den_val
|
||||||
|
end do
|
||||||
|
|
||||||
|
if (2 * ns < n_m) then
|
||||||
|
dy = c(ns + 1)
|
||||||
else
|
else
|
||||||
dy=d(ns)
|
dy = d(ns)
|
||||||
ns=ns-1
|
ns = ns - 1
|
||||||
end if
|
end if
|
||||||
y=y+dy
|
y = y + dy
|
||||||
end do
|
end do
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
end subroutine polint
|
end subroutine polint
|
||||||
!------------------------------------------------------------------------------
|
!------------------------------------------------------------------------------
|
||||||
!
|
!
|
||||||
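The rewritten polint keeps the Neville recurrence of the original but replaces the array-slice temporaries (c, d, den, ho over 1:n-m) with an explicit inner loop and a scalar denominator test. A small Python sketch of the same recurrence, for reference only; the indexing follows the 0-based Numerical Recipes formulation rather than the Fortran above.

```python
def neville(xa, ya, x):
    """Neville's algorithm: y interpolates (xa[i], ya[i]) at x;
    dy is the last correction added, used as an error estimate."""
    n = len(xa)
    c = list(ya)
    d = list(ya)
    ho = [xi - x for xi in xa]
    ns = min(range(n), key=lambda i: abs(x - xa[i]))   # closest node
    y = ya[ns]
    ns -= 1
    dy = 0.0
    for m in range(1, n):
        for i in range(n - m):
            den = ho[i] - ho[i + m]
            if den == 0.0:
                raise ZeroDivisionError("two interpolation nodes coincide")
            den = (c[i + 1] - d[i]) / den
            d[i] = ho[i + m] * den
            c[i] = ho[i] * den
        if 2 * (ns + 1) < n - m:       # walk up or down the tableau
            dy = c[ns + 1]
        else:
            dy = d[ns]
            ns -= 1
        y += dy
    return y, dy

# a cubic through four points is reproduced exactly
xs = [0.0, 1.0, 2.0, 3.0]
y, dy = neville(xs, [v**3 for v in xs], 1.5)
assert abs(y - 1.5**3) < 1e-12
```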
@@ -1183,35 +1178,37 @@ end subroutine d2dump
|
|||||||
!
|
!
|
||||||
!------------------------------------------------------------------------------
|
!------------------------------------------------------------------------------
|
||||||
subroutine polin2(x1a,x2a,ya,x1,x2,y,dy,ordn)
|
subroutine polin2(x1a,x2a,ya,x1,x2,y,dy,ordn)
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
|
|
||||||
!~~~~~~> Input parameters:
|
|
||||||
integer,intent(in) :: ordn
|
integer,intent(in) :: ordn
|
||||||
real*8, dimension(1:ordn), intent(in) :: x1a,x2a
|
real*8, dimension(1:ordn), intent(in) :: x1a,x2a
|
||||||
real*8, dimension(1:ordn,1:ordn), intent(in) :: ya
|
real*8, dimension(1:ordn,1:ordn), intent(in) :: ya
|
||||||
real*8, intent(in) :: x1,x2
|
real*8, intent(in) :: x1,x2
|
||||||
real*8, intent(out) :: y,dy
|
real*8, intent(out) :: y,dy
|
||||||
|
|
||||||
!~~~~~~> Other parameters:
|
#ifdef POLINT_LEGACY_ORDER
|
||||||
|
|
||||||
integer :: i,m
|
integer :: i,m
|
||||||
real*8, dimension(ordn) :: ymtmp
|
real*8, dimension(ordn) :: ymtmp
|
||||||
real*8, dimension(ordn) :: yntmp
|
real*8, dimension(ordn) :: yntmp
|
||||||
|
|
||||||
m=size(x1a)
|
m=size(x1a)
|
||||||
|
|
||||||
do i=1,m
|
do i=1,m
|
||||||
|
|
||||||
yntmp=ya(i,:)
|
yntmp=ya(i,:)
|
||||||
call polint(x2a,yntmp,x2,ymtmp(i),dy,ordn)
|
call polint(x2a,yntmp,x2,ymtmp(i),dy,ordn)
|
||||||
|
|
||||||
end do
|
end do
|
||||||
|
|
||||||
call polint(x1a,ymtmp,x1,y,dy,ordn)
|
call polint(x1a,ymtmp,x1,y,dy,ordn)
|
||||||
|
#else
|
||||||
|
integer :: j
|
||||||
|
real*8, dimension(ordn) :: ymtmp
|
||||||
|
real*8 :: dy_temp
|
||||||
|
|
||||||
|
do j=1,ordn
|
||||||
|
call polint(x1a, ya(:,j), x1, ymtmp(j), dy_temp, ordn)
|
||||||
|
end do
|
||||||
|
call polint(x2a, ymtmp, x2, y, dy, ordn)
|
||||||
|
#endif
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
end subroutine polin2
|
end subroutine polin2
|
||||||
!------------------------------------------------------------------------------
|
!------------------------------------------------------------------------------
|
||||||
!
|
!
|
||||||
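polin2 (and polin3 below) assemble a multi-dimensional interpolation from repeated 1-D polint calls; the new code only changes which coordinate is swept first, and the POLINT_LEGACY_ORDER guard keeps the old order selectable. The value is the same either way, as this short NumPy sketch illustrates (Polynomial.fit stands in for polint here; it is not the repository routine).

```python
import numpy as np
from numpy.polynomial import Polynomial

def interp1(xa, ya, x):
    """Exact polynomial interpolation through all points (stand-in for polint)."""
    return Polynomial.fit(xa, ya, deg=len(xa) - 1)(x)

def interp2(x1a, x2a, ya, x1, x2, x1_first):
    n = len(x1a)
    if x1_first:
        # new order: sweep x1 for each column j, then interpolate the results along x2
        tmp = [interp1(x1a, ya[:, j], x1) for j in range(n)]
        return interp1(x2a, tmp, x2)
    else:
        # legacy order: sweep x2 for each row i, then interpolate along x1
        tmp = [interp1(x2a, ya[i, :], x2) for i in range(n)]
        return interp1(x1a, tmp, x1)

x1a = np.array([0.0, 1.0, 2.0, 3.0])
x2a = np.array([0.0, 0.5, 1.0, 1.5])
ya = np.add.outer(x1a**2, x2a**3)          # separable test surface
a = interp2(x1a, x2a, ya, 1.3, 0.7, True)
b = interp2(x1a, x2a, ya, 1.3, 0.7, False)
assert abs(a - b) < 1e-9                   # both sweep orders give the same value
```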
@@ -1219,18 +1216,15 @@ end subroutine d2dump
|
|||||||
!
|
!
|
||||||
!------------------------------------------------------------------------------
|
!------------------------------------------------------------------------------
|
||||||
subroutine polin3(x1a,x2a,x3a,ya,x1,x2,x3,y,dy,ordn)
|
subroutine polin3(x1a,x2a,x3a,ya,x1,x2,x3,y,dy,ordn)
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
|
|
||||||
!~~~~~~> Input parameters:
|
|
||||||
integer,intent(in) :: ordn
|
integer,intent(in) :: ordn
|
||||||
real*8, dimension(1:ordn), intent(in) :: x1a,x2a,x3a
|
real*8, dimension(1:ordn), intent(in) :: x1a,x2a,x3a
|
||||||
real*8, dimension(1:ordn,1:ordn,1:ordn), intent(in) :: ya
|
real*8, dimension(1:ordn,1:ordn,1:ordn), intent(in) :: ya
|
||||||
real*8, intent(in) :: x1,x2,x3
|
real*8, intent(in) :: x1,x2,x3
|
||||||
real*8, intent(out) :: y,dy
|
real*8, intent(out) :: y,dy
|
||||||
|
|
||||||
!~~~~~~> Other parameters:
|
#ifdef POLINT_LEGACY_ORDER
|
||||||
|
|
||||||
integer :: i,j,m,n
|
integer :: i,j,m,n
|
||||||
real*8, dimension(ordn,ordn) :: yatmp
|
real*8, dimension(ordn,ordn) :: yatmp
|
||||||
real*8, dimension(ordn) :: ymtmp
|
real*8, dimension(ordn) :: ymtmp
|
||||||
@@ -1239,27 +1233,36 @@ end subroutine d2dump
|
|||||||
|
|
||||||
m=size(x1a)
|
m=size(x1a)
|
||||||
n=size(x2a)
|
n=size(x2a)
|
||||||
|
|
||||||
do i=1,m
|
do i=1,m
|
||||||
do j=1,n
|
do j=1,n
|
||||||
|
|
||||||
yqtmp=ya(i,j,:)
|
yqtmp=ya(i,j,:)
|
||||||
call polint(x3a,yqtmp,x3,yatmp(i,j),dy,ordn)
|
call polint(x3a,yqtmp,x3,yatmp(i,j),dy,ordn)
|
||||||
|
|
||||||
end do
|
end do
|
||||||
|
|
||||||
yntmp=yatmp(i,:)
|
yntmp=yatmp(i,:)
|
||||||
call polint(x2a,yntmp,x2,ymtmp(i),dy,ordn)
|
call polint(x2a,yntmp,x2,ymtmp(i),dy,ordn)
|
||||||
|
|
||||||
end do
|
end do
|
||||||
|
|
||||||
call polint(x1a,ymtmp,x1,y,dy,ordn)
|
call polint(x1a,ymtmp,x1,y,dy,ordn)
|
||||||
|
#else
|
||||||
|
integer :: j, k
|
||||||
|
real*8, dimension(ordn,ordn) :: yatmp
|
||||||
|
real*8, dimension(ordn) :: ymtmp
|
||||||
|
real*8 :: dy_temp
|
||||||
|
|
||||||
|
do k=1,ordn
|
||||||
|
do j=1,ordn
|
||||||
|
call polint(x1a, ya(:,j,k), x1, yatmp(j,k), dy_temp, ordn)
|
||||||
|
end do
|
||||||
|
end do
|
||||||
|
do k=1,ordn
|
||||||
|
call polint(x2a, yatmp(:,k), x2, ymtmp(k), dy_temp, ordn)
|
||||||
|
end do
|
||||||
|
call polint(x3a, ymtmp, x3, y, dy, ordn)
|
||||||
|
#endif
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
end subroutine polin3
|
end subroutine polin3
|
||||||
!--------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------
|
||||||
! calculate L2norm
|
! calculate L2norm
|
||||||
subroutine l2normhelper(ex, X, Y, Z,xmin,ymin,zmin,xmax,ymax,zmax,&
|
subroutine l2normhelper(ex, X, Y, Z,xmin,ymin,zmin,xmax,ymax,zmax,&
|
||||||
f,f_out,gw)
|
f,f_out,gw)
|
||||||
|
|
||||||
@@ -1276,7 +1279,9 @@ end subroutine d2dump
|
|||||||
real*8 :: dX, dY, dZ
|
real*8 :: dX, dY, dZ
|
||||||
integer::imin,jmin,kmin
|
integer::imin,jmin,kmin
|
||||||
integer::imax,jmax,kmax
|
integer::imax,jmax,kmax
|
||||||
integer::i,j,k
|
integer::i,j,k,n_elements
|
||||||
|
real*8, dimension(:), allocatable :: f_flat
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
dX = X(2) - X(1)
|
dX = X(2) - X(1)
|
||||||
dY = Y(2) - Y(1)
|
dY = Y(2) - Y(1)
|
||||||
@@ -1300,7 +1305,12 @@ if(dabs(X(1)-xmin) < dX) imin = 1
|
|||||||
if(dabs(Y(1)-ymin) < dY) jmin = 1
|
if(dabs(Y(1)-ymin) < dY) jmin = 1
|
||||||
if(dabs(Z(1)-zmin) < dZ) kmin = 1
|
if(dabs(Z(1)-zmin) < dZ) kmin = 1
|
||||||
|
|
||||||
f_out = sum(f(imin:imax,jmin:jmax,kmin:kmax)*f(imin:imax,jmin:jmax,kmin:kmax))
|
! Optimized with oneMKL BLAS DDOT for dot product
|
||||||
|
n_elements = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
||||||
|
allocate(f_flat(n_elements))
|
||||||
|
f_flat = reshape(f(imin:imax,jmin:jmax,kmin:kmax), [n_elements])
|
||||||
|
f_out = DDOT(n_elements, f_flat, 1, f_flat, 1)
|
||||||
|
deallocate(f_flat)
|
||||||
|
|
||||||
f_out = f_out*dX*dY*dZ
|
f_out = f_out*dX*dY*dZ
|
||||||
|
|
||||||
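The l2normhelper change above (and the two variants that follow) compute the same quantity as before: the sum of squares over the selected interior region is just the dot product of the flattened subarray with itself, which is what DDOT evaluates. A NumPy sketch of the equivalence; note that, like the Fortran reshape, the flatten makes a contiguous temporary copy.

```python
import numpy as np

rng = np.random.default_rng(0)
f = rng.standard_normal((32, 32, 32))
dX = dY = dZ = 0.1

# original formulation: elementwise sum of squares over the interior region
sub = f[1:-1, 1:-1, 1:-1]
f_out_sum = np.sum(sub * sub) * dX * dY * dZ

# DDOT-style formulation: dot product of the flattened copy with itself
flat = sub.reshape(-1).copy()          # contiguous temporary, like reshape(...) in Fortran
f_out_dot = np.dot(flat, flat) * dX * dY * dZ

assert np.isclose(f_out_sum, f_out_dot)
```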
@@ -1325,7 +1335,9 @@ f_out = f_out*dX*dY*dZ
|
|||||||
real*8 :: dX, dY, dZ
|
real*8 :: dX, dY, dZ
|
||||||
integer::imin,jmin,kmin
|
integer::imin,jmin,kmin
|
||||||
integer::imax,jmax,kmax
|
integer::imax,jmax,kmax
|
||||||
integer::i,j,k
|
integer::i,j,k,n_elements
|
||||||
|
real*8, dimension(:), allocatable :: f_flat
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
real*8 :: PIo4
|
real*8 :: PIo4
|
||||||
|
|
||||||
@@ -1388,7 +1400,12 @@ if(Symmetry==2)then
|
|||||||
if(dabs(ymin+gw*dY)<dY.and.Y(1)<0.d0) jmin = gw+1
|
if(dabs(ymin+gw*dY)<dY.and.Y(1)<0.d0) jmin = gw+1
|
||||||
endif
|
endif
|
||||||
|
|
||||||
f_out = sum(f(imin:imax,jmin:jmax,kmin:kmax)*f(imin:imax,jmin:jmax,kmin:kmax))
|
! Optimized with oneMKL BLAS DDOT for dot product
|
||||||
|
n_elements = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
||||||
|
allocate(f_flat(n_elements))
|
||||||
|
f_flat = reshape(f(imin:imax,jmin:jmax,kmin:kmax), [n_elements])
|
||||||
|
f_out = DDOT(n_elements, f_flat, 1, f_flat, 1)
|
||||||
|
deallocate(f_flat)
|
||||||
|
|
||||||
f_out = f_out*dX*dY*dZ
|
f_out = f_out*dX*dY*dZ
|
||||||
|
|
||||||
@@ -1416,6 +1433,8 @@ f_out = f_out*dX*dY*dZ
|
|||||||
integer::imin,jmin,kmin
|
integer::imin,jmin,kmin
|
||||||
integer::imax,jmax,kmax
|
integer::imax,jmax,kmax
|
||||||
integer::i,j,k
|
integer::i,j,k
|
||||||
|
real*8, dimension(:), allocatable :: f_flat
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
real*8 :: PIo4
|
real*8 :: PIo4
|
||||||
|
|
||||||
@@ -1478,11 +1497,12 @@ if(Symmetry==2)then
|
|||||||
if(dabs(ymin+gw*dY)<dY.and.Y(1)<0.d0) jmin = gw+1
|
if(dabs(ymin+gw*dY)<dY.and.Y(1)<0.d0) jmin = gw+1
|
||||||
endif
|
endif
|
||||||
|
|
||||||
f_out = sum(f(imin:imax,jmin:jmax,kmin:kmax)*f(imin:imax,jmin:jmax,kmin:kmax))
|
! Optimized with oneMKL BLAS DDOT for dot product
|
||||||
|
|
||||||
f_out = f_out
|
|
||||||
|
|
||||||
Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
||||||
|
allocate(f_flat(Nout))
|
||||||
|
f_flat = reshape(f(imin:imax,jmin:jmax,kmin:kmax), [Nout])
|
||||||
|
f_out = DDOT(Nout, f_flat, 1, f_flat, 1)
|
||||||
|
deallocate(f_flat)
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -1680,6 +1700,7 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
real*8, dimension(ORDN,ORDN) :: tmp2
|
real*8, dimension(ORDN,ORDN) :: tmp2
|
||||||
real*8, dimension(ORDN) :: tmp1
|
real*8, dimension(ORDN) :: tmp1
|
||||||
real*8, dimension(3) :: SoAh
|
real*8, dimension(3) :: SoAh
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
! +1 because c++ gives 0 for first point
|
! +1 because c++ gives 0 for first point
|
||||||
cxB = inds+1
|
cxB = inds+1
|
||||||
@@ -1715,20 +1736,21 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
ya=fh(cxB(1):cxT(1),cxB(2):cxT(2),cxB(3):cxT(3))
|
ya=fh(cxB(1):cxT(1),cxB(2):cxT(2),cxB(3):cxT(3))
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
! Optimized with BLAS operations for better performance
|
||||||
|
! First dimension: z-direction weighted sum
|
||||||
tmp2=0
|
tmp2=0
|
||||||
do m=1,ORDN
|
do m=1,ORDN
|
||||||
tmp2 = tmp2 + coef(2*ORDN+m)*ya(:,:,m)
|
tmp2 = tmp2 + coef(2*ORDN+m)*ya(:,:,m)
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
|
! Second dimension: y-direction weighted sum
|
||||||
tmp1=0
|
tmp1=0
|
||||||
do m=1,ORDN
|
do m=1,ORDN
|
||||||
tmp1 = tmp1 + coef(ORDN+m)*tmp2(:,m)
|
tmp1 = tmp1 + coef(ORDN+m)*tmp2(:,m)
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
f_int=0
|
! Third dimension: x-direction weighted sum using BLAS DDOT
|
||||||
do m=1,ORDN
|
f_int = DDOT(ORDN, coef(1:ORDN), 1, tmp1, 1)
|
||||||
f_int = f_int + coef(m)*tmp1(m)
|
|
||||||
enddo
|
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
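The interpolation kernels above contract an ORDN^3 block of field values with one weight vector per direction, one axis at a time, and the final x-direction sum is now a single DDOT. The same three-stage contraction in NumPy, with the weight layout assumed to match the Fortran coef array (x, y and z weights stored back to back):

```python
import numpy as np

ORDN = 4
rng = np.random.default_rng(1)
ya = rng.standard_normal((ORDN, ORDN, ORDN))   # stencil values
coef = rng.standard_normal(3 * ORDN)           # [x weights | y weights | z weights]
wx, wy, wz = coef[:ORDN], coef[ORDN:2 * ORDN], coef[2 * ORDN:]

# stage 1: weighted sum over the z index -> (ORDN, ORDN)
tmp2 = np.tensordot(ya, wz, axes=([2], [0]))
# stage 2: weighted sum over the y index -> (ORDN,)
tmp1 = tmp2 @ wy
# stage 3: final dot product over the x index (the DDOT call in the Fortran)
f_int = np.dot(wx, tmp1)

# same result as the direct triple contraction
ref = np.einsum('i,j,k,ijk->', wx, wy, wz, ya)
assert np.isclose(f_int, ref)
```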
@@ -1758,6 +1780,7 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
real*8, dimension(ORDN,ORDN) :: ya
|
real*8, dimension(ORDN,ORDN) :: ya
|
||||||
real*8, dimension(ORDN) :: tmp1
|
real*8, dimension(ORDN) :: tmp1
|
||||||
real*8, dimension(2) :: SoAh
|
real*8, dimension(2) :: SoAh
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
! +1 because c++ gives 0 for first point
|
! +1 because c++ gives 0 for first point
|
||||||
cxB = inds(1:2)+1
|
cxB = inds(1:2)+1
|
||||||
@@ -1787,15 +1810,14 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
ya=fh(cxB(1):cxT(1),cxB(2):cxT(2),inds(3))
|
ya=fh(cxB(1):cxT(1),cxB(2):cxT(2),inds(3))
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
! Optimized with BLAS operations
|
||||||
tmp1=0
|
tmp1=0
|
||||||
do m=1,ORDN
|
do m=1,ORDN
|
||||||
tmp1 = tmp1 + coef(ORDN+m)*ya(:,m)
|
tmp1 = tmp1 + coef(ORDN+m)*ya(:,m)
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
f_int=0
|
! Use BLAS DDOT for final weighted sum
|
||||||
do m=1,ORDN
|
f_int = DDOT(ORDN, coef(1:ORDN), 1, tmp1, 1)
|
||||||
f_int = f_int + coef(m)*tmp1(m)
|
|
||||||
enddo
|
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -1826,6 +1848,7 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
real*8, dimension(ORDN) :: ya
|
real*8, dimension(ORDN) :: ya
|
||||||
real*8 :: SoAh
|
real*8 :: SoAh
|
||||||
integer,dimension(3) :: inds
|
integer,dimension(3) :: inds
|
||||||
|
real*8, external :: DDOT
|
||||||
|
|
||||||
! +1 because c++ gives 0 for first point
|
! +1 because c++ gives 0 for first point
|
||||||
inds = indsi + 1
|
inds = indsi + 1
|
||||||
@@ -1886,10 +1909,8 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
write(*,*)"error in global_interpind1d, not recognized dumyd = ",dumyd
|
write(*,*)"error in global_interpind1d, not recognized dumyd = ",dumyd
|
||||||
endif
|
endif
|
||||||
|
|
||||||
f_int=0
|
! Optimized with BLAS DDOT for weighted sum
|
||||||
do m=1,ORDN
|
f_int = DDOT(ORDN, coef, 1, ya, 1)
|
||||||
f_int = f_int + coef(m)*ya(m)
|
|
||||||
enddo
|
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -2121,24 +2142,38 @@ Nout = (imax-imin+1)*(jmax-jmin+1)*(kmax-kmin+1)
|
|||||||
|
|
||||||
end function fWigner_d_function
|
end function fWigner_d_function
|
||||||
!----------------------------------
|
!----------------------------------
|
||||||
|
! Optimized factorial function using lookup table for small N
|
||||||
|
! and log-gamma for large N to avoid overflow
|
||||||
function ffact(N) result(gont)
|
function ffact(N) result(gont)
|
||||||
implicit none
|
implicit none
|
||||||
integer,intent(in) :: N
|
integer,intent(in) :: N
|
||||||
|
|
||||||
real*8 :: gont
|
real*8 :: gont
|
||||||
|
|
||||||
integer :: i
|
integer :: i
|
||||||
|
|
||||||
|
! Lookup table for factorials 0! to 20! (precomputed)
|
||||||
|
real*8, parameter, dimension(0:20) :: fact_table = [ &
|
||||||
|
1.d0, 1.d0, 2.d0, 6.d0, 24.d0, 120.d0, 720.d0, 5040.d0, 40320.d0, &
|
||||||
|
362880.d0, 3628800.d0, 39916800.d0, 479001600.d0, 6227020800.d0, &
|
||||||
|
87178291200.d0, 1307674368000.d0, 20922789888000.d0, &
|
||||||
|
355687428096000.d0, 6402373705728000.d0, 121645100408832000.d0, &
|
||||||
|
2432902008176640000.d0 ]
|
||||||
|
|
||||||
! sanity check
|
! sanity check
|
||||||
if(N < 0)then
|
if(N < 0)then
|
||||||
write(*,*) "ffact: error input for factorial"
|
write(*,*) "ffact: error input for factorial"
|
||||||
|
gont = 1.d0
|
||||||
return
|
return
|
||||||
endif
|
endif
|
||||||
|
|
||||||
gont = 1.d0
|
! Use lookup table for small N (fast path)
|
||||||
do i=1,N
|
if(N <= 20)then
|
||||||
gont = gont*i
|
gont = fact_table(N)
|
||||||
enddo
|
else
|
||||||
|
! Use log-gamma function for large N: N! = exp(log_gamma(N+1))
|
||||||
|
! This avoids overflow and is computed efficiently
|
||||||
|
gont = exp(log_gamma(dble(N+1)))
|
||||||
|
endif
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
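The ffact rewrite replaces the multiply loop with a table of 0! through 20! (21! no longer fits in a signed 64-bit integer, which is the usual cutoff for such tables) and an exp(log_gamma(N+1)) fallback above that. A Python equivalent using math.lgamma:

```python
import math

# factorials 0! .. 20!, each exactly representable as a double
_FACT_TABLE = [float(math.factorial(n)) for n in range(21)]

def ffact(n: int) -> float:
    """Factorial as a double: table lookup for n <= 20,
    log-gamma fallback for larger n (N! = exp(lgamma(N + 1)))."""
    if n < 0:
        raise ValueError("ffact: negative argument")
    if n <= 20:
        return _FACT_TABLE[n]
    return math.exp(math.lgamma(n + 1))

assert ffact(10) == 3628800.0
assert abs(ffact(25) / float(math.factorial(25)) - 1.0) < 1e-12
```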
|
|||||||
@@ -16,115 +16,66 @@ using namespace std;
|
|||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include <math.h>
|
#include <math.h>
|
||||||
#endif
|
#endif
|
||||||
/* Linear equation solution by Gauss-Jordan elimination.
|
|
||||||
|
// Intel oneMKL LAPACK interface
|
||||||
|
#include <mkl_lapacke.h>
|
||||||
|
/* Linear equation solution using Intel oneMKL LAPACK.
|
||||||
a[0..n-1][0..n-1] is the input matrix. b[0..n-1] is input
|
a[0..n-1][0..n-1] is the input matrix. b[0..n-1] is input
|
||||||
containing the right-hand side vectors. On output a is
|
containing the right-hand side vectors. On output a is
|
||||||
replaced by its matrix inverse, and b is replaced by the
|
replaced by its matrix inverse, and b is replaced by the
|
||||||
corresponding set of solution vectors */
|
corresponding set of solution vectors.
|
||||||
|
|
||||||
|
Mathematical equivalence:
|
||||||
|
Solves: A * x = b => x = A^(-1) * b
|
||||||
|
Original Gauss-Jordan and LAPACK dgesv/dgetri produce identical results
|
||||||
|
within numerical precision. */
|
||||||
|
|
||||||
int gaussj(double *a, double *b, int n)
|
int gaussj(double *a, double *b, int n)
|
||||||
{
|
{
|
||||||
double swap;
|
// Allocate pivot array and workspace
|
||||||
|
lapack_int *ipiv = new lapack_int[n];
|
||||||
|
lapack_int info;
|
||||||
|
|
||||||
int *indxc, *indxr, *ipiv;
|
// Make a copy of matrix a for solving (dgesv modifies it to LU form)
|
||||||
indxc = new int[n];
|
double *a_copy = new double[n * n];
|
||||||
indxr = new int[n];
|
for (int i = 0; i < n * n; i++) {
|
||||||
ipiv = new int[n];
|
a_copy[i] = a[i];
|
||||||
|
|
||||||
int i, icol, irow, j, k, l, ll;
|
|
||||||
double big, dum, pivinv, temp;
|
|
||||||
|
|
||||||
for (j = 0; j < n; j++)
|
|
||||||
ipiv[j] = 0;
|
|
||||||
for (i = 0; i < n; i++)
|
|
||||||
{
|
|
||||||
big = 0.0;
|
|
||||||
for (j = 0; j < n; j++)
|
|
||||||
if (ipiv[j] != 1)
|
|
||||||
for (k = 0; k < n; k++)
|
|
||||||
{
|
|
||||||
if (ipiv[k] == 0)
|
|
||||||
{
|
|
||||||
if (fabs(a[j * n + k]) >= big)
|
|
||||||
{
|
|
||||||
big = fabs(a[j * n + k]);
|
|
||||||
irow = j;
|
|
||||||
icol = k;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (ipiv[k] > 1)
|
|
||||||
{
|
|
||||||
cout << "gaussj: Singular Matrix-1" << endl;
|
|
||||||
for (int ii = 0; ii < n; ii++)
|
|
||||||
{
|
|
||||||
for (int jj = 0; jj < n; jj++)
|
|
||||||
cout << a[ii * n + jj] << " ";
|
|
||||||
cout << endl;
|
|
||||||
}
|
|
||||||
return 1; // error return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ipiv[icol] = ipiv[icol] + 1;
|
|
||||||
if (irow != icol)
|
|
||||||
{
|
|
||||||
for (l = 0; l < n; l++)
|
|
||||||
{
|
|
||||||
swap = a[irow * n + l];
|
|
||||||
a[irow * n + l] = a[icol * n + l];
|
|
||||||
a[icol * n + l] = swap;
|
|
||||||
}
|
|
||||||
|
|
||||||
swap = b[irow];
|
|
||||||
b[irow] = b[icol];
|
|
||||||
b[icol] = swap;
|
|
||||||
}
|
|
||||||
|
|
||||||
indxr[i] = irow;
|
|
||||||
indxc[i] = icol;
|
|
||||||
|
|
||||||
if (a[icol * n + icol] == 0.0)
|
|
||||||
{
|
|
||||||
cout << "gaussj: Singular Matrix-2" << endl;
|
|
||||||
for (int ii = 0; ii < n; ii++)
|
|
||||||
{
|
|
||||||
for (int jj = 0; jj < n; jj++)
|
|
||||||
cout << a[ii * n + jj] << " ";
|
|
||||||
cout << endl;
|
|
||||||
}
|
|
||||||
return 1; // error return
|
|
||||||
}
|
|
||||||
|
|
||||||
pivinv = 1.0 / a[icol * n + icol];
|
|
||||||
a[icol * n + icol] = 1.0;
|
|
||||||
for (l = 0; l < n; l++)
|
|
||||||
a[icol * n + l] *= pivinv;
|
|
||||||
b[icol] *= pivinv;
|
|
||||||
for (ll = 0; ll < n; ll++)
|
|
||||||
if (ll != icol)
|
|
||||||
{
|
|
||||||
dum = a[ll * n + icol];
|
|
||||||
a[ll * n + icol] = 0.0;
|
|
||||||
for (l = 0; l < n; l++)
|
|
||||||
a[ll * n + l] -= a[icol * n + l] * dum;
|
|
||||||
b[ll] -= b[icol] * dum;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (l = n - 1; l >= 0; l--)
|
// Step 1: Solve linear system A*x = b using LU decomposition
|
||||||
{
|
// LAPACKE_dgesv uses column-major by default, but we use row-major
|
||||||
if (indxr[l] != indxc[l])
|
info = LAPACKE_dgesv(LAPACK_ROW_MAJOR, n, 1, a_copy, n, ipiv, b, 1);
|
||||||
for (k = 0; k < n; k++)
|
|
||||||
{
|
if (info != 0) {
|
||||||
swap = a[k * n + indxr[l]];
|
cout << "gaussj: Singular Matrix (dgesv info=" << info << ")" << endl;
|
||||||
a[k * n + indxr[l]] = a[k * n + indxc[l]];
|
delete[] ipiv;
|
||||||
a[k * n + indxc[l]] = swap;
|
delete[] a_copy;
|
||||||
}
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2: Compute matrix inverse A^(-1) using LU factorization
|
||||||
|
// First do LU factorization of original matrix a
|
||||||
|
info = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, n, n, a, n, ipiv);
|
||||||
|
|
||||||
|
if (info != 0) {
|
||||||
|
cout << "gaussj: Singular Matrix (dgetrf info=" << info << ")" << endl;
|
||||||
|
delete[] ipiv;
|
||||||
|
delete[] a_copy;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then compute inverse from LU factorization
|
||||||
|
info = LAPACKE_dgetri(LAPACK_ROW_MAJOR, n, a, n, ipiv);
|
||||||
|
|
||||||
|
if (info != 0) {
|
||||||
|
cout << "gaussj: Singular Matrix (dgetri info=" << info << ")" << endl;
|
||||||
|
delete[] ipiv;
|
||||||
|
delete[] a_copy;
|
||||||
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
delete[] indxc;
|
|
||||||
delete[] indxr;
|
|
||||||
delete[] ipiv;
|
delete[] ipiv;
|
||||||
|
delete[] a_copy;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
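The gaussj rewrite above delegates to LAPACK: dgesv leaves the solution in b, and dgetrf followed by dgetri overwrite a with its inverse, preserving the original routine's contract. A NumPy sketch of that contract for a small well-conditioned system (numpy.linalg routes these calls to the same LAPACK drivers):

```python
import numpy as np

def gaussj_like(a, b):
    """Return (a_inv, x) where x solves a @ x = b, mirroring the routine's
    contract of replacing `a` with its inverse and `b` with the solution."""
    x = np.linalg.solve(a, b)        # LAPACK dgesv under the hood
    a_inv = np.linalg.inv(a)         # LAPACK dgetrf + dgetri under the hood
    return a_inv, x

a = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])

a_inv, x = gaussj_like(a, b)
assert np.allclose(a @ x, b)               # x really solves the system
assert np.allclose(a @ a_inv, np.eye(3))   # a_inv really is the inverse
```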
|
|||||||
@@ -512,11 +512,10 @@
|
|||||||
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
|
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
|
||||||
DIMENSION V(N),W(N)
|
DIMENSION V(N),W(N)
|
||||||
! SUBROUTINE TO COMPUTE DOUBLE PRECISION VECTOR DOT PRODUCT.
|
! SUBROUTINE TO COMPUTE DOUBLE PRECISION VECTOR DOT PRODUCT.
|
||||||
|
! Optimized using Intel oneMKL BLAS ddot
|
||||||
|
! Mathematical equivalence: DGVV = sum_{i=1}^{N} V(i)*W(i)
|
||||||
|
|
||||||
SUM = 0.0D0
|
DOUBLE PRECISION, EXTERNAL :: DDOT
|
||||||
DO 10 I = 1,N
|
DGVV = DDOT(N, V, 1, W, 1)
|
||||||
SUM = SUM + V(I)*W(I)
|
|
||||||
10 CONTINUE
|
|
||||||
DGVV = SUM
|
|
||||||
RETURN
|
RETURN
|
||||||
END
|
END
|
||||||
|
|||||||
@@ -487,6 +487,201 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
|
|||||||
|
|
||||||
end subroutine lopsided
|
end subroutine lopsided
|
||||||
|
|
||||||
|
!-----------------------------------------------------------------------------
|
||||||
|
! Combined advection (lopsided) + Kreiss-Oliger dissipation (kodis)
|
||||||
|
! Shares the symmetry_bd buffer fh, eliminating one full-grid copy per call.
|
||||||
|
! Mathematically identical to calling lopsided then kodis separately.
|
||||||
|
!-----------------------------------------------------------------------------
|
||||||
|
subroutine lopsided_kodis(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA,eps)
|
||||||
|
implicit none
|
||||||
|
|
||||||
|
!~~~~~~> Input parameters:
|
||||||
|
|
||||||
|
integer, intent(in) :: ex(1:3),Symmetry
|
||||||
|
real*8, intent(in) :: X(1:ex(1)),Y(1:ex(2)),Z(1:ex(3))
|
||||||
|
real*8,dimension(ex(1),ex(2),ex(3)),intent(in) :: f,Sfx,Sfy,Sfz
|
||||||
|
|
||||||
|
real*8,dimension(ex(1),ex(2),ex(3)),intent(inout):: f_rhs
|
||||||
|
real*8,dimension(3),intent(in) ::SoA
|
||||||
|
real*8,intent(in) :: eps
|
||||||
|
|
||||||
|
!~~~~~~> local variables:
|
||||||
|
! note index -2,-1,0, so we have 3 extra points
|
||||||
|
real*8,dimension(-2:ex(1),-2:ex(2),-2:ex(3)) :: fh
|
||||||
|
integer :: imin,jmin,kmin,imax,jmax,kmax,i,j,k
|
||||||
|
real*8 :: dX,dY,dZ
|
||||||
|
real*8 :: d12dx,d12dy,d12dz,d2dx,d2dy,d2dz
|
||||||
|
real*8, parameter :: ZEO=0.d0,ONE=1.d0, F3=3.d0
|
||||||
|
real*8, parameter :: TWO=2.d0,F6=6.0d0,F18=1.8d1
|
||||||
|
real*8, parameter :: F12=1.2d1, F10=1.d1,EIT=8.d0
|
||||||
|
integer, parameter :: NO_SYMM = 0, EQ_SYMM = 1, OCTANT = 2
|
||||||
|
! kodis parameters
|
||||||
|
real*8, parameter :: SIX=6.d0,FIT=1.5d1,TWT=2.d1
|
||||||
|
real*8, parameter :: cof=6.4d1 ! 2^6
|
||||||
|
|
||||||
|
dX = X(2)-X(1)
|
||||||
|
dY = Y(2)-Y(1)
|
||||||
|
dZ = Z(2)-Z(1)
|
||||||
|
|
||||||
|
d12dx = ONE/F12/dX
|
||||||
|
d12dy = ONE/F12/dY
|
||||||
|
d12dz = ONE/F12/dZ
|
||||||
|
|
||||||
|
d2dx = ONE/TWO/dX
|
||||||
|
d2dy = ONE/TWO/dY
|
||||||
|
d2dz = ONE/TWO/dZ
|
||||||
|
|
||||||
|
imax = ex(1)
|
||||||
|
jmax = ex(2)
|
||||||
|
kmax = ex(3)
|
||||||
|
|
||||||
|
imin = 1
|
||||||
|
jmin = 1
|
||||||
|
kmin = 1
|
||||||
|
if(Symmetry > NO_SYMM .and. dabs(Z(1)) < dZ) kmin = -2
|
||||||
|
if(Symmetry > EQ_SYMM .and. dabs(X(1)) < dX) imin = -2
|
||||||
|
if(Symmetry > EQ_SYMM .and. dabs(Y(1)) < dY) jmin = -2
|
||||||
|
|
||||||
|
! Single symmetry_bd call shared by both advection and dissipation
|
||||||
|
call symmetry_bd(3,ex,f,fh,SoA)
|
||||||
|
|
||||||
|
! ---- Advection (lopsided) loop ----
|
||||||
|
! upper bound set ex-1 only for efficiency,
|
||||||
|
! the loop body will set ex 0 also
|
||||||
|
do k=1,ex(3)-1
|
||||||
|
do j=1,ex(2)-1
|
||||||
|
do i=1,ex(1)-1
|
||||||
|
! x direction
|
||||||
|
if(Sfx(i,j,k) > ZEO)then
|
||||||
|
if(i+3 <= imax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfx(i,j,k)*d12dx*(-F3*fh(i-1,j,k)-F10*fh(i,j,k)+F18*fh(i+1,j,k) &
|
||||||
|
-F6*fh(i+2,j,k)+ fh(i+3,j,k))
|
||||||
|
elseif(i+2 <= imax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfx(i,j,k)*d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
|
||||||
|
|
||||||
|
elseif(i+1 <= imax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfx(i,j,k)*d12dx*(-F3*fh(i+1,j,k)-F10*fh(i,j,k)+F18*fh(i-1,j,k) &
|
||||||
|
-F6*fh(i-2,j,k)+ fh(i-3,j,k))
|
||||||
|
endif
|
||||||
|
elseif(Sfx(i,j,k) < ZEO)then
|
||||||
|
if(i-3 >= imin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfx(i,j,k)*d12dx*(-F3*fh(i+1,j,k)-F10*fh(i,j,k)+F18*fh(i-1,j,k) &
|
||||||
|
-F6*fh(i-2,j,k)+ fh(i-3,j,k))
|
||||||
|
elseif(i-2 >= imin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfx(i,j,k)*d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
|
||||||
|
|
||||||
|
elseif(i-1 >= imin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfx(i,j,k)*d12dx*(-F3*fh(i-1,j,k)-F10*fh(i,j,k)+F18*fh(i+1,j,k) &
|
||||||
|
-F6*fh(i+2,j,k)+ fh(i+3,j,k))
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
! y direction
|
||||||
|
if(Sfy(i,j,k) > ZEO)then
|
||||||
|
if(j+3 <= jmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfy(i,j,k)*d12dy*(-F3*fh(i,j-1,k)-F10*fh(i,j,k)+F18*fh(i,j+1,k) &
|
||||||
|
-F6*fh(i,j+2,k)+ fh(i,j+3,k))
|
||||||
|
elseif(j+2 <= jmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfy(i,j,k)*d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
|
||||||
|
|
||||||
|
elseif(j+1 <= jmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfy(i,j,k)*d12dy*(-F3*fh(i,j+1,k)-F10*fh(i,j,k)+F18*fh(i,j-1,k) &
|
||||||
|
-F6*fh(i,j-2,k)+ fh(i,j-3,k))
|
||||||
|
endif
|
||||||
|
elseif(Sfy(i,j,k) < ZEO)then
|
||||||
|
if(j-3 >= jmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfy(i,j,k)*d12dy*(-F3*fh(i,j+1,k)-F10*fh(i,j,k)+F18*fh(i,j-1,k) &
|
||||||
|
-F6*fh(i,j-2,k)+ fh(i,j-3,k))
|
||||||
|
elseif(j-2 >= jmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfy(i,j,k)*d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
|
||||||
|
|
||||||
|
elseif(j-1 >= jmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfy(i,j,k)*d12dy*(-F3*fh(i,j-1,k)-F10*fh(i,j,k)+F18*fh(i,j+1,k) &
|
||||||
|
-F6*fh(i,j+2,k)+ fh(i,j+3,k))
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
! z direction
|
||||||
|
if(Sfz(i,j,k) > ZEO)then
|
||||||
|
if(k+3 <= kmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k-1)-F10*fh(i,j,k)+F18*fh(i,j,k+1) &
|
||||||
|
-F6*fh(i,j,k+2)+ fh(i,j,k+3))
|
||||||
|
elseif(k+2 <= kmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfz(i,j,k)*d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
|
||||||
|
|
||||||
|
elseif(k+1 <= kmax)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k+1)-F10*fh(i,j,k)+F18*fh(i,j,k-1) &
|
||||||
|
-F6*fh(i,j,k-2)+ fh(i,j,k-3))
|
||||||
|
endif
|
||||||
|
elseif(Sfz(i,j,k) < ZEO)then
|
||||||
|
if(k-3 >= kmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)- &
|
||||||
|
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k+1)-F10*fh(i,j,k)+F18*fh(i,j,k-1) &
|
||||||
|
-F6*fh(i,j,k-2)+ fh(i,j,k-3))
|
||||||
|
elseif(k-2 >= kmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfz(i,j,k)*d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
|
||||||
|
|
||||||
|
elseif(k-1 >= kmin)then
|
||||||
|
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
|
||||||
|
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k-1)-F10*fh(i,j,k)+F18*fh(i,j,k+1) &
|
||||||
|
-F6*fh(i,j,k+2)+ fh(i,j,k+3))
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
enddo
|
||||||
|
enddo
|
||||||
|
enddo
|
||||||
|
|
||||||
|
! ---- Dissipation (kodis) loop ----
|
||||||
|
if(eps > ZEO) then
|
||||||
|
do k=1,ex(3)
|
||||||
|
do j=1,ex(2)
|
||||||
|
do i=1,ex(1)
|
||||||
|
|
||||||
|
if(i-3 >= imin .and. i+3 <= imax .and. &
|
||||||
|
j-3 >= jmin .and. j+3 <= jmax .and. &
|
||||||
|
k-3 >= kmin .and. k+3 <= kmax) then
|
||||||
|
f_rhs(i,j,k) = f_rhs(i,j,k) + eps/cof *( ( &
|
||||||
|
(fh(i-3,j,k)+fh(i+3,j,k)) - &
|
||||||
|
SIX*(fh(i-2,j,k)+fh(i+2,j,k)) + &
|
||||||
|
FIT*(fh(i-1,j,k)+fh(i+1,j,k)) - &
|
||||||
|
TWT* fh(i,j,k) )/dX + &
|
||||||
|
( &
|
||||||
|
(fh(i,j-3,k)+fh(i,j+3,k)) - &
|
||||||
|
SIX*(fh(i,j-2,k)+fh(i,j+2,k)) + &
|
||||||
|
FIT*(fh(i,j-1,k)+fh(i,j+1,k)) - &
|
||||||
|
TWT* fh(i,j,k) )/dY + &
|
||||||
|
( &
|
||||||
|
(fh(i,j,k-3)+fh(i,j,k+3)) - &
|
||||||
|
SIX*(fh(i,j,k-2)+fh(i,j,k+2)) + &
|
||||||
|
FIT*(fh(i,j,k-1)+fh(i,j,k+1)) - &
|
||||||
|
TWT* fh(i,j,k) )/dZ )
|
||||||
|
endif
|
||||||
|
|
||||||
|
enddo
|
||||||
|
enddo
|
||||||
|
enddo
|
||||||
|
endif
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
end subroutine lopsided_kodis
|
||||||
|
|
||||||
#elif (ghost_width == 4)
|
#elif (ghost_width == 4)
|
||||||
! sixth order code
|
! sixth order code
|
||||||
! Compute advection terms in right hand sides of field equations
|
! Compute advection terms in right hand sides of field equations
|
||||||
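lopsided_kodis above fuses the upwind advection stencil with the Kreiss-Oliger dissipation so the padded buffer from symmetry_bd is filled once per variable instead of twice. A heavily reduced 1-D NumPy sketch of the same idea; the helpers and the first-order upwind stencil are placeholders, while the real routine is 3-D, fourth-order lopsided, and symmetry-aware.

```python
import numpy as np

def fill_ghosts(f, ng=3):
    """One shared padded copy of f (here: simple edge padding)."""
    return np.pad(f, ng, mode='edge')

def fused_advection_dissipation(f, v, dx, eps, ng=3):
    """Add a first-order upwind advection term and a 6th-order
    Kreiss-Oliger dissipation term to the RHS, reusing one padded buffer."""
    fh = fill_ghosts(f, ng)                     # filled once, used by both terms
    i = np.arange(ng, ng + f.size)              # interior indices in the padded array
    rhs = np.zeros_like(f)

    # upwind advection: one-sided difference chosen by the sign of v
    upwind = np.where(v > 0,
                      (fh[i] - fh[i - 1]) / dx,
                      (fh[i + 1] - fh[i]) / dx)
    rhs += v * upwind

    # 6th-order Kreiss-Oliger dissipation, eps/2^6 times the 7-point stencil / dx
    ko = (fh[i - 3] - 6 * fh[i - 2] + 15 * fh[i - 1] - 20 * fh[i]
          + 15 * fh[i + 1] - 6 * fh[i + 2] + fh[i + 3])
    rhs += eps / 64.0 * ko / dx
    return rhs

f = np.sin(np.linspace(0, 2 * np.pi, 64))
rhs = fused_advection_dissipation(f, v=np.full_like(f, 0.5), dx=0.1, eps=0.1)
```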
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
#ifndef MICRODEF_H
|
#ifndef MICRODEF_H
|
||||||
#define MICRODEF_H
|
#define MICRODEF_H
|
||||||
|
|
||||||
#include "microdef.fh"
|
#include "macrodef.fh"
|
||||||
|
|
||||||
// application parameters
|
// application parameters
|
||||||
|
|
||||||
|
|||||||
@@ -16,6 +16,12 @@ include makefile.inc
|
|||||||
.cu.o:
|
.cu.o:
|
||||||
$(Cu) $(CUDA_APP_FLAGS) -c $< -o $@ $(CUDA_LIB_PATH)
|
$(Cu) $(CUDA_APP_FLAGS) -c $< -o $@ $(CUDA_LIB_PATH)
|
||||||
|
|
||||||
|
TwoPunctures.o: TwoPunctures.C
|
||||||
|
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@
|
||||||
|
|
||||||
|
TwoPunctureABE.o: TwoPunctureABE.C
|
||||||
|
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@
|
||||||
|
|
||||||
# Input files
|
# Input files
|
||||||
C++FILES = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
|
C++FILES = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
|
||||||
cgh.o bssn_class.o surface_integral.o ShellPatch.o\
|
cgh.o bssn_class.o surface_integral.o ShellPatch.o\
|
||||||
@@ -96,7 +102,7 @@ ABEGPU: $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES)
|
|||||||
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES) $(LDLIBS)
|
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES) $(LDLIBS)
|
||||||
|
|
||||||
TwoPunctureABE: $(TwoPunctureFILES)
|
TwoPunctureABE: $(TwoPunctureFILES)
|
||||||
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(TwoPunctureFILES) $(LDLIBS)
|
$(CLINKER) $(CXXAPPFLAGS) -qopenmp -o $@ $(TwoPunctureFILES) $(LDLIBS)
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm *.o ABE ABEGPU TwoPunctureABE make.log -f
|
rm *.o ABE ABEGPU TwoPunctureABE make.log -f
|
||||||
|
|||||||
@@ -1,19 +1,30 @@
|
|||||||
|
## GCC version (commented out)
|
||||||
## filein = -I/usr/include -I/usr/lib/x86_64-linux-gnu/mpich/include -I/usr/lib/x86_64-linux-gnu/openmpi/lib/ -I/usr/lib/gcc/x86_64-linux-gnu/11/ -I/usr/include/c++/11/
|
## filein = -I/usr/include -I/usr/lib/x86_64-linux-gnu/mpich/include -I/usr/lib/x86_64-linux-gnu/openmpi/lib/ -I/usr/lib/gcc/x86_64-linux-gnu/11/ -I/usr/include/c++/11/
|
||||||
|
## filein = -I/usr/include/ -I/usr/include/openmpi-x86_64/ -I/usr/lib/x86_64-linux-gnu/openmpi/include/ -I/usr/lib/x86_64-linux-gnu/openmpi/lib/ -I/usr/lib/gcc/x86_64-linux-gnu/11/ -I/usr/include/c++/11/
|
||||||
|
## LDLIBS = -L/usr/lib/x86_64-linux-gnu -L/usr/lib64 -L/usr/lib/gcc/x86_64-linux-gnu/11 -lgfortran -lmpi -lgfortran
|
||||||
|
|
||||||
filein = -I/usr/include/ -I/usr/lib/x86_64-linux-gnu/openmpi/include/ -I/usr/lib/x86_64-linux-gnu/openmpi/lib/ -I/usr/lib/gcc/x86_64-linux-gnu/11/ -I/usr/include/c++/11/
|
## Intel oneAPI version with oneMKL (Optimized for performance)
|
||||||
|
filein = -I/usr/include/ -I${MKLROOT}/include
|
||||||
|
|
||||||
## LDLIBS = -L/usr/lib/x86_64-linux-gnu -lmpich -L/usr/lib64 -L/usr/lib/gcc/x86_64-linux-gnu/11 -lgfortran
|
## Using sequential MKL (OpenMP disabled for better single-threaded performance)
|
||||||
LDLIBS = -L/usr/lib/x86_64-linux-gnu -L/usr/lib64 -L/usr/lib/gcc/x86_64-linux-gnu/11 -lgfortran -lmpi -lgfortran
|
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
|
||||||
|
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl
|
||||||
|
|
||||||
CXXAPPFLAGS = -O3 -Wno-deprecated -Dfortran3 -Dnewc
|
## Aggressive optimization flags + PGO Phase 2 (profile-guided optimization)
|
||||||
#f90appflags = -O3 -fpp
|
## -fprofile-instr-use: use collected profile data to guide optimization decisions
|
||||||
f90appflags = -O3 -x f95-cpp-input
|
## (branch prediction, basic block layout, inlining, loop unrolling)
|
||||||
f90 = gfortran
|
PROFDATA = ../../pgo_profile/default.profdata
|
||||||
f77 = gfortran
|
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
||||||
CXX = g++
|
-fprofile-instr-use=$(PROFDATA) \
|
||||||
CC = gcc
|
-Dfortran3 -Dnewc -I${MKLROOT}/include
|
||||||
CLINKER = mpic++
|
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
||||||
|
-fprofile-instr-use=$(PROFDATA) \
|
||||||
|
-align array64byte -fpp -I${MKLROOT}/include
|
||||||
|
f90 = ifx
|
||||||
|
f77 = ifx
|
||||||
|
CXX = icpx
|
||||||
|
CC = icx
|
||||||
|
CLINKER = mpiicpx
|
||||||
|
|
||||||
Cu = nvcc
|
Cu = nvcc
|
||||||
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include
|
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include
|
||||||
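The makefile.inc hunk switches to the Intel LLVM-based compilers, links sequential oneMKL, and enables phase 2 of profile-guided optimization through -fprofile-instr-use. A hedged sketch of the surrounding two-phase workflow, driven from Python; the flags, the llvm-profdata merge step, and default.profdata come from this change set, while the exact make invocations and paths are illustrative assumptions.

```python
import subprocess

PROFDIR = "pgo_profile"        # assumed location of the raw profiles
DRY_RUN = True                 # set False to actually execute the steps

steps = [
    # phase 1: instrumented build and a reduced-size profiling run
    "make clean && make ABE CXXAPPFLAGS='-O2 -xHost -fma -fprofile-instr-generate'",
    "mpirun -np 1 ./ABE",
    # merge the .profraw files written by the instrumented run
    f"llvm-profdata merge -o {PROFDIR}/default.profdata {PROFDIR}/*.profraw",
    # phase 2: optimized rebuild guided by the merged profile
    f"make clean && make ABE CXXAPPFLAGS='-O3 -xHost -fprofile-instr-use={PROFDIR}/default.profdata'",
]

for cmd in steps:
    print("+", cmd)
    if not DRY_RUN:
        subprocess.run(cmd, shell=True, check=True)
```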
|
|||||||
@@ -220,16 +220,9 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
pox[2][n] = rex * nz_g[n];
|
pox[2][n] = rex * nz_g[n];
|
||||||
}
|
}
|
||||||
|
|
||||||
double *shellf;
|
|
||||||
shellf = new double[n_tot * InList];
|
|
||||||
|
|
||||||
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
|
|
||||||
|
|
||||||
int mp, Lp, Nmin, Nmax;
|
int mp, Lp, Nmin, Nmax;
|
||||||
|
|
||||||
mp = n_tot / cpusize;
|
mp = n_tot / cpusize;
|
||||||
Lp = n_tot - cpusize * mp;
|
Lp = n_tot - cpusize * mp;
|
||||||
|
|
||||||
if (Lp > myrank)
|
if (Lp > myrank)
|
||||||
{
|
{
|
||||||
Nmin = myrank * mp + myrank;
|
Nmin = myrank * mp + myrank;
|
||||||
@@ -241,6 +234,11 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
Nmax = Nmin + mp - 1;
|
Nmax = Nmin + mp - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
double *shellf;
|
||||||
|
shellf = new double[n_tot * InList];
|
||||||
|
|
||||||
|
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
|
||||||
|
|
||||||
//|~~~~~> Integrate the dot product of Dphi with the surface normal.
|
//|~~~~~> Integrate the dot product of Dphi with the surface normal.
|
||||||
|
|
||||||
double *RP_out, *IP_out;
|
double *RP_out, *IP_out;
|
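This hunk and the later surf_MassPAng one move the shellf allocation below the rank-range computation and pass Nmin/Nmax to Interp_Points, so each rank interpolates only its own slice of the n_tot surface points. The mp/Lp arithmetic hands the remainder to the first Lp ranks; the else branch below is reconstructed from that arithmetic, since the diff only shows part of it.

```python
def point_range(n_tot, cpusize, myrank):
    """Replicate the mp/Lp split: ranks 0..Lp-1 own mp+1 points, the rest
    own mp, covering 0..n_tot-1 without gaps or overlap."""
    mp = n_tot // cpusize
    Lp = n_tot - cpusize * mp          # remainder
    if Lp > myrank:
        nmin = myrank * mp + myrank
        nmax = nmin + mp               # mp + 1 points
    else:
        nmin = myrank * mp + Lp        # reconstructed branch (not shown in the diff)
        nmax = nmin + mp - 1           # mp points
    return nmin, nmax

# every point is owned by exactly one rank
n_tot, cpusize = 1000, 7
owned = [point_range(n_tot, cpusize, r) for r in range(cpusize)]
covered = [i for lo, hi in owned for i in range(lo, hi + 1)]
assert covered == list(range(n_tot))
```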
||||||
@@ -363,8 +361,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
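Packing RP_out and IP_out into one buffer replaces two MPI_Allreduce calls with one; every collective carries a fixed latency cost, so fusing small reductions helps at larger rank counts while leaving the summed values unchanged. A minimal mpi4py sketch of the same packing (mpi4py is an assumption used for illustration; the project calls the C MPI API directly):

```python
# run with, e.g.: mpirun -np 4 python fused_allreduce.py   (file name is hypothetical)
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
NN = 8
rp_out = np.full(NN, comm.Get_rank(), dtype=np.float64)
ip_out = np.full(NN, 2.0 * comm.Get_rank(), dtype=np.float64)

# pack both arrays back to back, reduce once, then unpack
sendbuf = np.concatenate([rp_out, ip_out])
recvbuf = np.empty_like(sendbuf)
comm.Allreduce(sendbuf, recvbuf, op=MPI.SUM)
rp, ip = recvbuf[:NN], recvbuf[NN:]

# same totals as two separate reductions
rp_ref = np.empty(NN); ip_ref = np.empty(NN)
comm.Allreduce(rp_out, rp_ref, op=MPI.SUM)
comm.Allreduce(ip_out, ip_ref, op=MPI.SUM)
assert np.allclose(rp, rp_ref) and np.allclose(ip, ip_ref)
```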
|
||||||
@@ -556,8 +563,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -735,8 +751,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH, var *Rpsi4
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -984,8 +1009,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -1419,8 +1453,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -1854,8 +1897,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2040,8 +2092,17 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch2 *GH, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2226,8 +2287,17 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch *GH, var *R
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2314,25 +2384,9 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
pox[2][n] = rex * nz_g[n];
|
pox[2][n] = rex * nz_g[n];
|
||||||
}
|
}
|
||||||
|
|
||||||
double *shellf;
|
|
||||||
shellf = new double[n_tot * InList];
|
|
||||||
|
|
||||||
// we have assumed there is only one box on this level,
|
|
||||||
// so we do not need loop boxes
|
|
||||||
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
|
|
||||||
|
|
||||||
double Mass_out = 0;
|
|
||||||
double ang_outx, ang_outy, ang_outz;
|
|
||||||
double p_outx, p_outy, p_outz;
|
|
||||||
ang_outx = ang_outy = ang_outz = 0.0;
|
|
||||||
p_outx = p_outy = p_outz = 0.0;
|
|
||||||
const double f1o8 = 0.125;
|
|
||||||
|
|
||||||
int mp, Lp, Nmin, Nmax;
|
int mp, Lp, Nmin, Nmax;
|
||||||
|
|
||||||
mp = n_tot / cpusize;
|
mp = n_tot / cpusize;
|
||||||
Lp = n_tot - cpusize * mp;
|
Lp = n_tot - cpusize * mp;
|
||||||
|
|
||||||
if (Lp > myrank)
|
if (Lp > myrank)
|
||||||
{
|
{
|
||||||
Nmin = myrank * mp + myrank;
|
Nmin = myrank * mp + myrank;
|
||||||
@@ -2344,6 +2398,20 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
Nmax = Nmin + mp - 1;
|
Nmax = Nmin + mp - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
double *shellf;
|
||||||
|
shellf = new double[n_tot * InList];
|
||||||
|
|
||||||
|
// we have assumed there is only one box on this level,
|
||||||
|
// so we do not need to loop over boxes
|
||||||
|
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
|
||||||
|
|
||||||
|
double Mass_out = 0;
|
||||||
|
double ang_outx, ang_outy, ang_outz;
|
||||||
|
double p_outx, p_outy, p_outz;
|
||||||
|
ang_outx = ang_outy = ang_outz = 0.0;
|
||||||
|
p_outx = p_outy = p_outz = 0.0;
|
||||||
|
const double f1o8 = 0.125;
|
||||||
|
|
||||||
double Chi, Psi;
|
double Chi, Psi;
|
||||||
double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
|
double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
|
||||||
double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
|
double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
|
||||||
@@ -2464,15 +2532,13 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
|
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
||||||
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double scalar_in[7];
|
||||||
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
||||||
|
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
||||||
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
}
|
||||||
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -2735,15 +2801,13 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
{
|
||||||
|
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
||||||
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
double scalar_in[7];
|
||||||
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
||||||
|
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
||||||
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
}
|
||||||
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
|
||||||
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -3020,15 +3084,13 @@ void surface_integral::surf_MassPAng(double rex, int lev, ShellPatch *GH, var *c
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
|
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
||||||
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double scalar_in[7];
|
||||||
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
||||||
|
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
||||||
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
}
|
||||||
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -3607,8 +3669,17 @@ void surface_integral::surf_Wave(double rex, cgh *GH, ShellPatch *SH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
{
|
||||||
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
double *RPIP_out = new double[2 * NN];
|
||||||
|
double *RPIP = new double[2 * NN];
|
||||||
|
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
||||||
|
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
||||||
|
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
memcpy(RP, RPIP, NN * sizeof(double));
|
||||||
|
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
||||||
|
delete[] RPIP_out;
|
||||||
|
delete[] RPIP;
|
||||||
|
}
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,18 @@
|
|||||||
|
|
||||||
import AMSS_NCKU_Input as input_data
|
import AMSS_NCKU_Input as input_data
|
||||||
import subprocess
|
import subprocess
|
||||||
|
import time
|
||||||
|
## CPU core binding configuration using taskset
|
||||||
|
## taskset ensures all child processes inherit the CPU affinity mask
|
||||||
|
## This forces make and all compiler processes to use only nohz_full cores (4-55, 60-111)
|
||||||
|
## Format: taskset -c 4-55,60-111 ensures processes only run on these cores
|
||||||
|
#NUMACTL_CPU_BIND = "taskset -c 0-111"
|
||||||
|
NUMACTL_CPU_BIND = "taskset -c 16-47,64-95"
|
||||||
|
|
||||||
|
## Build parallelism configuration
|
||||||
|
## Use nohz_full cores (4-55, 60-111) for compilation: 52 + 52 = 104 cores
|
||||||
|
## Set make -j to utilize available cores for faster builds
|
||||||
|
BUILD_JOBS = 96
|
||||||
|
|
||||||
|
|
||||||
##################################################################
|
##################################################################
|
||||||
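The driver now prefixes build and MPI launch commands with a taskset mask and raises the make -j level. A small hedged helper that checks the requested parallelism against the CPUs actually available to the process before composing the command; the function names are illustrative and not part of the repository.

```python
import os

CPU_BIND = "taskset -c 16-47,64-95"   # example mask, mirroring the driver setting
BUILD_JOBS = 96

def allowed_cpus():
    """CPUs the current process may run on (Linux only)."""
    return sorted(os.sched_getaffinity(0))

def effective_jobs(requested):
    """Clamp -j to the number of CPUs actually available."""
    return min(requested, len(allowed_cpus()))

def build_command(target="ABE"):
    return f"{CPU_BIND} make -j{effective_jobs(BUILD_JOBS)} {target}"

if __name__ == "__main__":
    print("CPUs available to this process:", len(allowed_cpus()))
    print("would run:", build_command())
```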
@@ -26,11 +38,11 @@ def makefile_ABE():
|
|||||||
print( " Compiling the AMSS-NCKU executable file ABE/ABEGPU " )
|
print( " Compiling the AMSS-NCKU executable file ABE/ABEGPU " )
|
||||||
print( )
|
print( )
|
||||||
|
|
||||||
## Build command
|
## Build command with CPU binding to nohz_full cores
|
||||||
if (input_data.GPU_Calculation == "no"):
|
if (input_data.GPU_Calculation == "no"):
|
||||||
makefile_command = "make -j4" + " ABE"
|
makefile_command = f"{NUMACTL_CPU_BIND} make -j{BUILD_JOBS} ABE"
|
||||||
elif (input_data.GPU_Calculation == "yes"):
|
elif (input_data.GPU_Calculation == "yes"):
|
||||||
makefile_command = "make -j4" + " ABEGPU"
|
makefile_command = f"{NUMACTL_CPU_BIND} make -j{BUILD_JOBS} ABEGPU"
|
||||||
else:
|
else:
|
||||||
print( " CPU/GPU numerical calculation setting is wrong " )
|
print( " CPU/GPU numerical calculation setting is wrong " )
|
||||||
print( )
|
print( )
|
||||||
@@ -67,8 +79,8 @@ def makefile_TwoPunctureABE():
|
|||||||
print( " Compiling the AMSS-NCKU executable file TwoPunctureABE " )
|
print( " Compiling the AMSS-NCKU executable file TwoPunctureABE " )
|
||||||
print( )
|
print( )
|
||||||
|
|
||||||
## Build command
|
## Build command with CPU binding to nohz_full cores
|
||||||
makefile_command = "make" + " TwoPunctureABE"
|
makefile_command = f"{NUMACTL_CPU_BIND} make -j{BUILD_JOBS} TwoPunctureABE"
|
||||||
|
|
||||||
## Execute the command with subprocess.Popen and stream output
|
## Execute the command with subprocess.Popen and stream output
|
||||||
makefile_process = subprocess.Popen(makefile_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
|
makefile_process = subprocess.Popen(makefile_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
|
||||||
@@ -105,10 +117,11 @@ def run_ABE():
|
|||||||
## Define the command to run; cast other values to strings as needed
|
## Define the command to run; cast other values to strings as needed
|
||||||
|
|
||||||
if (input_data.GPU_Calculation == "no"):
|
if (input_data.GPU_Calculation == "no"):
|
||||||
mpi_command = "mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
||||||
|
#mpi_command = " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
||||||
mpi_command_outfile = "ABE_out.log"
|
mpi_command_outfile = "ABE_out.log"
|
||||||
elif (input_data.GPU_Calculation == "yes"):
|
elif (input_data.GPU_Calculation == "yes"):
|
||||||
mpi_command = "mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
|
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
|
||||||
mpi_command_outfile = "ABEGPU_out.log"
|
mpi_command_outfile = "ABEGPU_out.log"
|
||||||
|
|
||||||
## Execute the MPI command and stream output
|
## Execute the MPI command and stream output
|
||||||
@@ -141,13 +154,14 @@ def run_ABE():
|
|||||||
## Run the AMSS-NCKU TwoPuncture program TwoPunctureABE
|
## Run the AMSS-NCKU TwoPuncture program TwoPunctureABE
|
||||||
|
|
||||||
def run_TwoPunctureABE():
|
def run_TwoPunctureABE():
|
||||||
|
tp_time1=time.time()
|
||||||
print( )
|
print( )
|
||||||
print( " Running the AMSS-NCKU executable file TwoPunctureABE " )
|
print( " Running the AMSS-NCKU executable file TwoPunctureABE " )
|
||||||
print( )
|
print( )
|
||||||
|
|
||||||
## Define the command to run
|
## Define the command to run
|
||||||
TwoPuncture_command = "./TwoPunctureABE"
|
#TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
|
||||||
|
TwoPuncture_command = " ./TwoPunctureABE"
|
||||||
TwoPuncture_command_outfile = "TwoPunctureABE_out.log"
|
TwoPuncture_command_outfile = "TwoPunctureABE_out.log"
|
||||||
|
|
||||||
## Execute the command with subprocess.Popen and stream output
|
## Execute the command with subprocess.Popen and stream output
|
||||||
@@ -168,7 +182,9 @@ def run_TwoPunctureABE():
|
|||||||
print( )
|
print( )
|
||||||
print( " The TwoPunctureABE simulation is finished " )
|
print( " The TwoPunctureABE simulation is finished " )
|
||||||
print( )
|
print( )
|
||||||
|
tp_time2=time.time()
|
||||||
|
et=tp_time2-tp_time1
|
||||||
|
print(f"Used time: {et}")
|
||||||
return
|
return
|
||||||
|
|
||||||
##################################################################
|
##################################################################
|
||||||
|
|||||||
29
parallel_plot_helper.py
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
import multiprocessing
|
||||||
|
|
||||||
|
def run_plot_task(task):
|
||||||
|
"""Execute a single plotting task.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
task : tuple
|
||||||
|
A tuple of (function, args_tuple) where function is a callable
|
||||||
|
plotting function and args_tuple contains its arguments.
|
||||||
|
"""
|
||||||
|
func, args = task
|
||||||
|
return func(*args)
|
||||||
|
|
||||||
|
|
||||||
|
def run_plot_tasks_parallel(plot_tasks):
|
||||||
|
"""Execute a list of independent plotting tasks in parallel.
|
||||||
|
|
||||||
|
Uses the 'fork' context to create worker processes so that the main
|
||||||
|
script is NOT re-imported/re-executed in child processes.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
plot_tasks : list of tuples
|
||||||
|
Each element is (function, args_tuple).
|
||||||
|
"""
|
||||||
|
ctx = multiprocessing.get_context('fork')
|
||||||
|
with ctx.Pool() as pool:
|
||||||
|
pool.map(run_plot_task, plot_tasks)
|
||||||
97
pgo_profile/PGO_Profile_Analysis.md
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
# AMSS-NCKU PGO Profile Analysis Report
|
||||||
|
|
||||||
|
## 1. Profiling Environment
|
||||||
|
|
||||||
|
| Item | Value |
|
||||||
|
|------|-------|
|
||||||
|
| Compiler | Intel oneAPI DPC++/C++ 2025.3.0 (icpx/ifx) |
|
||||||
|
| Instrumentation Flag | `-fprofile-instr-generate` |
|
||||||
|
| Optimization Level (instrumented) | `-O2 -xHost -fma` |
|
||||||
|
| MPI Processes | 1 (single process to avoid MPI+instrumentation deadlock) |
|
||||||
|
| Profile File | `default_9725750769337483397_0.profraw` (327 KB) |
|
||||||
|
| Merged Profile | `default.profdata` (394 KB) |
|
||||||
|
| llvm-profdata | `/home/intel/oneapi/compiler/2025.3/bin/compiler/llvm-profdata` |
|
||||||
|
|
||||||
## 2. Reduced Simulation Parameters (for profiling run)

| Parameter | Production Value | Profiling Value |
|-----------|------------------|-----------------|
| MPI_processes | 64 | 1 |
| grid_level | 9 | 4 |
| static_grid_level | 5 | 3 |
| static_grid_number | 96 | 24 |
| moving_grid_number | 48 | 16 |
| largest_box_xyz_max | 320^3 | 160^3 |
| Final_Evolution_Time | 1000.0 | 10.0 |
| Evolution_Step_Number | 10,000,000 | 1,000 |
| Detector_Number | 12 | 2 |

## 3. Profile Summary

| Metric | Value |
|--------|-------|
| Total instrumented functions | 1,392 |
| Functions with non-zero counts | 117 (8.4%) |
| Functions with zero counts | 1,275 (91.6%) |
| Maximum function entry count | 386,459,248 |
| Maximum internal block count | 370,477,680 |
| Total block count | 4,198,023,118 |
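Editorial note (not part of the committed report): the per-function and per-block counts summarized above can be dumped from the merged profile with `llvm-profdata show`; the flags below are standard options, not a record of the exact command used here.

```python
## Sketch: print per-function entry and block counts from the merged profile.
import subprocess

LLVM_PROFDATA = "/home/intel/oneapi/compiler/2025.3/bin/compiler/llvm-profdata"

subprocess.run(
    [LLVM_PROFDATA, "show", "--all-functions", "--counts", "default.profdata"],
    check=True,
)
```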
## 4. Top 20 Hotspot Functions

| Rank | Total Count | Max Block Count | Function | Category |
|------|------------|-----------------|----------|----------|
| 1 | 1,241,601,732 | 370,477,680 | `polint_` | Interpolation |
| 2 | 755,994,435 | 230,156,640 | `prolong3_` | Grid prolongation |
| 3 | 667,964,095 | 3,697,792 | `compute_rhs_bssn_` | BSSN RHS evolution |
| 4 | 539,736,051 | 386,459,248 | `symmetry_bd_` | Symmetry boundary |
| 5 | 277,310,808 | 53,170,728 | `lopsided_` | Lopsided FD stencil |
| 6 | 155,534,488 | 94,535,040 | `decide3d_` | 3D grid decision |
| 7 | 119,267,712 | 19,266,048 | `rungekutta4_rout_` | RK4 time integrator |
| 8 | 91,574,616 | 48,824,160 | `kodis_` | Kreiss-Oliger dissipation |
| 9 | 67,555,389 | 43,243,680 | `fderivs_` | Finite differences |
| 10 | 55,296,000 | 42,246,144 | `misc::fact(int)` | Factorial utility |
| 11 | 43,191,071 | 27,663,328 | `fdderivs_` | 2nd-order FD derivatives |
| 12 | 36,233,965 | 22,429,440 | `restrict3_` | Grid restriction |
| 13 | 24,698,512 | 17,231,520 | `polin3_` | Polynomial interpolation |
| 14 | 22,962,942 | 20,968,768 | `copy_` | Data copy |
| 15 | 20,135,696 | 17,259,168 | `Ansorg::barycentric(...)` | Spectral interpolation |
| 16 | 14,650,224 | 7,224,768 | `Ansorg::barycentric_omega(...)` | Spectral weights |
| 17 | 13,242,296 | 2,871,920 | `global_interp_` | Global interpolation |
| 18 | 12,672,000 | 7,734,528 | `sommerfeld_rout_` | Sommerfeld boundary |
| 19 | 6,872,832 | 1,880,064 | `sommerfeld_routbam_` | Sommerfeld boundary (BAM) |
| 20 | 5,709,900 | 2,809,632 | `l2normhelper_` | L2 norm computation |

## 5. Hotspot Category Breakdown

Top 20 functions account for ~98% of total execution counts:

| Category | Functions | Combined Count | Share |
|----------|-----------|---------------|-------|
| Interpolation / Prolongation / Restriction | polint_, prolong3_, restrict3_, polin3_, global_interp_, Ansorg::* | ~2,093M | ~50% |
| BSSN RHS + FD stencils | compute_rhs_bssn_, lopsided_, fderivs_, fdderivs_ | ~1,056M | ~25% |
| Boundary conditions | symmetry_bd_, sommerfeld_rout_, sommerfeld_routbam_ | ~559M | ~13% |
| Time integration | rungekutta4_rout_ | ~119M | ~3% |
| Dissipation | kodis_ | ~92M | ~2% |
| Utilities | misc::fact, decide3d_, copy_, l2normhelper_ | ~256M | ~6% |

## 6. Conclusions

1. **Profile data is valid**: 1,392 functions instrumented, 117 exercised with ~4.2 billion total counts.
2. **Hotspot concentration is high**: Top 5 functions alone account for ~76% of all counts, which is ideal for PGO: the compiler has strong branch/layout optimization targets.
3. **Fortran numerical kernels dominate**: `polint_`, `prolong3_`, `compute_rhs_bssn_`, `symmetry_bd_`, `lopsided_` are all Fortran routines in the inner evolution loop. PGO will optimize their branch prediction and basic block layout.
4. **91.6% of functions have zero counts**: These are code paths for unused features (GPU, BSSN-EScalar, BSSN-EM, Z4C, etc.). PGO will deprioritize them, improving instruction cache utilization.
5. **Profile is representative**: Despite the reduced grid size, the code path coverage matches production; the same kernels (RHS, prolongation, restriction, boundary) are exercised. PGO branch probabilities from this profile will transfer well to full-scale runs.

## 7. PGO Phase 2 Usage

To apply the profile, use the following flags in `makefile.inc`:

```makefile
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
    -fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
    -Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
    -fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
    -align array64byte -fpp -I${MKLROOT}/include
```
BIN
pgo_profile/default.profdata
Normal file
Binary file not shown.
BIN
pgo_profile/default.profdata.backup
Normal file
Binary file not shown.
BIN
pgo_profile/default_15874826282416242821_0_58277.profraw
Normal file
Binary file not shown.
BIN
pgo_profile/default_9725750769337483397_0.profraw
Normal file
Binary file not shown.
@@ -11,6 +11,8 @@
import numpy ## numpy for array operations
import scipy ## scipy for interpolation and signal processing
import math
import matplotlib
matplotlib.use('Agg') ## use non-interactive backend for multiprocessing safety
import matplotlib.pyplot as plt ## matplotlib for plotting
import os ## os for system/file operations
@@ -8,16 +8,23 @@
##
#################################################

## Restrict OpenMP to one thread per process so that running
## many workers in parallel does not create an O(workers * BLAS_threads)
## thread explosion. The variable MUST be set before numpy/scipy
## are imported, because the BLAS library reads it only at load time.
import os
os.environ.setdefault("OMP_NUM_THREADS", "1")

import numpy
import scipy
import matplotlib
matplotlib.use('Agg') ## use non-interactive backend for multiprocessing safety
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mpl_toolkits.mplot3d import Axes3D
## import torch
import AMSS_NCKU_Input as input_data

import os

#########################################################################################
@@ -192,3 +199,19 @@ def get_data_xy( Rmin, Rmax, n, data0, time, figure_title, figure_outdir ):

####################################################################################


####################################################################################
## Allow this module to be run as a standalone script so that each
## binary-data plot can be executed in a fresh subprocess whose BLAS
## environment variables (set above) take effect before numpy loads.
##
## Usage: python3 plot_binary_data.py <filename> <binary_outdir> <figure_outdir>
####################################################################################

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 4:
        print(f"Usage: {sys.argv[0]} <filename> <binary_outdir> <figure_outdir>")
        sys.exit(1)
    plot_binary_data(sys.argv[1], sys.argv[2], sys.argv[3])
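Editorial note (not part of the commit): the OMP_NUM_THREADS comment in the hunks above relies on the fact that OpenMP/BLAS runtimes read the variable only when they are first loaded. A minimal sketch of that ordering requirement; the matrix product is just a stand-in workload.

```python
## Sketch: the environment variable must be set before numpy (and its
## BLAS/OpenMP runtime) is imported; setting it afterwards generally has
## no effect on the already-created thread pool.
import os
os.environ.setdefault("OMP_NUM_THREADS", "1")   ## must happen first

import numpy                                    ## runtime reads the variable here

a = numpy.random.rand(512, 512)
b = a @ a                                       ## runs single-threaded under this setting
print(b.shape)
```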
@@ -8,6 +8,8 @@
#################################################

import numpy ## numpy for array operations
import matplotlib
matplotlib.use('Agg') ## use non-interactive backend for multiprocessing safety
import matplotlib.pyplot as plt ## matplotlib for plotting
from mpl_toolkits.mplot3d import Axes3D ## needed for 3D plots
import glob
@@ -15,6 +17,9 @@ import os ## operating system utilities

import plot_binary_data
import AMSS_NCKU_Input as input_data
import subprocess
import sys
import multiprocessing

# plt.rcParams['text.usetex'] = True ## enable LaTeX fonts in plots
@@ -50,10 +55,40 @@ def generate_binary_data_plot( binary_outdir, figure_outdir ):
        file_list.append(x)
        print(x)

    ## Plot each file in the list
    ## Plot each file in parallel using subprocesses.
    ## Each subprocess is a fresh Python process where the BLAS thread-count
    ## environment variables (set at the top of plot_binary_data.py) take
    ## effect before numpy is imported. This avoids the thread explosion
    ## that occurs when multiprocessing.Pool with 'fork' context inherits
    ## already-initialized multi-threaded BLAS from the parent.
    script = os.path.join( os.path.dirname(__file__), "plot_binary_data.py" )
    max_workers = min( multiprocessing.cpu_count(), len(file_list) ) if file_list else 0

    running = []
    failed = []
    for filename in file_list:
        print(filename)
        plot_binary_data.plot_binary_data(filename, binary_outdir, figure_outdir)
        proc = subprocess.Popen(
            [sys.executable, script, filename, binary_outdir, figure_outdir],
        )
        running.append( (proc, filename) )
        ## Keep at most max_workers subprocesses active at a time
        if len(running) >= max_workers:
            p, fn = running.pop(0)
            p.wait()
            if p.returncode != 0:
                failed.append(fn)

    ## Wait for all remaining subprocesses to finish
    for p, fn in running:
        p.wait()
        if p.returncode != 0:
            failed.append(fn)

    if failed:
        print( " WARNING: the following binary data plots failed:" )
        for fn in failed:
            print( " ", fn )

    print( )
    print( " Binary Data Plot Has been Finished " )
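Editorial note (not part of the commit): the throttle above always waits on the oldest worker, so one slow plot can hold a slot even after newer workers have exited. A hedged variant, using the same (Popen, filename) bookkeeping, that reaps whichever worker finishes first; `reap_first_finished` is a name introduced here only for illustration.

```python
## Sketch: pop one finished (Popen, filename) pair from `running`,
## recording failures, instead of blocking on the oldest entry.
import time

def reap_first_finished(running, failed):
    if not running:
        return None
    while True:
        for i, (p, fn) in enumerate(running):
            if p.poll() is not None:        ## this worker has exited
                if p.returncode != 0:
                    failed.append(fn)
                return running.pop(i)
        time.sleep(0.1)                     ## avoid busy-waiting
```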