Compare commits
16 Commits
cjy-oneapi ... cjy-oneapi

| Author | SHA1 | Date |
|---|---|---|
| | 714c6e90c6 | |
| | caf192b2e4 | |
| | d06d5b4db8 | |
| | 50e2a845f8 | |
| | 738498cb28 | |
| | 42b9cf1ad9 | |
| | e9d321fd00 | |
| | ed1d86ade9 | |
| | 471baa5065 | |
| | 4bb6c03013 | |
| | b8e41b2b39 | |
| | 133e4f13a2 | |
| | 914c4f4791 | |
| | f345b0e520 | |
| | f5ed23d687 | |
| | 03d501db04 | |
@@ -16,7 +16,7 @@ import numpy
File_directory = "GW150914" ## output file directory
Output_directory = "binary_output" ## binary data file directory
## The file directory name should not be too long
MPI_processes = 64 ## number of mpi processes used in the simulation
MPI_processes = 1 ## number of processes (MPI removed, single-process mode)

GPU_Calculation = "no" ## Use GPU or not
## (prefer "no" in the current version, because the GPU part may have bugs when integrated in this Python interface)
@@ -20,7 +20,11 @@ using namespace std;
#include <map.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "macrodef.h"
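The same four-line `#ifdef MPI_STUB` guard recurs across the files below: it lets every translation unit fall back to a serial stand-in when built without MPI. The stub header itself is not part of this compare view; a minimal sketch of what it plausibly contains follows (everything here is an assumption, only the standard MPI names are fixed):

```cpp
// Hypothetical mpi_stub.h sketch: single-process stand-ins for the MPI
// symbols this code base touches. NOT the actual header from these commits.
#ifndef MPI_STUB_H
#define MPI_STUB_H

typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
typedef int MPI_Request;
typedef struct { int MPI_SOURCE, MPI_TAG, MPI_ERROR; } MPI_Status;

#define MPI_COMM_WORLD 0
#define MPI_INT 0
#define MPI_DOUBLE 1
#define MPI_SUM 0

inline int MPI_Comm_rank(MPI_Comm, int *rank) { *rank = 0; return 0; }
inline int MPI_Comm_size(MPI_Comm, int *size) { *size = 1; return 0; }
// ... remaining calls (Bcast, Allreduce, Isend/Irecv, Waitall) reduce to
// memcpy or no-ops when there is only one rank.
#endif
```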
@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#define PI M_PI
@@ -2,7 +2,11 @@
#ifndef BLOCK_H
#define BLOCK_H

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "macrodef.h" //need dim here; Vertex or Cell
#include "var.h"
#include "MyList.h"
@@ -4,7 +4,11 @@
#include <stdarg.h>
#include <string.h>

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "myglobal.h"
@@ -341,8 +341,9 @@ void Patch::Interp_Points(MyList<var> *VarList,
double *Shellf, int Symmetry)
{
// NOTE: we do not synchronize variables here; make sure of that before calling this routine
int myrank;
int myrank, nprocs;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

int ordn = 2 * ghost_width;
MyList<var> *varl;
@@ -354,24 +355,18 @@ void Patch::Interp_Points(MyList<var> *VarList,
varl = varl->next;
}

double *shellf;
shellf = new double[NN * num_var];
memset(shellf, 0, sizeof(double) * NN * num_var);
memset(Shellf, 0, sizeof(double) * NN * num_var);

// we use weight to monitor code, later some day we can move it for optimization
int *weight;
weight = new int[NN];
memset(weight, 0, sizeof(int) * NN);

double *DH, *llb, *uub;
DH = new double[dim];
// owner_rank[j] records which MPI rank owns point j
// All ranks traverse the same block list so they all agree on ownership
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;

double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
{
DH[i] = getdX(i);
}
llb = new double[dim];
uub = new double[dim];

for (int j = 0; j < NN; j++) // run along points
{
@@ -403,12 +398,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
bool flag = true;
for (int i = 0; i < dim; i++)
{
// NOTE: our dividing structure is (exclude ghost)
// -1 0
// 1 2
// so (0,1) does not belong to any part for vertex structure
// here we put (0,0.5) to left part and (0.5,1) to right part
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
@@ -433,6 +422,7 @@ void Patch::Interp_Points(MyList<var> *VarList,
if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
@@ -440,14 +430,11 @@ void Patch::Interp_Points(MyList<var> *VarList,
int k = 0;
while (varl) // run along variables
{
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
// pox,ordn,varl->data->SoA,Symmetry);
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
weight[j] = 1;
}
}
if (Bp == ble)
@@ -456,103 +443,327 @@ void Patch::Interp_Points(MyList<var> *VarList,
}
}

MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
int *Weight;
Weight = new int[NN];
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

// misc::tillherecheck("print me");

for (int i = 0; i < NN; i++)
// Replace MPI_Allreduce with per-owner MPI_Bcast:
// Group consecutive points by owner rank and broadcast each group.
// Since each point's data is non-zero only on the owner rank,
// Bcast from owner is equivalent to Allreduce(MPI_SUM) but much cheaper.
{
if (Weight[i] > 1)
int j = 0;
while (j < NN)
{
if (myrank == 0)
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
for (int j = 0; j < num_var; j++)
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
int cur_owner = owner_rank[j];
if (cur_owner < 0)
{
if (myrank == 0)
{
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int d = 0; d < dim; d++)
{
cout << XX[d][j];
if (d < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int d = 0; d < dim; d++)
{
cout << bbox[d] << "+" << lli[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int d = 0; d < dim; d++)
{
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
j++;
continue;
}
// Find contiguous run of points with the same owner
int jstart = j;
while (j < NN && owner_rank[j] == cur_owner)
j++;
int count = (j - jstart) * num_var;
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
}
else if (Weight[i] == 0 && myrank == 0)
}

delete[] owner_rank;
}
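The loop above is the heart of this change: one MPI_Bcast per contiguous run of same-owner points replaces a global MPI_Allreduce over NN * num_var doubles. Distilled into a standalone sketch, under the assumption that every point has a valid owner (the unfound-point case is already handled above):

```cpp
#include <mpi.h>

// Broadcast each contiguous run of points that share an owner rank.
// Equivalent to Allreduce(MPI_SUM) when non-owners hold zeros, but each
// value crosses the network once instead of entering a global reduction.
void broadcast_by_owner(double *data, const int *owner, int NN, int num_var)
{
    int j = 0;
    while (j < NN)
    {
        int cur = owner[j];   // owner rank of this run, assumed >= 0
        int jstart = j;
        while (j < NN && owner[j] == cur)
            j++;
        MPI_Bcast(data + jstart * num_var, (j - jstart) * num_var,
                  MPI_DOUBLE, cur, MPI_COMM_WORLD);
    }
}
```

Since neighbouring shell points presumably tend to fall in the same block, the runs are long and the number of broadcasts stays far below NN.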
void Patch::Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry,
int Nmin_consumer, int Nmax_consumer)
{
// Targeted point-to-point overload: each owner sends each point only to
// the one rank that needs it for integration (consumer), reducing
// communication volume by ~nprocs times compared to the Bcast version.
int myrank, nprocs;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

int ordn = 2 * ghost_width;
MyList<var> *varl;
int num_var = 0;
varl = VarList;
while (varl)
{
num_var++;
varl = varl->next;
}

memset(Shellf, 0, sizeof(double) * NN * num_var);

// owner_rank[j] records which MPI rank owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;

double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);

// --- Interpolation phase (identical to original) ---
for (int j = 0; j < NN; j++)
{
double pox[dim];
for (int i = 0; i < dim; i++)
{
pox[i] = XX[i][j];
if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; k++)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}

MyList<Block> *Bp = blb;
bool notfind = true;
while (notfind && Bp)
{
Block *BP = Bp->data;

bool flag = true;
for (int i = 0; i < dim; i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
if (XX[i][j] - llb[i] < -DH[i] / 2 || XX[i][j] - uub[i] > DH[i] / 2)
{
flag = false;
break;
}
}

if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
varl = VarList;
int k = 0;
while (varl)
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
}
}
if (Bp == ble)
break;
Bp = Bp->next;
}
}

// --- Error check for unfound points ---
for (int j = 0; j < NN; j++)
{
if (owner_rank[j] < 0 && myrank == 0)
{
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int j = 0; j < dim; j++)
for (int d = 0; d < dim; d++)
{
cout << XX[j][i];
if (j < dim - 1)
cout << XX[d][j];
if (d < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int j = 0; j < dim; j++)
for (int d = 0; d < dim; d++)
{
cout << bbox[j] << "+" << lli[j] * getdX(j);
if (j < dim - 1)
cout << bbox[d] << "+" << lli[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int j = 0; j < dim; j++)
for (int d = 0; d < dim; d++)
{
cout << bbox[dim + j] << "-" << uui[j] * getdX(j);
if (j < dim - 1)
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
#if 0
checkBlock();
#else
cout << "splited domains:" << endl;
{
MyList<Block> *Bp = blb;
while (Bp)
{
Block *BP = Bp->data;

for (int i = 0; i < dim; i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
}
cout << "(";
for (int j = 0; j < dim; j++)
{
cout << llb[j] << ":" << uub[j];
if (j < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
if (Bp == ble)
break;
Bp = Bp->next;
}
}
#endif
MPI_Abort(MPI_COMM_WORLD, 1);
}
}

delete[] shellf;
delete[] weight;
delete[] Weight;
delete[] DH;
delete[] llb;
delete[] uub;
// --- Targeted point-to-point communication phase ---
// Compute consumer_rank[j] using the same deterministic formula as surface_integral
int *consumer_rank = new int[NN];
{
int mp = NN / nprocs;
int Lp = NN - nprocs * mp;
for (int j = 0; j < NN; j++)
{
if (j < Lp * (mp + 1))
consumer_rank[j] = j / (mp + 1);
else
consumer_rank[j] = Lp + (j - Lp * (mp + 1)) / mp;
}
}
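This is the standard balanced block partition: the first Lp ranks take mp + 1 consecutive points, the remaining ranks take mp, so every rank can recompute any point's consumer without communication. A tiny self-check of the formula (hypothetical driver, assumes NN >= nprocs so that mp > 0):

```cpp
#include <cassert>

int consumer_of(int j, int NN, int nprocs)
{
    int mp = NN / nprocs;       // base points per rank
    int Lp = NN - nprocs * mp;  // ranks that get one extra point
    return (j < Lp * (mp + 1)) ? j / (mp + 1)
                               : Lp + (j - Lp * (mp + 1)) / mp;
}

int main()
{
    // 10 points over 4 ranks -> run lengths 3,3,2,2
    int expect[10] = {0, 0, 0, 1, 1, 1, 2, 2, 3, 3};
    for (int j = 0; j < 10; j++)
        assert(consumer_of(j, 10, 4) == expect[j]);
    return 0;
}
```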

// Count sends and recvs per rank
int *send_count = new int[nprocs];
int *recv_count = new int[nprocs];
memset(send_count, 0, sizeof(int) * nprocs);
memset(recv_count, 0, sizeof(int) * nprocs);

for (int j = 0; j < NN; j++)
{
int own = owner_rank[j];
int con = consumer_rank[j];
if (own == con)
continue; // local — no communication needed
if (own == myrank)
send_count[con]++;
if (con == myrank)
recv_count[own]++;
}

// Build send buffers: for each destination rank, pack (index, data) pairs
// Each entry: 1 int (point index j) + num_var doubles
int total_send = 0, total_recv = 0;
int *send_offset = new int[nprocs];
int *recv_offset = new int[nprocs];
for (int r = 0; r < nprocs; r++)
{
send_offset[r] = total_send;
total_send += send_count[r];
recv_offset[r] = total_recv;
total_recv += recv_count[r];
}

// Pack send buffers: each message contains (j, data[0..num_var-1]) per point
int stride = 1 + num_var; // 1 double for index + num_var doubles for data
double *sendbuf = new double[total_send * stride];
double *recvbuf = new double[total_recv * stride];

// Temporary counters for packing
int *pack_pos = new int[nprocs];
memset(pack_pos, 0, sizeof(int) * nprocs);

for (int j = 0; j < NN; j++)
{
int own = owner_rank[j];
int con = consumer_rank[j];
if (own != myrank || con == myrank)
continue;
int pos = (send_offset[con] + pack_pos[con]) * stride;
sendbuf[pos] = (double)j; // point index
for (int v = 0; v < num_var; v++)
sendbuf[pos + 1 + v] = Shellf[j * num_var + v];
pack_pos[con]++;
}

// Post non-blocking recvs and sends
int n_req = 0;
for (int r = 0; r < nprocs; r++)
{
if (recv_count[r] > 0) n_req++;
if (send_count[r] > 0) n_req++;
}

MPI_Request *reqs = new MPI_Request[n_req];
int req_idx = 0;

for (int r = 0; r < nprocs; r++)
{
if (recv_count[r] > 0)
{
MPI_Irecv(recvbuf + recv_offset[r] * stride,
recv_count[r] * stride, MPI_DOUBLE,
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
}
}
for (int r = 0; r < nprocs; r++)
{
if (send_count[r] > 0)
{
MPI_Isend(sendbuf + send_offset[r] * stride,
send_count[r] * stride, MPI_DOUBLE,
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
}
}

if (n_req > 0)
MPI_Waitall(n_req, reqs, MPI_STATUSES_IGNORE);

// Unpack recv buffers into Shellf
for (int i = 0; i < total_recv; i++)
{
int pos = i * stride;
int j = (int)recvbuf[pos];
for (int v = 0; v < num_var; v++)
Shellf[j * num_var + v] = recvbuf[pos + 1 + v];
}
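Storing the point index in a double slot keeps each message a single homogeneous MPI_DOUBLE buffer. Doubles represent integers exactly up to 2^53, so the `(double)j` / `(int)recvbuf[pos]` round-trip is lossless for any realistic NN. The record layout, restated in isolation as a sketch:

```cpp
// One record per point: [ index | v_0 ... v_{num_var-1} ], stride 1+num_var.
inline void pack_point(double *rec, int j, const double *vals, int num_var)
{
    rec[0] = (double)j;             // point index, exact up to 2^53
    for (int v = 0; v < num_var; v++)
        rec[1 + v] = vals[v];       // interpolated values
}

inline int unpack_point(const double *rec, double *vals, int num_var)
{
    for (int v = 0; v < num_var; v++)
        vals[v] = rec[1 + v];
    return (int)rec[0];             // recovered point index
}
```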

delete[] reqs;
delete[] sendbuf;
delete[] recvbuf;
delete[] pack_pos;
delete[] send_offset;
delete[] recv_offset;
delete[] send_count;
delete[] recv_count;
delete[] consumer_rank;
delete[] owner_rank;
}
void Patch::Interp_Points(MyList<var> *VarList,
int NN, double **XX,
@@ -573,24 +784,22 @@ void Patch::Interp_Points(MyList<var> *VarList,
varl = varl->next;
}

double *shellf;
shellf = new double[NN * num_var];
memset(shellf, 0, sizeof(double) * NN * num_var);
memset(Shellf, 0, sizeof(double) * NN * num_var);

// we use weight to monitor code, later some day we can move it for optimization
int *weight;
weight = new int[NN];
memset(weight, 0, sizeof(int) * NN);
// owner_rank[j] stores the global rank that owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;

double *DH, *llb, *uub;
DH = new double[dim];
// Build global-to-local rank translation for Comm_here
MPI_Group world_group, local_group;
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
MPI_Comm_group(Comm_here, &local_group);

double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
{
DH[i] = getdX(i);
}
llb = new double[dim];
uub = new double[dim];

for (int j = 0; j < NN; j++) // run along points
{
@@ -622,12 +831,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
bool flag = true;
for (int i = 0; i < dim; i++)
{
// NOTE: our dividing structure is (exclude ghost)
// -1 0
// 1 2
// so (0,1) does not belong to any part for vertex structure
// here we put (0,0.5) to left part and (0.5,1) to right part
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
@@ -652,6 +855,7 @@ void Patch::Interp_Points(MyList<var> *VarList,
if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
@@ -659,14 +863,11 @@ void Patch::Interp_Points(MyList<var> *VarList,
int k = 0;
while (varl) // run along variables
{
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
// pox,ordn,varl->data->SoA,Symmetry);
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
weight[j] = 1;
}
}
if (Bp == ble)
@@ -675,97 +876,35 @@ void Patch::Interp_Points(MyList<var> *VarList,
}
}

MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, Comm_here);
int *Weight;
Weight = new int[NN];
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, Comm_here);

// misc::tillherecheck("print me");
// if(lmyrank == 0) cout<<"myrank = "<<myrank<<"print me"<<endl;

for (int i = 0; i < NN; i++)
// Collect unique global owner ranks and translate to local ranks in Comm_here
// Then broadcast each owner's points via MPI_Bcast on Comm_here
{
if (Weight[i] > 1)
int j = 0;
while (j < NN)
{
if (lmyrank == 0)
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
for (int j = 0; j < num_var; j++)
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
int cur_owner_global = owner_rank[j];
if (cur_owner_global < 0)
{
// Point not found — skip (error check disabled for sub-communicator levels)
j++;
continue;
}
// Translate global rank to local rank in Comm_here
int cur_owner_local;
MPI_Group_translate_ranks(world_group, 1, &cur_owner_global, local_group, &cur_owner_local);

// Find contiguous run of points with the same owner
int jstart = j;
while (j < NN && owner_rank[j] == cur_owner_global)
j++;
int count = (j - jstart) * num_var;
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner_local, Comm_here);
}
#if 0 // for not involved levels, this may fail
else if(Weight[i] == 0 && lmyrank == 0)
{
cout<<"ERROR: Patch::Interp_Points fails to find point (";
for(int j=0;j<dim;j++)
{
cout<<XX[j][i];
if(j<dim-1) cout<<",";
else cout<<")";
}
cout<<" on Patch (";
for(int j=0;j<dim;j++)
{
cout<<bbox[j]<<"+"<<lli[j]*getdX(j);
if(j<dim-1) cout<<",";
else cout<<")--";
}
cout<<"(";
for(int j=0;j<dim;j++)
{
cout<<bbox[dim+j]<<"-"<<uui[j]*getdX(j);
if(j<dim-1) cout<<",";
else cout<<")"<<endl;
}
#if 0
checkBlock();
#else
cout<<"splited domains:"<<endl;
{
MyList<Block> *Bp=blb;
while(Bp)
{
Block *BP=Bp->data;

for(int i=0;i<dim;i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +(ghost_width-0.5)*DH[i];
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-(ghost_width-0.5)*DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +ghost_width*DH[i];
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-ghost_width*DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
}
cout<<"(";
for(int j=0;j<dim;j++)
{
cout<<llb[j]<<":"<<uub[j];
if(j<dim-1) cout<<",";
else cout<<")"<<endl;
}
if(Bp == ble) break;
Bp=Bp->next;
}
}
#endif
MPI_Abort(MPI_COMM_WORLD,1);
}
#endif
}

delete[] shellf;
delete[] weight;
delete[] Weight;
delete[] DH;
delete[] llb;
delete[] uub;
MPI_Group_free(&world_group);
MPI_Group_free(&local_group);
delete[] owner_rank;
}
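The Comm_here overload must convert the owner's MPI_COMM_WORLD rank into a root rank for the sub-communicator before it can MPI_Bcast on Comm_here. That translation step, isolated as a sketch; note that MPI_Group_translate_ranks yields MPI_UNDEFINED when the owner is not a member of Comm_here, a case the hunk above does not explicitly guard:

```cpp
#include <mpi.h>

// Map a MPI_COMM_WORLD rank to its rank inside sub-communicator comm.
int world_to_local_rank(MPI_Comm comm, int world_rank)
{
    MPI_Group world_group, local_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Comm_group(comm, &local_group);

    int local_rank;
    MPI_Group_translate_ranks(world_group, 1, &world_rank,
                              local_group, &local_rank);

    MPI_Group_free(&world_group);
    MPI_Group_free(&local_group);
    return local_rank;  // MPI_UNDEFINED if world_rank is outside comm
}
```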
void Patch::checkBlock()
{
@@ -2,7 +2,11 @@
#ifndef PATCH_H
#define PATCH_H

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "var.h"
@@ -39,6 +43,10 @@ public:

bool Find_Point(double *XX);

void Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry,
int Nmin_consumer, int Nmax_consumer);
void Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry, MPI_Comm Comm_here);
@@ -8,7 +8,11 @@
#include <limits.h>
#include <float.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "util_Table.h"
#include "cctk.h"
@@ -23,7 +23,11 @@ using namespace std;
#include <complex.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"
@@ -23,7 +23,11 @@ using namespace std;
#include <complex.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"
@@ -3756,6 +3756,484 @@ void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
delete[] transfer_src;
delete[] transfer_dst;
}
// Merged Sync: collect all intra-patch and inter-patch grid segment lists,
// then issue a single transfer() call instead of N+1 separate ones.
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);

MyList<Parallel::gridseg> **combined_src = new MyList<Parallel::gridseg> *[cpusize];
MyList<Parallel::gridseg> **combined_dst = new MyList<Parallel::gridseg> *[cpusize];
for (int node = 0; node < cpusize; node++)
combined_src[node] = combined_dst[node] = 0;

// Phase A: Intra-patch ghost exchange segments
MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);

for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);

if (tsrc)
{
if (combined_src[node])
combined_src[node]->catList(tsrc);
else
combined_src[node] = tsrc;
}
if (tdst)
{
if (combined_dst[node])
combined_dst[node]->catList(tdst);
else
combined_dst[node] = tdst;
}

if (src_owned)
src_owned->destroyList();
}

if (dst_ghost)
dst_ghost->destroyList();

Pp = Pp->next;
}

// Phase B: Inter-patch buffer exchange segments
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);

if (tsrc)
{
if (combined_src[node])
combined_src[node]->catList(tsrc);
else
combined_src[node] = tsrc;
}
if (tdst)
{
if (combined_dst[node])
combined_dst[node]->catList(tdst);
else
combined_dst[node] = tdst;
}

if (src_owned)
src_owned->destroyList();
}
if (dst_buffer)
dst_buffer->destroyList();

// Phase C: Single transfer
transfer(combined_src, combined_dst, VarList, VarList, Symmetry);

// Phase D: Cleanup
for (int node = 0; node < cpusize; node++)
{
if (combined_src[node])
combined_src[node]->destroyList();
if (combined_dst[node])
combined_dst[node]->destroyList();
}
delete[] combined_src;
delete[] combined_dst;
}
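Sync_merged keeps the per-rank message count at one per sync instead of one per patch, and is intended as a drop-in for the list-based Sync. A hypothetical call site (names taken from bssn_class further down):

```cpp
// Before: segment lists built and transferred patch by patch (N+1 transfers).
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);

// After: identical segments, batched into a single transfer() call.
Parallel::Sync_merged(GH->PatL[lev], SynchList_pre, Symmetry);
```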
// SyncCache constructor
Parallel::SyncCache::SyncCache()
: valid(false), cpusize(0), combined_src(0), combined_dst(0),
send_lengths(0), recv_lengths(0), send_bufs(0), recv_bufs(0),
send_buf_caps(0), recv_buf_caps(0), reqs(0), stats(0), max_reqs(0)
{
}
// SyncCache invalidate: free grid segment lists but keep buffers
void Parallel::SyncCache::invalidate()
{
if (!valid)
return;
for (int i = 0; i < cpusize; i++)
{
if (combined_src[i])
combined_src[i]->destroyList();
if (combined_dst[i])
combined_dst[i]->destroyList();
combined_src[i] = combined_dst[i] = 0;
send_lengths[i] = recv_lengths[i] = 0;
}
valid = false;
}
// SyncCache destroy: free everything
void Parallel::SyncCache::destroy()
{
invalidate();
if (combined_src) delete[] combined_src;
if (combined_dst) delete[] combined_dst;
if (send_lengths) delete[] send_lengths;
if (recv_lengths) delete[] recv_lengths;
if (send_buf_caps) delete[] send_buf_caps;
if (recv_buf_caps) delete[] recv_buf_caps;
for (int i = 0; i < cpusize; i++)
{
if (send_bufs && send_bufs[i]) delete[] send_bufs[i];
if (recv_bufs && recv_bufs[i]) delete[] recv_bufs[i];
}
if (send_bufs) delete[] send_bufs;
if (recv_bufs) delete[] recv_bufs;
if (reqs) delete[] reqs;
if (stats) delete[] stats;
combined_src = combined_dst = 0;
send_lengths = recv_lengths = 0;
send_buf_caps = recv_buf_caps = 0;
send_bufs = recv_bufs = 0;
reqs = 0; stats = 0;
cpusize = 0; max_reqs = 0;
}
// transfer_cached: reuse pre-allocated buffers from SyncCache
void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel::gridseg> **dst,
MyList<var> *VarList1, MyList<var> *VarList2,
int Symmetry, SyncCache &cache)
{
int myrank;
MPI_Comm_size(MPI_COMM_WORLD, &cache.cpusize);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;

int req_no = 0;
int node;

for (node = 0; node < cpusize; node++)
{
if (node == myrank)
{
int length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
cache.recv_lengths[node] = length;
if (length > 0)
{
if (length > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[length];
cache.recv_buf_caps[node] = length;
}
data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
}
}
else
{
// send
int slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
cache.send_lengths[node] = slength;
if (slength > 0)
{
if (slength > cache.send_buf_caps[node])
{
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
cache.send_bufs[node] = new double[slength];
cache.send_buf_caps[node] = slength;
}
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
}
// recv
int rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
cache.recv_lengths[node] = rlength;
if (rlength > 0)
{
if (rlength > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
}
}
}

MPI_Waitall(req_no, cache.reqs, cache.stats);

for (node = 0; node < cpusize; node++)
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
}
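The cache never shrinks its message buffers; it reallocates only when a sync needs more room than any earlier one did, so after a few steps the hot path performs no allocation at all. That grow-only policy, factored out as a sketch:

```cpp
// Grow-only scratch buffer: reallocate only when the requested length
// exceeds the current capacity; capacities stabilize after a few syncs.
inline double *ensure_capacity(double *&buf, int &cap, int needed)
{
    if (needed > cap)
    {
        delete[] buf;             // deleting nullptr is a no-op
        buf = new double[needed];
        cap = needed;
    }
    return buf;
}
```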
// Sync_cached: build grid segment lists on first call, reuse on subsequent calls
void Parallel::Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache)
{
if (!cache.valid)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
cache.cpusize = cpusize;

// Allocate cache arrays if needed
if (!cache.combined_src)
{
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
cache.send_lengths = new int[cpusize];
cache.recv_lengths = new int[cpusize];
cache.send_bufs = new double *[cpusize];
cache.recv_bufs = new double *[cpusize];
cache.send_buf_caps = new int[cpusize];
cache.recv_buf_caps = new int[cpusize];
for (int i = 0; i < cpusize; i++)
{
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
}
cache.max_reqs = 2 * cpusize;
cache.reqs = new MPI_Request[cache.max_reqs];
cache.stats = new MPI_Status[cache.max_reqs];
}

for (int node = 0; node < cpusize; node++)
{
cache.combined_src[node] = cache.combined_dst[node] = 0;
cache.send_lengths[node] = cache.recv_lengths[node] = 0;
}

// Build intra-patch segments (same as Sync_merged Phase A)
MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_ghost) dst_ghost->destroyList();
Pp = Pp->next;
}

// Build inter-patch segments (same as Sync_merged Phase B)
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_buffer) dst_buffer->destroyList();

cache.valid = true;
}

// Use cached lists with buffer-reusing transfer
transfer_cached(cache.combined_src, cache.combined_dst, VarList, VarList, Symmetry, cache);
}
// Sync_start: pack and post MPI_Isend/Irecv, return immediately
void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
SyncCache &cache, AsyncSyncState &state)
{
// Ensure cache is built
if (!cache.valid)
{
// Build cache (same logic as Sync_cached)
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
cache.cpusize = cpusize;

if (!cache.combined_src)
{
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
cache.send_lengths = new int[cpusize];
cache.recv_lengths = new int[cpusize];
cache.send_bufs = new double *[cpusize];
cache.recv_bufs = new double *[cpusize];
cache.send_buf_caps = new int[cpusize];
cache.recv_buf_caps = new int[cpusize];
for (int i = 0; i < cpusize; i++)
{
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
}
cache.max_reqs = 2 * cpusize;
cache.reqs = new MPI_Request[cache.max_reqs];
cache.stats = new MPI_Status[cache.max_reqs];
}

for (int node = 0; node < cpusize; node++)
{
cache.combined_src[node] = cache.combined_dst[node] = 0;
cache.send_lengths[node] = cache.recv_lengths[node] = 0;
}

MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_ghost) dst_ghost->destroyList();
Pp = Pp->next;
}

MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_buffer) dst_buffer->destroyList();
cache.valid = true;
}

// Now pack and post async MPI operations
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
state.req_no = 0;
state.active = true;

MyList<Parallel::gridseg> **src = cache.combined_src;
MyList<Parallel::gridseg> **dst = cache.combined_dst;

for (int node = 0; node < cpusize; node++)
{
if (node == myrank)
{
int length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
cache.recv_lengths[node] = length;
if (length > 0)
{
if (length > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[length];
cache.recv_buf_caps[node] = length;
}
data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
}
}
else
{
int slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
cache.send_lengths[node] = slength;
if (slength > 0)
{
if (slength > cache.send_buf_caps[node])
{
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
cache.send_bufs[node] = new double[slength];
cache.send_buf_caps[node] = slength;
}
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
int rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);
cache.recv_lengths[node] = rlength;
if (rlength > 0)
{
if (rlength > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
}
}
}
// Sync_finish: wait for async MPI operations and unpack
void Parallel::Sync_finish(SyncCache &cache, AsyncSyncState &state,
MyList<var> *VarList, int Symmetry)
{
if (!state.active)
return;

MPI_Waitall(state.req_no, cache.reqs, cache.stats);

int cpusize = cache.cpusize;
MyList<Parallel::gridseg> **src = cache.combined_src;
MyList<Parallel::gridseg> **dst = cache.combined_dst;

for (int node = 0; node < cpusize; node++)
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);

state.active = false;
}
// collect buffer grid segments or blocks for the periodic boundary condition of given patch
// ---------------------------------------------------
// |con | |con |
@@ -81,6 +81,42 @@ namespace Parallel
int Symmetry);
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);

struct SyncCache {
bool valid;
int cpusize;
MyList<gridseg> **combined_src;
MyList<gridseg> **combined_dst;
int *send_lengths;
int *recv_lengths;
double **send_bufs;
double **recv_bufs;
int *send_buf_caps;
int *recv_buf_caps;
MPI_Request *reqs;
MPI_Status *stats;
int max_reqs;
SyncCache();
void invalidate();
void destroy();
};

void Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache);
void transfer_cached(MyList<gridseg> **src, MyList<gridseg> **dst,
MyList<var> *VarList1, MyList<var> *VarList2,
int Symmetry, SyncCache &cache);

struct AsyncSyncState {
int req_no;
bool active;
AsyncSyncState() : req_no(0), active(false) {}
};

void Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
SyncCache &cache, AsyncSyncState &state);
void Sync_finish(SyncCache &cache, AsyncSyncState &state,
MyList<var> *VarList, int Symmetry);
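Together these declarations give three tiers: Sync_merged (batched, stateless), Sync_cached (batched plus reused segment lists and buffers), and Sync_start/Sync_finish (the cached variant split in two so independent work can overlap the messages). A sketch of the intended split-phase call pattern; the surrounding work is assumed:

```cpp
Parallel::SyncCache cache;          // persists across steps
Parallel::AsyncSyncState state;

Parallel::Sync_start(PatL, VarList, Symmetry, cache, state); // pack + post
// ... hypothetical work that does not read ghost zones ...
Parallel::Sync_finish(cache, state, VarList, Symmetry);      // wait + unpack

// After any regrid the cached segment lists are stale:
cache.invalidate();                 // frees lists, keeps message buffers
```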
void OutBdLow2Hi(Patch *Patc, Patch *Patf,
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
int Symmetry);

@@ -2,7 +2,11 @@
#ifndef SHELLPATCH_H
#define SHELLPATCH_H

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"

[File diff suppressed because it is too large]
@@ -1,7 +1,8 @@

#ifndef TWO_PUNCTURES_H
#define TWO_PUNCTURES_H

#include <omp.h>

#define StencilSize 19
#define N_PlaneRelax 1
#define NRELAX 200
@@ -32,7 +33,7 @@ private:
int npoints_A, npoints_B, npoints_phi;

double target_M_plus, target_M_minus;

double admMass;

double adm_tol;
@@ -42,32 +43,17 @@ private:

int ntotal;

// Pre-allocated workspace buffers for hot-path allocation elimination
// LineRelax_be workspace (sized for n2)
double *ws_diag_be, *ws_e_be, *ws_f_be, *ws_b_be, *ws_x_be;
// LineRelax_al workspace (sized for n1)
double *ws_diag_al, *ws_e_al, *ws_f_al, *ws_b_al, *ws_x_al;
// ThomasAlgorithm workspace (sized for max(n1,n2))
double *ws_thomas_y;
// JFD_times_dv workspace (sized for nvar)
double *ws_jfd_values;
derivs ws_jfd_dU, ws_jfd_U;
// chebft_Zeros workspace (sized for max(n1,n2,n3)+1)
double *ws_cheb_c;
// fourft workspace (sized for max(n1,n2,n3)/2+1 each)
double *ws_four_a, *ws_four_b;
// Derivatives_AB3 workspace
double *ws_deriv_p, *ws_deriv_dp, *ws_deriv_d2p;
double *ws_deriv_q, *ws_deriv_dq;
double *ws_deriv_r, *ws_deriv_dr;
int *ws_deriv_indx;
// F_of_v workspace
double *ws_fov_sources;
double *ws_fov_values;
derivs ws_fov_U;
// J_times_dv workspace
double *ws_jtdv_values;
derivs ws_jtdv_dU, ws_jtdv_U;
// ===== Precomputed spectral derivative matrices =====
double *D1_A, *D2_A;
double *D1_B, *D2_B;
double *DF1_phi, *DF2_phi;

// ===== Pre-allocated workspace for LineRelax (per-thread) =====
int max_threads;
double **ws_diag_be, **ws_e_be, **ws_f_be, **ws_b_be, **ws_x_be;
double **ws_l_be, **ws_u_be, **ws_d_be, **ws_y_be;
double **ws_diag_al, **ws_e_al, **ws_f_al, **ws_b_al, **ws_x_al;
double **ws_l_al, **ws_u_al, **ws_d_al, **ws_y_al;
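The scalar workspace pointers become arrays of per-thread scratch rows, indexed by the `tid` argument of LineRelax_be_omp/LineRelax_al_omp so OpenMP threads never share a buffer. allocate_workspace's body is not in this view; a plausible shape, offered purely as an assumption:

```cpp
#include <omp.h>

// Assumed per-thread workspace allocation; not the actual commit body.
static void alloc_rows(double **&ws, int nthreads, int n)
{
    ws = new double *[nthreads];
    for (int t = 0; t < nthreads; t++)
        ws[t] = new double[n];   // one private scratch row per thread
}

// e.g. inside allocate_workspace():
//   max_threads = omp_get_max_threads();
//   alloc_rows(ws_diag_be, max_threads, n2);
//   alloc_rows(ws_diag_al, max_threads, n1);
```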

struct parameters
{
@@ -85,6 +71,28 @@ public:
int Newtonmaxit);
~TwoPunctures();

// 02/07: New/modified methods
void allocate_workspace();
void free_workspace();
void precompute_derivative_matrices();
void build_cheb_deriv_matrices(int n, double *D1, double *D2);
void build_fourier_deriv_matrices(int N, double *DF1, double *DF2);
void Derivatives_AB3_MatMul(int nvar, int n1, int n2, int n3, derivs v);
void ThomasAlgorithm_ws(int N, double *b, double *a, double *c, double *x, double *q,
double *l, double *u_ws, double *d, double *y);
void LineRelax_be_omp(double *dv,
int const i, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols,
double **JFD, int tid);
void LineRelax_al_omp(double *dv,
int const j, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols,
int **cols, double **JFD, int tid);
void relax_omp(double *dv, int const nvar, int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols, double **JFD);

void Solve();
void set_initial_guess(derivs v);
int index(int i, int j, int k, int l, int a, int b, int c, int d);
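build_cheb_deriv_matrices/build_fourier_deriv_matrices plus Derivatives_AB3_MatMul trade repeated spectral transforms for precomputed dense derivative matrices applied as matrix-vector products. The application step in its simplest possible form (a sketch; the real method operates on `derivs` structs across three directions):

```cpp
// df[i] = sum_j D[i*n + j] * f[j] along one coordinate line,
// with D an n x n row-major derivative matrix built once at startup.
void apply_deriv_matrix(const double *D, const double *f, double *df, int n)
{
    for (int i = 0; i < n; i++)
    {
        double s = 0.0;
        for (int j = 0; j < n; j++)
            s += D[i * n + j] * f[j];
        df[i] = s;
    }
}
```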
@@ -143,23 +151,11 @@ public:
double BY_KKofxyz(double x, double y, double z);
void SetMatrix_JFD(int nvar, int n1, int n2, int n3, derivs u, int *ncols, int **cols, double **Matrix);
void J_times_dv(int nvar, int n1, int n2, int n3, derivs dv, double *Jdv, derivs u);
void relax(double *dv, int const nvar, int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols, double **JFD);
void LineRelax_be(double *dv,
int const i, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols,
double **JFD);
void JFD_times_dv(int i, int j, int k, int nvar, int n1, int n2,
int n3, derivs dv, derivs u, double *values);
void LinEquations(double A, double B, double X, double R,
double x, double r, double phi,
double y, double z, derivs dU, derivs U, double *values);
void LineRelax_al(double *dv,
int const j, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols,
int **cols, double **JFD);
void ThomasAlgorithm(int N, double *b, double *a, double *c, double *x, double *q);
void Save(char *fname);
// provided by Vasileios Paschalidis (vpaschal@illinois.edu)
@@ -168,4 +164,4 @@ public:
void SpecCoef(parameters par, int ivar, double *v, double *cf);
};

#endif /* TWO_PUNCTURES_H */
#endif /* TWO_PUNCTURES_H */
@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "cgh.h"
#include "ShellPatch.h"

@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "cgh.h"
#include "ShellPatch.h"

@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "cgh.h"
#include "ShellPatch.h"
@@ -730,6 +730,12 @@ void bssn_class::Initialize()
PhysTime = StartTime;
Setup_Black_Hole_position();
}

// Initialize sync caches (per-level, for predictor and corrector)
sync_cache_pre = new Parallel::SyncCache[GH->levels];
sync_cache_cor = new Parallel::SyncCache[GH->levels];
sync_cache_rp_coarse = new Parallel::SyncCache[GH->levels];
sync_cache_rp_fine = new Parallel::SyncCache[GH->levels];
}

//================================================================================================
@@ -981,6 +987,32 @@ bssn_class::~bssn_class()
delete Azzz;
#endif

// Destroy sync caches before GH
if (sync_cache_pre)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_pre[i].destroy();
delete[] sync_cache_pre;
}
if (sync_cache_cor)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_cor[i].destroy();
delete[] sync_cache_cor;
}
if (sync_cache_rp_coarse)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_rp_coarse[i].destroy();
delete[] sync_cache_rp_coarse;
}
if (sync_cache_rp_fine)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_rp_fine[i].destroy();
delete[] sync_cache_rp_fine;
}

delete GH;
#ifdef WithShell
delete SH;
@@ -2181,6 +2213,7 @@ void bssn_class::Evolve(int Steps)
GH->Regrid(Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_mon, StartTime, dT_mon / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif

#if (REGLEV == 0 && (PSTR == 1 || PSTR == 2))
@@ -2396,6 +2429,7 @@ void bssn_class::RecursiveStep(int lev)
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif
}

@@ -2574,6 +2608,7 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif
}

@@ -2740,6 +2775,7 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }

// a_stream.clear();
// a_stream.str("");
@@ -2754,6 +2790,7 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }

// a_stream.clear();
// a_stream.str("");
@@ -2772,6 +2809,7 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }

// a_stream.clear();
// a_stream.str("");
@@ -2787,6 +2825,7 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }

// a_stream.clear();
// a_stream.str("");
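The same four-cache invalidation line is pasted after all seven Regrid/Regrid_Onelevel call sites above; a small helper would keep the call sites to one name (hypothetical refactor, not in these commits):

```cpp
// Hypothetical helper collecting the repeated invalidation loop.
void bssn_class::invalidate_sync_caches()
{
    for (int il = 0; il < GH->levels; il++)
    {
        sync_cache_pre[il].invalidate();
        sync_cache_cor[il].invalidate();
        sync_cache_rp_coarse[il].invalidate();
        sync_cache_rp_fine[il].invalidate();
    }
}
```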
@@ -3158,21 +3197,7 @@ void bssn_class::Step(int lev, int YN)
}
Pp = Pp->next;
}
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls

#ifdef WithShell
// evolve Shell Patches
@@ -3190,9 +3215,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
#endif

@@ -3316,25 +3341,16 @@ void bssn_class::Step(int lev, int YN)
|
||||
#endif
|
||||
}
|
||||
|
||||
// check error information
|
||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
||||
MPI_Request err_req;
|
||||
{
|
||||
int erh = ERROR;
|
||||
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||
}
|
||||
|
||||
if (ERROR)
|
||||
{
|
||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||
if (myrank == 0)
|
||||
{
|
||||
if (ErrorMonitor->outfile)
|
||||
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||
}
|
||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
|
||||
}
|
||||
#endif
|
||||
|
||||
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
|
||||
Parallel::AsyncSyncState async_pre;
|
||||
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
||||
|
||||
#ifdef WithShell
|
||||
if (lev == 0)
|
||||
@@ -3347,12 +3363,29 @@ void bssn_class::Step(int lev, int YN)
|
||||
{
|
||||
prev_clock = curr_clock;
|
||||
curr_clock = clock();
|
||||
cout << " Shell stuff synchronization used "
|
||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||
cout << " Shell stuff synchronization used "
|
||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||
<< " seconds! " << endl;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
||||
|
||||
#ifdef WithShell
|
||||
// Complete non-blocking error reduction and check
|
||||
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
||||
if (ERROR)
|
||||
{
|
||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||
if (myrank == 0)
|
||||
{
|
||||
if (ErrorMonitor->outfile)
|
||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
|
||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||
}
|
||||
}
|
||||
#endif
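The hunks above replace the blocking MPI_Allreduce on the NaN flag with an MPI_Iallreduce that is posted before the ghost-zone synchronization and completed with MPI_Wait only once the flag is actually needed, hiding the reduction latency behind communication that does not depend on it. A minimal sketch of the pattern, using only standard MPI-3 calls (the surrounding AMSS-NCKU routines are assumed, not shown):

#include <mpi.h>

void check_error_overlapped(int &error_flag, MPI_Comm comm)
{
    int local = error_flag;
    MPI_Request req;
    // start the reduction without blocking
    MPI_Iallreduce(&local, &error_flag, 1, MPI_INT, MPI_SUM, comm, &req);

    // ... work that neither reads nor writes error_flag, e.g. halo exchange ...

    MPI_Wait(&req, MPI_STATUS_IGNORE);   // flag is valid only after this
    if (error_flag)
        MPI_Abort(comm, 1);
}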

#if (MAPBH == 0)
// for black hole position
@@ -3528,24 +3561,7 @@ void bssn_class::Step(int lev, int YN)
Pp = Pp->next;
}

// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}

if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls

#ifdef WithShell
// evolve Shell Patches
@@ -3563,9 +3579,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
#elif (AGM == 1)
if (iter_count == 3)
@@ -3685,26 +3701,16 @@ void bssn_class::Step(int lev, int YN)
sPp = sPp->next;
}
}
// check error information
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#"
<< iter_count << " variables at t = "
<< PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
}
#endif

Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
Parallel::AsyncSyncState async_cor;
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);

#ifdef WithShell
if (lev == 0)
@@ -3717,12 +3723,31 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);

#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif

#if (MAPBH == 0)
// for black hole position
@@ -4034,22 +4059,7 @@ void bssn_class::Step(int lev, int YN)
}
Pp = Pp->next;
}
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls

#ifdef WithShell
// evolve Shell Patches
@@ -4067,15 +4077,15 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
#endif

if (f_compute_rhs_bssn_ss(cg->shape, TRK4, cg->X[0], cg->X[1], cg->X[2],
cg->fgfs[fngfs + ShellPatch::gx],
cg->fgfs[fngfs + ShellPatch::gy],
cg->fgfs[fngfs + ShellPatch::gz],
cg->fgfs[fngfs + ShellPatch::drhodx],
cg->fgfs[fngfs + ShellPatch::drhody],
@@ -4190,25 +4200,16 @@ void bssn_class::Step(int lev, int YN)
}
#endif
}
// check error information
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = "
<< PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
}
#endif

Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
Parallel::AsyncSyncState async_pre;
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);

#ifdef WithShell
if (lev == 0)
@@ -4221,9 +4222,27 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);

#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
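Parallel::Sync_start and Parallel::Sync_finish split the ghost exchange into a posting phase and a completion phase so that the Shell Patch synchronization can run in between. Their internals are not part of this diff; a rough sketch of such a split-phase exchange for a single neighbor, under that assumption:

#include <mpi.h>
#include <vector>

struct AsyncExchange { std::vector<MPI_Request> reqs; };

void exchange_start(AsyncExchange &st, double *recv_buf, double *send_buf,
                    int n, int peer, MPI_Comm comm)
{
    st.reqs.resize(2);
    MPI_Irecv(recv_buf, n, MPI_DOUBLE, peer, 0, comm, &st.reqs[0]);
    MPI_Isend(send_buf, n, MPI_DOUBLE, peer, 0, comm, &st.reqs[1]);
}

void exchange_finish(AsyncExchange &st)
{
    // recv_buf may be read (and send_buf reused) only after this returns
    MPI_Waitall((int)st.reqs.size(), st.reqs.data(), MPI_STATUSES_IGNORE);
}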
@@ -4386,23 +4405,7 @@ void bssn_class::Step(int lev, int YN)
Pp = Pp->next;
}

// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls

#ifdef WithShell
// evolve Shell Patches
@@ -4420,9 +4423,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
#elif (AGM == 1)
if (iter_count == 3)
@@ -4542,25 +4545,16 @@ void bssn_class::Step(int lev, int YN)
sPp = sPp->next;
}
}
// check error information
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
}
#endif

Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
Parallel::AsyncSyncState async_cor;
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);

#ifdef WithShell
if (lev == 0)
@@ -4573,11 +4567,30 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);

#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -4943,11 +4956,19 @@ void bssn_class::Step(int lev, int YN)

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");

// check error information
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req);
}

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");

Parallel::Sync_cached(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev]);

// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
@@ -4959,10 +4980,6 @@ void bssn_class::Step(int lev, int YN)
}
}

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");

Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);

#if (MAPBH == 0)
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -5140,30 +5157,34 @@ void bssn_class::Step(int lev, int YN)

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector error check");

// check error information
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req_cor);
}

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");

Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev]);

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");

// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");

Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);

// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");

#if (MAPBH == 0)
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -5447,21 +5468,11 @@ void bssn_class::SHStep()
#if (PSTR == 1 || PSTR == 2)
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor's error check");
#endif
// check error information
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
MPI_Request err_req;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}

if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
}

{
@@ -5473,12 +5484,25 @@ void bssn_class::SHStep()
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}

// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}

// corrector
for (iter_count = 1; iter_count < 4; iter_count++)
{
@@ -5621,21 +5645,11 @@ void bssn_class::SHStep()
sPp = sPp->next;
}
}
// check error information
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
MPI_Request err_req_cor;
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
}

{
@@ -5647,12 +5661,26 @@ void bssn_class::SHStep()
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}

// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}

sPp = SH->PatL;
while (sPp)
{
@@ -5781,7 +5809,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
#endif

Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5842,7 +5870,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
#endif

Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);

#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5880,7 +5908,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
#endif
}

Parallel::Sync(GH->PatL[lev], SL, Symmetry);
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);

#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5938,7 +5966,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SynchList_pre, GH->rsul[lev], Symmetry);
#endif

Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -5970,7 +5998,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->rsul[lev], Symmetry);
#endif

Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);

#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -5994,7 +6022,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
#endif
}

Parallel::Sync(GH->PatL[lev], SL, Symmetry);
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
}
}

@@ -6045,7 +6073,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, SynchList_pre, GH->rsul[lev], Symmetry);
#endif

Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);

#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -6079,7 +6107,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, GH->rsul[lev], Symmetry);
#endif

Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);

#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -6103,7 +6131,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
#endif
}

Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
}
}

@@ -6186,10 +6214,10 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
#else
Parallel::Restrict_after(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, Symmetry);
#endif
Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
}

Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
}
}
#undef MIXOUTB

@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "macrodef.h"
#include "cgh.h"
@@ -126,6 +130,11 @@ public:
MyList<var> *OldStateList, *DumpList;
MyList<var> *ConstraintList;

Parallel::SyncCache *sync_cache_pre; // per-level cache for predictor sync
Parallel::SyncCache *sync_cache_cor; // per-level cache for corrector sync
Parallel::SyncCache *sync_cache_rp_coarse; // RestrictProlong sync on PatL[lev-1]
Parallel::SyncCache *sync_cache_rp_fine; // RestrictProlong sync on PatL[lev]

monitor *ErrorMonitor, *Psi4Monitor, *BHMonitor, *MAPMonitor;
monitor *ConVMonitor;
surface_integral *Waveshell;
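These four members cache, per refinement level, the communication schedule used by the cached and asynchronous Sync variants; ParallelStep invalidates all of them after every Regrid_Onelevel, since regridding changes the block layout. The class itself is not shown in this diff, so the following is a hypothetical sketch of the idea only:

#include <vector>

struct SyncPair { int src_block, dst_block; /* peer rank, overlap box, ... */ };

class SyncCache {
    std::vector<SyncPair> schedule_;   // memoized ghost-exchange pairs
    bool valid_ = false;
public:
    bool valid() const { return valid_; }
    const std::vector<SyncPair> &schedule() const { return schedule_; }
    void store(std::vector<SyncPair> s) { schedule_ = std::move(s); valid_ = true; }
    // After regridding the memoized schedule no longer matches the block
    // layout, so it is dropped and rebuilt lazily on the next Sync.
    void invalidate() { schedule_.clear(); valid_ = false; }
};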

@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "macrodef.h"
#include "cgh.h"

@@ -20,7 +20,11 @@ using namespace std;
#include <map.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "macrodef.h"
#include "misc.h"

@@ -2,7 +2,11 @@
#ifndef CGH_H
#define CGH_H

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "MPatch.h"
#include "macrodef.h"

@@ -19,7 +19,11 @@ using namespace std;
#include <time.h>
#include <stdlib.h>

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "var.h"
#include "MyList.h"

@@ -69,6 +69,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -151,6 +152,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -218,6 +220,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -282,6 +285,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -371,6 +375,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -469,6 +474,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -531,6 +537,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -594,6 +601,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -657,6 +665,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -719,6 +728,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -780,6 +790,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -866,6 +877,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -997,6 +1009,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1151,6 +1164,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1227,6 +1241,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1297,6 +1312,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1401,6 +1417,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1576,6 +1593,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1643,6 +1661,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1712,6 +1731,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1781,6 +1801,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1851,6 +1872,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1919,6 +1941,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2011,6 +2034,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2127,6 +2151,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2212,6 +2237,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2288,6 +2314,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2406,6 +2433,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2593,6 +2621,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2665,6 +2694,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2740,6 +2770,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2815,6 +2846,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2895,6 +2927,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2973,6 +3006,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3080,6 +3114,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3216,6 +3251,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3311,6 +3347,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3395,6 +3432,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3530,6 +3568,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3802,6 +3841,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3883,6 +3923,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3967,6 +4008,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4051,6 +4093,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4153,6 +4196,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4253,6 +4297,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1

@@ -81,6 +81,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -179,6 +180,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -262,6 +264,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -342,6 +345,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -443,6 +447,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -553,6 +558,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -627,6 +633,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -702,6 +709,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -777,6 +785,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -851,6 +860,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -924,6 +934,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1019,6 +1030,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1134,6 +1146,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1227,6 +1240,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1314,6 +1328,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1430,6 +1445,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1580,6 +1596,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1659,6 +1676,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1740,6 +1758,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1821,6 +1840,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1903,6 +1923,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1983,6 +2004,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2087,6 +2109,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2219,6 +2242,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2321,6 +2345,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2414,6 +2439,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2544,6 +2570,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2743,6 +2770,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2827,6 +2855,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2914,6 +2943,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3001,6 +3031,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3093,6 +3124,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3183,6 +3215,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3302,6 +3335,7 @@
fy = ZEO
fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3454,6 +3488,7 @@

fx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3566,6 +3601,7 @@

fy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3667,6 +3703,7 @@

fz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3814,6 +3851,7 @@
fxz = ZEO
fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4098,6 +4136,7 @@

fxx = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4191,6 +4230,7 @@

fyy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4287,6 +4327,7 @@

fzz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4383,6 +4424,7 @@

fxy = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4497,6 +4539,7 @@

fxz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4609,6 +4652,7 @@

fyz = ZEO

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4679,6 +4723,7 @@ subroutine fderivs_shc(ex,f,fx,fy,fz,crho,sigma,R,SYM1,SYM2,SYM3,Symmetry,Lev,ss
#if 0
integer :: i,j,k

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4729,6 +4774,7 @@ subroutine fdderivs_shc(ex,f,fxx,fxy,fxz,fyy,fyz,fzz,crho,sigma,R,SYM1,SYM2,SYM3
#if 0
integer :: i,j,k

!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -27,6 +27,7 @@

!~~~~~~>

!$omp parallel do collapse(2) private(i,j,k,lgxx,lgyy,lgzz,ldetg,lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz,ltrA,lscale)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -104,6 +105,7 @@

!~~~~~~>

!$omp parallel do collapse(2) private(i,j,k,lgxx,lgyy,lgzz,lscale,lgxy,lgxz,lgyz,lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz,ltrA)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
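Every hunk in these Fortran files inserts "!$omp parallel do collapse(2)" ahead of a triple loop nest: collapsing the k and j loops hands the runtime ex(3)*ex(2) iterations to distribute across threads instead of only ex(3), which matters when ex(3) is small relative to the thread count, while the contiguous innermost i loop is left free to vectorize. An illustrative C++ analogue of the same directive (not code from this diff):

void scale_field(int ex1, int ex2, int ex3, double *f, double s)
{
    #pragma omp parallel for collapse(2)
    for (int k = 0; k < ex3; ++k)
        for (int j = 0; j < ex2; ++j)
            for (int i = 0; i < ex1; ++i)          // contiguous in memory,
                f[(k * ex2 + j) * ex1 + i] *= s;   // so this loop vectorizes
}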

@@ -6,7 +6,11 @@
#include <stdio.h>
#include <assert.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "util_Table.h"
#include "cctk.h"

@@ -6,7 +6,11 @@
#include <stdio.h>
#include <assert.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "cctk.h"

@@ -65,7 +65,8 @@ real*8,intent(in) :: eps
! dx^4

! note the sign (-1)^r-1, now r=2
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -159,7 +160,8 @@ integer, parameter :: NO_SYMM=0, OCTANT=2

call symmetry_bd(3,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -273,7 +275,8 @@ real*8,intent(in) :: eps
! dx^8

! note the sign (-1)^r-1, now r=4
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -385,7 +388,8 @@ real*8,intent(in) :: eps
! dx^10

! note the sign (-1)^r-1, now r=5
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -80,7 +80,8 @@ real*8,intent(in) :: eps
! dx^4

! note the sign (-1)^r-1, now r=2
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -178,7 +179,8 @@ real*8,intent(in) :: eps
! dx^4

! note the sign (-1)^r-1, now r=2
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -273,7 +275,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(2,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -369,7 +372,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(3,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -510,7 +514,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(3,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -598,7 +603,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(3,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -694,7 +700,8 @@ real*8,intent(in) :: eps
! dx^8

! note the sign (-1)^r-1, now r=4
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -794,7 +801,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(4,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -903,7 +911,8 @@ real*8,intent(in) :: eps
! dx^10

! note the sign (-1)^r-1, now r=5
do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -1006,7 +1015,8 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2

call symmetry_stbd(5,ex,f,fh,SoA)

do k=1,ex(3)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

@@ -68,7 +68,8 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)

! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
do k=1,ex(3)-1
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction
@@ -233,7 +234,8 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)

! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
do k=1,ex(3)-1
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
#if 0
@@ -558,7 +560,8 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)

! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
do k=1,ex(3)-1
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction
@@ -774,7 +777,8 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)

! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
do k=1,ex(3)-1
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction

@@ -16,6 +16,12 @@ include makefile.inc
.cu.o:
$(Cu) $(CUDA_APP_FLAGS) -c $< -o $@ $(CUDA_LIB_PATH)

TwoPunctures.o: TwoPunctures.C
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@

TwoPunctureABE.o: TwoPunctureABE.C
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@

# Input files
C++FILES = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
cgh.o bssn_class.o surface_integral.o ShellPatch.o\
@@ -89,14 +95,14 @@ $(CUDAFILES): bssn_gpu.h gpu_mem.h gpu_rhsSS_mem.h
misc.o : zbesh.o

# projects
ABE: $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(LDLIBS)
ABE: $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS)
$(CLINKER) $(CXXAPPFLAGS) -qopenmp -o $@ $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(LDLIBS)

ABEGPU: $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES) $(LDLIBS)

TwoPunctureABE: $(TwoPunctureFILES)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(TwoPunctureFILES) $(LDLIBS)
$(CLINKER) $(CXXAPPFLAGS) -qopenmp -o $@ $(TwoPunctureFILES) $(LDLIBS)

clean:
rm *.o ABE ABEGPU TwoPunctureABE make.log -f

@@ -6,25 +6,23 @@
## Intel oneAPI version with oneMKL (Optimized for performance)
filein = -I/usr/include/ -I${MKLROOT}/include

## Using sequential MKL (OpenMP disabled for better single-threaded performance)
## Using OpenMP-threaded MKL for parallel performance
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lifcore -limf -lpthread -lm -ldl

## Aggressive optimization flags:
## -O3: Maximum optimization
## -xHost: Optimize for the host CPU architecture (Intel/AMD compatible)
## -fp-model fast=2: Aggressive floating-point optimizations
## -fma: Enable fused multiply-add instructions
## Note: OpenMP has been disabled (-qopenmp removed) due to performance issues
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
-Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
## Aggressive optimization flags + PGO Phase 2 (profile-guided optimization)
## -fprofile-instr-use: use collected profile data to guide optimization decisions
## (branch prediction, basic block layout, inlining, loop unrolling)
PROFDATA = /home/amss/AMSS-NCKU/pgo_profile/default.profdata
CXXAPPFLAGS = -O3 -march=native -fp-model fast=2 -fma -ipo -qopenmp \
-DMPI_STUB -Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -march=native -fp-model fast=2 -fma -ipo -qopenmp \
-align array64byte -fpp -I${MKLROOT}/include
f90 = ifx
f77 = ifx
CXX = icpx
CC = icx
CLINKER = mpiicpx
CLINKER = icpx

Cu = nvcc
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include
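makefile.inc switches the linker from the MPI wrapper mpiicpx to plain icpx (consistent with -DMPI_STUB), re-enables -qopenmp together with the threaded MKL, and its comments describe PGO phase 2 driven by the merged profile at $(PROFDATA). icpx is Clang-based, so the usual two-phase profile flow would look roughly as follows; this is a sketch of the workflow those comments imply, not part of the diff:

## PGO phase 1: build instrumented, run a representative case, merge profiles
##   CXXAPPFLAGS += -fprofile-instr-generate
##   ./ABE <input>            # writes default.profraw
##   llvm-profdata merge -output=default.profdata default.profraw
## PGO phase 2: rebuild with the merged profile guiding the optimizer
##   CXXAPPFLAGS += -fprofile-instr-use=$(PROFDATA)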

@@ -14,7 +14,11 @@ using namespace std;
#include <string.h>
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "macrodef.h"

@@ -24,7 +24,11 @@ using namespace std;
#include <complex.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

namespace misc
{

@@ -20,7 +20,11 @@ using namespace std;
#endif
#include <time.h>

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

class monitor
{

AMSS_NCKU_source/mpi_stub.h (new file, 153 lines)
@@ -0,0 +1,153 @@
#ifndef MPI_STUB_H
#define MPI_STUB_H

/*
 * MPI Stub Header — single-process shim for AMSS-NCKU ABE solver.
 * Provides all MPI types, constants, and functions used in the codebase
 * as no-ops or trivial implementations for nprocs=1, myrank=0.
 */

#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <time.h>

/* ── Types ─────────────────────────────────────────────────────────── */

typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
typedef int MPI_Request;
typedef int MPI_Group;

typedef struct MPI_Status {
int MPI_SOURCE;
int MPI_TAG;
int MPI_ERROR;
} MPI_Status;

/* ── Constants ─────────────────────────────────────────────────────── */

#define MPI_COMM_WORLD 0

#define MPI_INT 1
#define MPI_DOUBLE 2
#define MPI_DOUBLE_PRECISION 2
#define MPI_DOUBLE_INT 3

#define MPI_SUM 1
#define MPI_MAX 2
#define MPI_MAXLOC 3

#define MPI_STATUS_IGNORE ((MPI_Status *)0)
#define MPI_STATUSES_IGNORE ((MPI_Status *)0)

#define MPI_MAX_PROCESSOR_NAME 256

/* ── Helper: sizeof for MPI_Datatype ──────────────────────────────── */

static inline size_t mpi_stub_sizeof(MPI_Datatype type) {
switch (type) {
case MPI_INT: return sizeof(int);
case MPI_DOUBLE: return sizeof(double);
case MPI_DOUBLE_INT: return sizeof(double) + sizeof(int);
default: return 0;
}
}

/* ── Init / Finalize ──────────────────────────────────────────────── */

static inline int MPI_Init(int *, char ***) { return 0; }
static inline int MPI_Finalize() { return 0; }

/* ── Communicator queries ─────────────────────────────────────────── */

static inline int MPI_Comm_rank(MPI_Comm, int *rank) { *rank = 0; return 0; }
static inline int MPI_Comm_size(MPI_Comm, int *size) { *size = 1; return 0; }
static inline int MPI_Comm_split(MPI_Comm comm, int, int, MPI_Comm *newcomm) {
*newcomm = comm;
return 0;
}
static inline int MPI_Comm_free(MPI_Comm *) { return 0; }

/* ── Group operations ─────────────────────────────────────────────── */

static inline int MPI_Comm_group(MPI_Comm, MPI_Group *group) {
*group = 0;
return 0;
}
static inline int MPI_Group_translate_ranks(MPI_Group, int n,
const int *ranks1, MPI_Group, int *ranks2) {
for (int i = 0; i < n; ++i) ranks2[i] = ranks1[i];
return 0;
}
static inline int MPI_Group_free(MPI_Group *) { return 0; }

/* ── Collective operations ────────────────────────────────────────── */

static inline int MPI_Allreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm) {
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
return 0;
}

static inline int MPI_Iallreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm,
MPI_Request *request) {
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
*request = 0;
return 0;
}

static inline int MPI_Bcast(void *, int, MPI_Datatype, int, MPI_Comm) {
return 0;
}

static inline int MPI_Barrier(MPI_Comm) { return 0; }

/* ── Point-to-point (never reached with nprocs=1) ─────────────────── */

static inline int MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm) {
return 0;
}
static inline int MPI_Recv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *) {
return 0;
}
static inline int MPI_Isend(const void *, int, MPI_Datatype, int, int, MPI_Comm,
MPI_Request *req) {
*req = 0;
return 0;
}
static inline int MPI_Irecv(void *, int, MPI_Datatype, int, int, MPI_Comm,
MPI_Request *req) {
*req = 0;
return 0;
}

/* ── Completion ───────────────────────────────────────────────────── */

static inline int MPI_Wait(MPI_Request *, MPI_Status *) { return 0; }
static inline int MPI_Waitall(int, MPI_Request *, MPI_Status *) { return 0; }

/* ── Utility ──────────────────────────────────────────────────────── */

static inline int MPI_Abort(MPI_Comm, int error_code) {
std::fprintf(stderr, "MPI_Abort called with error code %d\n", error_code);
std::exit(error_code);
return 0;
}

static inline double MPI_Wtime() {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (double)ts.tv_sec + (double)ts.tv_nsec * 1.0e-9;
}

static inline int MPI_Get_processor_name(char *name, int *resultlen) {
const char *stub_name = "localhost";
std::strcpy(name, stub_name);
*resultlen = (int)std::strlen(stub_name);
return 0;
}

#endif /* MPI_STUB_H */
|
||||
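With these no-op definitions in place, existing MPI call sites compile and run unchanged in single-process mode. Below is a minimal sketch of such a call site; the file name `demo.C` and the compile line are illustrative assumptions, and it presumes the stub's earlier (unshown) portion typedefs `MPI_Comm`/`MPI_Datatype` and defines `MPI_COMM_WORLD` and `MPI_DOUBLE`:

```cpp
// demo.C - hypothetical call site; compile with e.g. "icpx -DMPI_STUB demo.C"
// (no MPI library needed). All MPI_* names resolve to the stub above.
#include <cstdio>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // stub: always 0
  MPI_Comm_size(MPI_COMM_WORLD, &size);  // stub: always 1
  double local = 1.5, total = 0.0;
  // Under the stub, Allreduce degenerates to a memcpy of the send buffer.
  MPI_Allreduce(&local, &total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  std::printf("rank %d of %d, total = %g\n", rank, size, total);
  MPI_Finalize();
  return 0;
}
```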
@@ -24,7 +24,11 @@ using namespace std;
#include <map.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

namespace parameters
{
@@ -30,7 +30,11 @@ using namespace std;
#include <sys/time.h>
#include <sys/resource.h>

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

/* Real time */
#define TimerSignal SIGALRM
@@ -109,23 +109,33 @@

  if( RK4 == 0 ) then

!$omp parallel workshare
     f1 = f0 + HLF * dT * f_rhs
!$omp end parallel workshare

  elseif( RK4 == 1 ) then

!$omp parallel workshare
     f_rhs = f_rhs + TWO * f1
!$omp end parallel workshare
!$omp parallel workshare
     f1 = f0 + HLF * dT * f1
!$omp end parallel workshare

  elseif( RK4 == 2 ) then

!$omp parallel workshare
     f_rhs = f_rhs + TWO * f1
!$omp end parallel workshare
!$omp parallel workshare
     f1 = f0 + dT * f1
!$omp end parallel workshare

  elseif( RK4 == 3 ) then

!$omp parallel workshare
     f1 = f0 + F1o6 * dT * (f1 + f_rhs)
!$omp end parallel workshare

  else

@@ -134,7 +144,7 @@

  endif

  return

end subroutine rungekutta4_rout
!-----------------------------------------------------------------------------
@@ -215,15 +225,19 @@

  if( RK4 == 0 ) then

!$omp parallel workshare
     f1 = f0 + dT * f_rhs
!$omp end parallel workshare

  else

!$omp parallel workshare
     f1 = f0 + HLF * dT * (f1 + f_rhs)
!$omp end parallel workshare

  endif

  return

end subroutine icn_rout
!~~~~~~~~~~~~~~~~~~
@@ -239,8 +253,10 @@
  real*8, dimension(ex(1),ex(2),ex(3)), intent(in)  :: f_rhs
  real*8, dimension(ex(1),ex(2),ex(3)), intent(out) :: f1

!$omp parallel workshare
  f1 = f0 + dT * f_rhs
!$omp end parallel workshare

  return

end subroutine euler_rout
@@ -19,7 +19,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "cgh.h"
#include "ShellPatch.h"
@@ -18,7 +18,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "microdef.h"
@@ -3,7 +3,11 @@
#include <math.h>
#include <string.h>

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "util_Table.h"
#include "cctk.h"
@@ -20,7 +20,11 @@ using namespace std;
#include <math.h>
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "cgh.h"
@@ -220,16 +224,9 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
    pox[2][n] = rex * nz_g[n];
  }

  double *shellf;
  shellf = new double[n_tot * InList];

  GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);

  int mp, Lp, Nmin, Nmax;

  mp = n_tot / cpusize;
  Lp = n_tot - cpusize * mp;

  if (Lp > myrank)
  {
    Nmin = myrank * mp + myrank;
@@ -241,6 +238,11 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
    Nmax = Nmin + mp - 1;
  }

  double *shellf;
  shellf = new double[n_tot * InList];

  GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);

  //|~~~~~> Integrate the dot product of Dphi with the surface normal.

  double *RP_out, *IP_out;
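The Nmin/Nmax arithmetic above implements a standard block distribution: each of the `cpusize` ranks gets `mp = n_tot / cpusize` points, and the first `Lp = n_tot mod cpusize` ranks take one extra. A self-contained sketch of the same arithmetic follows; the helper name `block_range` is hypothetical, and the else-branch `Nmin` formula is inferred from the visible `Nmax` line (the hunk elides it), so treat it as an assumption:

```cpp
#include <cstdio>

// Hypothetical helper mirroring the Nmin/Nmax arithmetic in surf_Wave:
// rank 'myrank' of 'cpusize' handles points [Nmin, Nmax] out of n_tot.
static void block_range(int n_tot, int cpusize, int myrank, int &Nmin, int &Nmax) {
  int mp = n_tot / cpusize;        // base points per rank
  int Lp = n_tot - cpusize * mp;   // leftovers, one each to the first Lp ranks
  if (Lp > myrank) {
    Nmin = myrank * mp + myrank;   // ranks below Lp own mp + 1 points
    Nmax = Nmin + mp;
  } else {
    Nmin = myrank * mp + Lp;       // inferred: remaining ranks own mp points
    Nmax = Nmin + mp - 1;
  }
}

int main() {
  // e.g. 10 points over 4 ranks -> [0,2] [3,5] [6,7] [8,9]
  for (int r = 0; r < 4; ++r) {
    int a, b;
    block_range(10, 4, r, a, b);
    std::printf("rank %d: [%d, %d]\n", r, a, b);
  }
  return 0;
}
```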
@@ -363,8 +365,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
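This rewrite, repeated at each Allreduce site below, packs the real and imaginary arrays into one buffer so that two collectives become one, trading two extra memcpys for half the collective-call count. The same pattern can be written with `std::vector` to avoid the manual `new`/`delete`; a sketch of that alternative formulation (the helper name `allreduce_pair` is hypothetical, not the patch's code):

```cpp
#include <cstring>
#include <vector>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

// Sum two length-NN arrays across ranks with one collective instead of two.
static void allreduce_pair(double *RP, double *IP, const double *RP_out,
                           const double *IP_out, int NN, MPI_Comm comm) {
  std::vector<double> packed(2 * NN), reduced(2 * NN);
  std::memcpy(packed.data(),      RP_out, NN * sizeof(double));
  std::memcpy(packed.data() + NN, IP_out, NN * sizeof(double));
  MPI_Allreduce(packed.data(), reduced.data(), 2 * NN, MPI_DOUBLE, MPI_SUM, comm);
  std::memcpy(RP, reduced.data(),      NN * sizeof(double));
  std::memcpy(IP, reduced.data() + NN, NN * sizeof(double));
}
```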
@@ -556,8 +567,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, Comm_here);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -735,8 +755,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH, var *Rpsi4
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -984,8 +1013,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -1419,8 +1457,17 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -1854,8 +1901,17 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH,
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -2040,8 +2096,17 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch2 *GH, var *
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -2226,8 +2291,17 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch *GH, var *R
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -2314,25 +2388,9 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
    pox[2][n] = rex * nz_g[n];
  }

  double *shellf;
  shellf = new double[n_tot * InList];

  // we have assumed there is only one box on this level,
  // so we do not need to loop over boxes
  GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);

  double Mass_out = 0;
  double ang_outx, ang_outy, ang_outz;
  double p_outx, p_outy, p_outz;
  ang_outx = ang_outy = ang_outz = 0.0;
  p_outx = p_outy = p_outz = 0.0;
  const double f1o8 = 0.125;

  int mp, Lp, Nmin, Nmax;

  mp = n_tot / cpusize;
  Lp = n_tot - cpusize * mp;

  if (Lp > myrank)
  {
    Nmin = myrank * mp + myrank;
@@ -2344,6 +2402,20 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
    Nmax = Nmin + mp - 1;
  }

  double *shellf;
  shellf = new double[n_tot * InList];

  // we have assumed there is only one box on this level,
  // so we do not need to loop over boxes
  GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);

  double Mass_out = 0;
  double ang_outx, ang_outy, ang_outz;
  double p_outx, p_outy, p_outz;
  ang_outx = ang_outy = ang_outz = 0.0;
  p_outx = p_outy = p_outz = 0.0;
  const double f1o8 = 0.125;

  double Chi, Psi;
  double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
  double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
@@ -2464,15 +2536,13 @@
    }
  }

  MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
    double scalar_in[7];
    MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
    px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
  }

#ifdef GaussInt
  mass = mass * rex * rex * dphi * factor;
@@ -2735,15 +2805,13 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
    }
  }

  MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, Comm_here);

  MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
  MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
  MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);

  MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
  MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
  MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
  {
    double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
    double scalar_in[7];
    MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, Comm_here);
    mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
    px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
  }

#ifdef GaussInt
  mass = mass * rex * rex * dphi * factor;
@@ -3020,15 +3088,13 @@ void surface_integral::surf_MassPAng(double rex, int lev, ShellPatch *GH, var *c
    }
  }

  MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
    double scalar_in[7];
    MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
    px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
  }

#ifdef GaussInt
  mass = mass * rex * rex * dphi * factor;
@@ -3607,8 +3673,17 @@ void surface_integral::surf_Wave(double rex, cgh *GH, ShellPatch *SH,
  }
  //|------+ Communicate and sum the results from each processor.

  MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  {
    double *RPIP_out = new double[2 * NN];
    double *RPIP = new double[2 * NN];
    memcpy(RPIP_out, RP_out, NN * sizeof(double));
    memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
    MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    memcpy(RP, RPIP, NN * sizeof(double));
    memcpy(IP, RPIP + NN, NN * sizeof(double));
    delete[] RPIP_out;
    delete[] RPIP;
  }

  //|------= Free memory.
@@ -18,7 +18,11 @@ using namespace std;
#include <math.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "macrodef.h"
@@ -20,7 +20,11 @@ using namespace std;
#include <map.h>
#endif

#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "misc.h"
#include "macrodef.h"
@@ -9,7 +9,11 @@
using namespace std;

#include <time.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif

#include "var.h"
@@ -10,17 +10,31 @@

import AMSS_NCKU_Input as input_data
import subprocess
import time
import os

## OpenMP configuration for threaded Fortran kernels
## OMP_NUM_THREADS: set to the number of physical cores (not hyperthreads)
## OMP_PROC_BIND: bind threads to cores to avoid migration overhead
## OMP_STACKSIZE: each thread needs stack space for the fh arrays (~3.6 MB)
if "OMP_NUM_THREADS" not in os.environ:
    os.environ["OMP_NUM_THREADS"] = "96"
os.environ["OMP_STACKSIZE"] = "16M"
os.environ["OMP_PROC_BIND"] = "close"
os.environ["OMP_PLACES"] = "cores"

## CPU core binding configuration using taskset
## taskset ensures all child processes inherit the CPU affinity mask
## This forces make and all compiler processes to use only the nohz_full cores (4-55, 60-111)
## Format: taskset -c 4-55,60-111 restricts processes to exactly those cores
NUMACTL_CPU_BIND = "taskset -c 0-111"
#NUMACTL_CPU_BIND = "taskset -c 0-111"
#NUMACTL_CPU_BIND = "taskset -c 16-47,64-95"
#NUMACTL_CPU_BIND = "taskset -c 8-15"
NUMACTL_CPU_BIND = ""

## Build parallelism configuration
## Use the nohz_full cores (4-55, 60-111) for compilation: 52 + 52 = 104 cores
## Set make -j to utilize the available cores for faster builds
BUILD_JOBS = 104
BUILD_JOBS = 16

##################################################################
@@ -114,28 +128,28 @@ def run_ABE():
    print( )

    ## Define the command to run; cast other values to strings as needed

    if (input_data.GPU_Calculation == "no"):
        mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
        mpi_command_outfile = "ABE_out.log"
        run_command = NUMACTL_CPU_BIND + " ./ABE"
        run_command_outfile = "ABE_out.log"
    elif (input_data.GPU_Calculation == "yes"):
        mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
        mpi_command_outfile = "ABEGPU_out.log"

    ## Execute the MPI command and stream output
    mpi_process = subprocess.Popen(mpi_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        run_command = NUMACTL_CPU_BIND + " ./ABEGPU"
        run_command_outfile = "ABEGPU_out.log"

    ## Execute the command and stream output
    run_process = subprocess.Popen(run_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)

    ## Write ABE run output to file while printing to stdout
    with open(mpi_command_outfile, 'w') as file0:
    with open(run_command_outfile, 'w') as file0:
        ## Read and print output lines; also write each line to file
        for line in mpi_process.stdout:
        for line in run_process.stdout:
            print(line, end='')  # stream output in real time
            file0.write(line)    # write the line to file
            file0.flush()        # flush to ensure each line is written immediately (optional)
    file0.close()

    ## Wait for the process to finish
    mpi_return_code = mpi_process.wait()
    run_return_code = run_process.wait()

    print( )
    print( " The ABE/ABEGPU simulation is finished " )
@@ -152,13 +166,14 @@ def run_ABE():
## Run the AMSS-NCKU TwoPuncture program TwoPunctureABE

def run_TwoPunctureABE():

    tp_time1 = time.time()
    print( )
    print( " Running the AMSS-NCKU executable file TwoPunctureABE " )
    print( )

    ## Define the command to run
    TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
    #TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
    TwoPuncture_command = " ./TwoPunctureABE"
    TwoPuncture_command_outfile = "TwoPunctureABE_out.log"

    ## Execute the command with subprocess.Popen and stream output
@@ -179,7 +194,9 @@ def run_TwoPunctureABE():
    print( )
    print( " The TwoPunctureABE simulation is finished " )
    print( )

    tp_time2 = time.time()
    et = tp_time2 - tp_time1
    print(f"Used time: {et}")
    return

##################################################################
pgo_profile/PGO_Profile_Analysis.md
Normal file
@@ -0,0 +1,97 @@
# AMSS-NCKU PGO Profile Analysis Report

## 1. Profiling Environment

| Item | Value |
|------|-------|
| Compiler | Intel oneAPI DPC++/C++ 2025.3.0 (icpx/ifx) |
| Instrumentation Flag | `-fprofile-instr-generate` |
| Optimization Level (instrumented) | `-O2 -xHost -fma` |
| MPI Processes | 1 (single process to avoid MPI+instrumentation deadlock) |
| Profile File | `default_9725750769337483397_0.profraw` (327 KB) |
| Merged Profile | `default.profdata` (394 KB) |
| llvm-profdata | `/home/intel/oneapi/compiler/2025.3/bin/compiler/llvm-profdata` |

## 2. Reduced Simulation Parameters (for the profiling run)

| Parameter | Production Value | Profiling Value |
|-----------|-----------------|-----------------|
| MPI_processes | 64 | 1 |
| grid_level | 9 | 4 |
| static_grid_level | 5 | 3 |
| static_grid_number | 96 | 24 |
| moving_grid_number | 48 | 16 |
| largest_box_xyz_max | 320^3 | 160^3 |
| Final_Evolution_Time | 1000.0 | 10.0 |
| Evolution_Step_Number | 10,000,000 | 1,000 |
| Detector_Number | 12 | 2 |

## 3. Profile Summary

| Metric | Value |
|--------|-------|
| Total instrumented functions | 1,392 |
| Functions with non-zero counts | 117 (8.4%) |
| Functions with zero counts | 1,275 (91.6%) |
| Maximum function entry count | 386,459,248 |
| Maximum internal block count | 370,477,680 |
| Total block count | 4,198,023,118 |

## 4. Top 20 Hotspot Functions

| Rank | Total Count | Max Block Count | Function | Category |
|------|------------|-----------------|----------|----------|
| 1 | 1,241,601,732 | 370,477,680 | `polint_` | Interpolation |
| 2 | 755,994,435 | 230,156,640 | `prolong3_` | Grid prolongation |
| 3 | 667,964,095 | 3,697,792 | `compute_rhs_bssn_` | BSSN RHS evolution |
| 4 | 539,736,051 | 386,459,248 | `symmetry_bd_` | Symmetry boundary |
| 5 | 277,310,808 | 53,170,728 | `lopsided_` | Lopsided FD stencil |
| 6 | 155,534,488 | 94,535,040 | `decide3d_` | 3D grid decision |
| 7 | 119,267,712 | 19,266,048 | `rungekutta4_rout_` | RK4 time integrator |
| 8 | 91,574,616 | 48,824,160 | `kodis_` | Kreiss-Oliger dissipation |
| 9 | 67,555,389 | 43,243,680 | `fderivs_` | Finite differences |
| 10 | 55,296,000 | 42,246,144 | `misc::fact(int)` | Factorial utility |
| 11 | 43,191,071 | 27,663,328 | `fdderivs_` | 2nd-order FD derivatives |
| 12 | 36,233,965 | 22,429,440 | `restrict3_` | Grid restriction |
| 13 | 24,698,512 | 17,231,520 | `polin3_` | Polynomial interpolation |
| 14 | 22,962,942 | 20,968,768 | `copy_` | Data copy |
| 15 | 20,135,696 | 17,259,168 | `Ansorg::barycentric(...)` | Spectral interpolation |
| 16 | 14,650,224 | 7,224,768 | `Ansorg::barycentric_omega(...)` | Spectral weights |
| 17 | 13,242,296 | 2,871,920 | `global_interp_` | Global interpolation |
| 18 | 12,672,000 | 7,734,528 | `sommerfeld_rout_` | Sommerfeld boundary |
| 19 | 6,872,832 | 1,880,064 | `sommerfeld_routbam_` | Sommerfeld boundary (BAM) |
| 20 | 5,709,900 | 2,809,632 | `l2normhelper_` | L2 norm computation |

## 5. Hotspot Category Breakdown

The top 20 functions account for ~98% of total execution counts:

| Category | Functions | Combined Count | Share |
|----------|-----------|---------------|-------|
| Interpolation / Prolongation / Restriction | polint_, prolong3_, restrict3_, polin3_, global_interp_, Ansorg::* | ~2,093M | ~50% |
| BSSN RHS + FD stencils | compute_rhs_bssn_, lopsided_, fderivs_, fdderivs_ | ~1,056M | ~25% |
| Boundary conditions | symmetry_bd_, sommerfeld_rout_, sommerfeld_routbam_ | ~559M | ~13% |
| Time integration | rungekutta4_rout_ | ~119M | ~3% |
| Dissipation | kodis_ | ~92M | ~2% |
| Utilities | misc::fact, decide3d_, copy_, l2normhelper_ | ~256M | ~6% |

## 6. Conclusions

1. **Profile data is valid**: 1,392 functions were instrumented and 117 exercised, with ~4.2 billion total counts.
2. **Hotspot concentration is high**: the top 5 functions alone account for ~76% of all counts, which is ideal for PGO; the compiler has strong branch/layout optimization targets.
3. **Fortran numerical kernels dominate**: `polint_`, `prolong3_`, `compute_rhs_bssn_`, `symmetry_bd_`, and `lopsided_` are all Fortran routines in the inner evolution loop. PGO will optimize their branch prediction and basic-block layout.
4. **91.6% of functions have zero counts**: these are code paths for unused features (GPU, BSSN-EScalar, BSSN-EM, Z4C, etc.). PGO will deprioritize them, improving instruction-cache utilization.
5. **The profile is representative**: despite the reduced grid size, the code-path coverage matches production; the same kernels (RHS, prolongation, restriction, boundary) are exercised, so the PGO branch probabilities from this profile will transfer well to full-scale runs.

## 7. PGO Phase 2 Usage

To apply the profile, use the following flags in `makefile.inc`:

```makefile
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
              -fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
              -Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
              -fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
              -align array64byte -fpp -I${MKLROOT}/include
```
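For reference, the merged profile listed in Section 1 is produced from the raw profile with llvm-profdata. The exact invocation used here is an assumption, but the standard form is `llvm-profdata merge -output=default.profdata default_9725750769337483397_0.profraw`, run with the oneAPI-bundled binary noted in the environment table above.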
BIN
pgo_profile/default.profdata
Normal file
Binary file not shown.
BIN
pgo_profile/default_9725750769337483397_0.profraw
Normal file
Binary file not shown.