Compare commits
7 Commits
chb-new
...
chb-copilo
| Author | SHA1 | Date | |
|---|---|---|---|
|
8b68b5d782
|
|||
|
dd2443c926
|
|||
|
2d7ba5c60c
|
|||
|
4777cad4ed
|
|||
|
afd4006da2
|
|||
|
|
a918dc103e
|
||
|
|
38c2c30186
|
@@ -341,9 +341,8 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
double *Shellf, int Symmetry)
|
double *Shellf, int Symmetry)
|
||||||
{
|
{
|
||||||
// NOTE: we do not Synchnize variables here, make sure of that before calling this routine
|
// NOTE: we do not Synchnize variables here, make sure of that before calling this routine
|
||||||
int myrank, nprocs;
|
int myrank;
|
||||||
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
|
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
|
||||||
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
|
|
||||||
|
|
||||||
int ordn = 2 * ghost_width;
|
int ordn = 2 * ghost_width;
|
||||||
MyList<var> *varl;
|
MyList<var> *varl;
|
||||||
@@ -355,18 +354,24 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
varl = varl->next;
|
varl = varl->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
memset(Shellf, 0, sizeof(double) * NN * num_var);
|
double *shellf;
|
||||||
|
shellf = new double[NN * num_var];
|
||||||
|
memset(shellf, 0, sizeof(double) * NN * num_var);
|
||||||
|
|
||||||
// owner_rank[j] records which MPI rank owns point j
|
// we use weight to monitor code, later some day we can move it for optimization
|
||||||
// All ranks traverse the same block list so they all agree on ownership
|
int *weight;
|
||||||
int *owner_rank;
|
weight = new int[NN];
|
||||||
owner_rank = new int[NN];
|
memset(weight, 0, sizeof(int) * NN);
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
owner_rank[j] = -1;
|
double *DH, *llb, *uub;
|
||||||
|
DH = new double[dim];
|
||||||
|
|
||||||
double DH[dim], llb[dim], uub[dim];
|
|
||||||
for (int i = 0; i < dim; i++)
|
for (int i = 0; i < dim; i++)
|
||||||
|
{
|
||||||
DH[i] = getdX(i);
|
DH[i] = getdX(i);
|
||||||
|
}
|
||||||
|
llb = new double[dim];
|
||||||
|
uub = new double[dim];
|
||||||
|
|
||||||
for (int j = 0; j < NN; j++) // run along points
|
for (int j = 0; j < NN; j++) // run along points
|
||||||
{
|
{
|
||||||
@@ -398,6 +403,12 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
bool flag = true;
|
bool flag = true;
|
||||||
for (int i = 0; i < dim; i++)
|
for (int i = 0; i < dim; i++)
|
||||||
{
|
{
|
||||||
|
// NOTE: our dividing structure is (exclude ghost)
|
||||||
|
// -1 0
|
||||||
|
// 1 2
|
||||||
|
// so (0,1) does not belong to any part for vertex structure
|
||||||
|
// here we put (0,0.5) to left part and (0.5,1) to right part
|
||||||
|
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
|
||||||
#ifdef Vertex
|
#ifdef Vertex
|
||||||
#ifdef Cell
|
#ifdef Cell
|
||||||
#error Both Cell and Vertex are defined
|
#error Both Cell and Vertex are defined
|
||||||
@@ -422,7 +433,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
if (flag)
|
if (flag)
|
||||||
{
|
{
|
||||||
notfind = false;
|
notfind = false;
|
||||||
owner_rank[j] = BP->rank;
|
|
||||||
if (myrank == BP->rank)
|
if (myrank == BP->rank)
|
||||||
{
|
{
|
||||||
//---> interpolation
|
//---> interpolation
|
||||||
@@ -430,11 +440,14 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
int k = 0;
|
int k = 0;
|
||||||
while (varl) // run along variables
|
while (varl) // run along variables
|
||||||
{
|
{
|
||||||
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
|
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
|
||||||
|
// pox,ordn,varl->data->SoA,Symmetry);
|
||||||
|
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
|
||||||
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
|
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
|
||||||
varl = varl->next;
|
varl = varl->next;
|
||||||
k++;
|
k++;
|
||||||
}
|
}
|
||||||
|
weight[j] = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (Bp == ble)
|
if (Bp == ble)
|
||||||
@@ -443,327 +456,103 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Replace MPI_Allreduce with per-owner MPI_Bcast:
|
MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
// Group consecutive points by owner rank and broadcast each group.
|
int *Weight;
|
||||||
// Since each point's data is non-zero only on the owner rank,
|
Weight = new int[NN];
|
||||||
// Bcast from owner is equivalent to Allreduce(MPI_SUM) but much cheaper.
|
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
|
||||||
|
// misc::tillherecheck("print me");
|
||||||
|
|
||||||
|
for (int i = 0; i < NN; i++)
|
||||||
{
|
{
|
||||||
int j = 0;
|
if (Weight[i] > 1)
|
||||||
while (j < NN)
|
|
||||||
{
|
{
|
||||||
int cur_owner = owner_rank[j];
|
if (myrank == 0)
|
||||||
if (cur_owner < 0)
|
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
|
||||||
{
|
for (int j = 0; j < num_var; j++)
|
||||||
if (myrank == 0)
|
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
|
||||||
{
|
|
||||||
cout << "ERROR: Patch::Interp_Points fails to find point (";
|
|
||||||
for (int d = 0; d < dim; d++)
|
|
||||||
{
|
|
||||||
cout << XX[d][j];
|
|
||||||
if (d < dim - 1)
|
|
||||||
cout << ",";
|
|
||||||
else
|
|
||||||
cout << ")";
|
|
||||||
}
|
|
||||||
cout << " on Patch (";
|
|
||||||
for (int d = 0; d < dim; d++)
|
|
||||||
{
|
|
||||||
cout << bbox[d] << "+" << lli[d] * DH[d];
|
|
||||||
if (d < dim - 1)
|
|
||||||
cout << ",";
|
|
||||||
else
|
|
||||||
cout << ")--";
|
|
||||||
}
|
|
||||||
cout << "(";
|
|
||||||
for (int d = 0; d < dim; d++)
|
|
||||||
{
|
|
||||||
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
|
|
||||||
if (d < dim - 1)
|
|
||||||
cout << ",";
|
|
||||||
else
|
|
||||||
cout << ")" << endl;
|
|
||||||
}
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
j++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// Find contiguous run of points with the same owner
|
|
||||||
int jstart = j;
|
|
||||||
while (j < NN && owner_rank[j] == cur_owner)
|
|
||||||
j++;
|
|
||||||
int count = (j - jstart) * num_var;
|
|
||||||
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
|
|
||||||
}
|
}
|
||||||
}
|
else if (Weight[i] == 0 && myrank == 0)
|
||||||
|
|
||||||
delete[] owner_rank;
|
|
||||||
}
|
|
||||||
void Patch::Interp_Points(MyList<var> *VarList,
|
|
||||||
int NN, double **XX,
|
|
||||||
double *Shellf, int Symmetry,
|
|
||||||
int Nmin_consumer, int Nmax_consumer)
|
|
||||||
{
|
|
||||||
// Targeted point-to-point overload: each owner sends each point only to
|
|
||||||
// the one rank that needs it for integration (consumer), reducing
|
|
||||||
// communication volume by ~nprocs times compared to the Bcast version.
|
|
||||||
int myrank, nprocs;
|
|
||||||
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
|
|
||||||
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
|
|
||||||
|
|
||||||
int ordn = 2 * ghost_width;
|
|
||||||
MyList<var> *varl;
|
|
||||||
int num_var = 0;
|
|
||||||
varl = VarList;
|
|
||||||
while (varl)
|
|
||||||
{
|
|
||||||
num_var++;
|
|
||||||
varl = varl->next;
|
|
||||||
}
|
|
||||||
|
|
||||||
memset(Shellf, 0, sizeof(double) * NN * num_var);
|
|
||||||
|
|
||||||
// owner_rank[j] records which MPI rank owns point j
|
|
||||||
int *owner_rank;
|
|
||||||
owner_rank = new int[NN];
|
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
owner_rank[j] = -1;
|
|
||||||
|
|
||||||
double DH[dim], llb[dim], uub[dim];
|
|
||||||
for (int i = 0; i < dim; i++)
|
|
||||||
DH[i] = getdX(i);
|
|
||||||
|
|
||||||
// --- Interpolation phase (identical to original) ---
|
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
{
|
|
||||||
double pox[dim];
|
|
||||||
for (int i = 0; i < dim; i++)
|
|
||||||
{
|
|
||||||
pox[i] = XX[i][j];
|
|
||||||
if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
|
|
||||||
{
|
|
||||||
cout << "Patch::Interp_Points: point (";
|
|
||||||
for (int k = 0; k < dim; k++)
|
|
||||||
{
|
|
||||||
cout << XX[k][j];
|
|
||||||
if (k < dim - 1)
|
|
||||||
cout << ",";
|
|
||||||
else
|
|
||||||
cout << ") is out of current Patch." << endl;
|
|
||||||
}
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
MyList<Block> *Bp = blb;
|
|
||||||
bool notfind = true;
|
|
||||||
while (notfind && Bp)
|
|
||||||
{
|
|
||||||
Block *BP = Bp->data;
|
|
||||||
|
|
||||||
bool flag = true;
|
|
||||||
for (int i = 0; i < dim; i++)
|
|
||||||
{
|
|
||||||
#ifdef Vertex
|
|
||||||
#ifdef Cell
|
|
||||||
#error Both Cell and Vertex are defined
|
|
||||||
#endif
|
|
||||||
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
|
|
||||||
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
|
|
||||||
#else
|
|
||||||
#ifdef Cell
|
|
||||||
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
|
|
||||||
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
|
|
||||||
#else
|
|
||||||
#error Not define Vertex nor Cell
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
if (XX[i][j] - llb[i] < -DH[i] / 2 || XX[i][j] - uub[i] > DH[i] / 2)
|
|
||||||
{
|
|
||||||
flag = false;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (flag)
|
|
||||||
{
|
|
||||||
notfind = false;
|
|
||||||
owner_rank[j] = BP->rank;
|
|
||||||
if (myrank == BP->rank)
|
|
||||||
{
|
|
||||||
varl = VarList;
|
|
||||||
int k = 0;
|
|
||||||
while (varl)
|
|
||||||
{
|
|
||||||
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
|
|
||||||
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
|
|
||||||
varl = varl->next;
|
|
||||||
k++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (Bp == ble)
|
|
||||||
break;
|
|
||||||
Bp = Bp->next;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Error check for unfound points ---
|
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
{
|
|
||||||
if (owner_rank[j] < 0 && myrank == 0)
|
|
||||||
{
|
{
|
||||||
cout << "ERROR: Patch::Interp_Points fails to find point (";
|
cout << "ERROR: Patch::Interp_Points fails to find point (";
|
||||||
for (int d = 0; d < dim; d++)
|
for (int j = 0; j < dim; j++)
|
||||||
{
|
{
|
||||||
cout << XX[d][j];
|
cout << XX[j][i];
|
||||||
if (d < dim - 1)
|
if (j < dim - 1)
|
||||||
cout << ",";
|
cout << ",";
|
||||||
else
|
else
|
||||||
cout << ")";
|
cout << ")";
|
||||||
}
|
}
|
||||||
cout << " on Patch (";
|
cout << " on Patch (";
|
||||||
for (int d = 0; d < dim; d++)
|
for (int j = 0; j < dim; j++)
|
||||||
{
|
{
|
||||||
cout << bbox[d] << "+" << lli[d] * DH[d];
|
cout << bbox[j] << "+" << lli[j] * getdX(j);
|
||||||
if (d < dim - 1)
|
if (j < dim - 1)
|
||||||
cout << ",";
|
cout << ",";
|
||||||
else
|
else
|
||||||
cout << ")--";
|
cout << ")--";
|
||||||
}
|
}
|
||||||
cout << "(";
|
cout << "(";
|
||||||
for (int d = 0; d < dim; d++)
|
for (int j = 0; j < dim; j++)
|
||||||
{
|
{
|
||||||
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
|
cout << bbox[dim + j] << "-" << uui[j] * getdX(j);
|
||||||
if (d < dim - 1)
|
if (j < dim - 1)
|
||||||
cout << ",";
|
cout << ",";
|
||||||
else
|
else
|
||||||
cout << ")" << endl;
|
cout << ")" << endl;
|
||||||
}
|
}
|
||||||
|
#if 0
|
||||||
|
checkBlock();
|
||||||
|
#else
|
||||||
|
cout << "splited domains:" << endl;
|
||||||
|
{
|
||||||
|
MyList<Block> *Bp = blb;
|
||||||
|
while (Bp)
|
||||||
|
{
|
||||||
|
Block *BP = Bp->data;
|
||||||
|
|
||||||
|
for (int i = 0; i < dim; i++)
|
||||||
|
{
|
||||||
|
#ifdef Vertex
|
||||||
|
#ifdef Cell
|
||||||
|
#error Both Cell and Vertex are defined
|
||||||
|
#endif
|
||||||
|
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
|
||||||
|
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
|
||||||
|
#else
|
||||||
|
#ifdef Cell
|
||||||
|
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
|
||||||
|
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
|
||||||
|
#else
|
||||||
|
#error Not define Vertex nor Cell
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
cout << "(";
|
||||||
|
for (int j = 0; j < dim; j++)
|
||||||
|
{
|
||||||
|
cout << llb[j] << ":" << uub[j];
|
||||||
|
if (j < dim - 1)
|
||||||
|
cout << ",";
|
||||||
|
else
|
||||||
|
cout << ")" << endl;
|
||||||
|
}
|
||||||
|
if (Bp == ble)
|
||||||
|
break;
|
||||||
|
Bp = Bp->next;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Targeted point-to-point communication phase ---
|
delete[] shellf;
|
||||||
// Compute consumer_rank[j] using the same deterministic formula as surface_integral
|
delete[] weight;
|
||||||
int *consumer_rank = new int[NN];
|
delete[] Weight;
|
||||||
{
|
delete[] DH;
|
||||||
int mp = NN / nprocs;
|
delete[] llb;
|
||||||
int Lp = NN - nprocs * mp;
|
delete[] uub;
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
{
|
|
||||||
if (j < Lp * (mp + 1))
|
|
||||||
consumer_rank[j] = j / (mp + 1);
|
|
||||||
else
|
|
||||||
consumer_rank[j] = Lp + (j - Lp * (mp + 1)) / mp;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count sends and recvs per rank
|
|
||||||
int *send_count = new int[nprocs];
|
|
||||||
int *recv_count = new int[nprocs];
|
|
||||||
memset(send_count, 0, sizeof(int) * nprocs);
|
|
||||||
memset(recv_count, 0, sizeof(int) * nprocs);
|
|
||||||
|
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
{
|
|
||||||
int own = owner_rank[j];
|
|
||||||
int con = consumer_rank[j];
|
|
||||||
if (own == con)
|
|
||||||
continue; // local — no communication needed
|
|
||||||
if (own == myrank)
|
|
||||||
send_count[con]++;
|
|
||||||
if (con == myrank)
|
|
||||||
recv_count[own]++;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build send buffers: for each destination rank, pack (index, data) pairs
|
|
||||||
// Each entry: 1 int (point index j) + num_var doubles
|
|
||||||
int total_send = 0, total_recv = 0;
|
|
||||||
int *send_offset = new int[nprocs];
|
|
||||||
int *recv_offset = new int[nprocs];
|
|
||||||
for (int r = 0; r < nprocs; r++)
|
|
||||||
{
|
|
||||||
send_offset[r] = total_send;
|
|
||||||
total_send += send_count[r];
|
|
||||||
recv_offset[r] = total_recv;
|
|
||||||
total_recv += recv_count[r];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pack send buffers: each message contains (j, data[0..num_var-1]) per point
|
|
||||||
int stride = 1 + num_var; // 1 double for index + num_var doubles for data
|
|
||||||
double *sendbuf = new double[total_send * stride];
|
|
||||||
double *recvbuf = new double[total_recv * stride];
|
|
||||||
|
|
||||||
// Temporary counters for packing
|
|
||||||
int *pack_pos = new int[nprocs];
|
|
||||||
memset(pack_pos, 0, sizeof(int) * nprocs);
|
|
||||||
|
|
||||||
for (int j = 0; j < NN; j++)
|
|
||||||
{
|
|
||||||
int own = owner_rank[j];
|
|
||||||
int con = consumer_rank[j];
|
|
||||||
if (own != myrank || con == myrank)
|
|
||||||
continue;
|
|
||||||
int pos = (send_offset[con] + pack_pos[con]) * stride;
|
|
||||||
sendbuf[pos] = (double)j; // point index
|
|
||||||
for (int v = 0; v < num_var; v++)
|
|
||||||
sendbuf[pos + 1 + v] = Shellf[j * num_var + v];
|
|
||||||
pack_pos[con]++;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Post non-blocking recvs and sends
|
|
||||||
int n_req = 0;
|
|
||||||
for (int r = 0; r < nprocs; r++)
|
|
||||||
{
|
|
||||||
if (recv_count[r] > 0) n_req++;
|
|
||||||
if (send_count[r] > 0) n_req++;
|
|
||||||
}
|
|
||||||
|
|
||||||
MPI_Request *reqs = new MPI_Request[n_req];
|
|
||||||
int req_idx = 0;
|
|
||||||
|
|
||||||
for (int r = 0; r < nprocs; r++)
|
|
||||||
{
|
|
||||||
if (recv_count[r] > 0)
|
|
||||||
{
|
|
||||||
MPI_Irecv(recvbuf + recv_offset[r] * stride,
|
|
||||||
recv_count[r] * stride, MPI_DOUBLE,
|
|
||||||
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (int r = 0; r < nprocs; r++)
|
|
||||||
{
|
|
||||||
if (send_count[r] > 0)
|
|
||||||
{
|
|
||||||
MPI_Isend(sendbuf + send_offset[r] * stride,
|
|
||||||
send_count[r] * stride, MPI_DOUBLE,
|
|
||||||
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (n_req > 0)
|
|
||||||
MPI_Waitall(n_req, reqs, MPI_STATUSES_IGNORE);
|
|
||||||
|
|
||||||
// Unpack recv buffers into Shellf
|
|
||||||
for (int i = 0; i < total_recv; i++)
|
|
||||||
{
|
|
||||||
int pos = i * stride;
|
|
||||||
int j = (int)recvbuf[pos];
|
|
||||||
for (int v = 0; v < num_var; v++)
|
|
||||||
Shellf[j * num_var + v] = recvbuf[pos + 1 + v];
|
|
||||||
}
|
|
||||||
|
|
||||||
delete[] reqs;
|
|
||||||
delete[] sendbuf;
|
|
||||||
delete[] recvbuf;
|
|
||||||
delete[] pack_pos;
|
|
||||||
delete[] send_offset;
|
|
||||||
delete[] recv_offset;
|
|
||||||
delete[] send_count;
|
|
||||||
delete[] recv_count;
|
|
||||||
delete[] consumer_rank;
|
|
||||||
delete[] owner_rank;
|
|
||||||
}
|
}
|
||||||
void Patch::Interp_Points(MyList<var> *VarList,
|
void Patch::Interp_Points(MyList<var> *VarList,
|
||||||
int NN, double **XX,
|
int NN, double **XX,
|
||||||
@@ -784,22 +573,24 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
varl = varl->next;
|
varl = varl->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
memset(Shellf, 0, sizeof(double) * NN * num_var);
|
double *shellf;
|
||||||
|
shellf = new double[NN * num_var];
|
||||||
|
memset(shellf, 0, sizeof(double) * NN * num_var);
|
||||||
|
|
||||||
// owner_rank[j] stores the global rank that owns point j
|
// we use weight to monitor code, later some day we can move it for optimization
|
||||||
int *owner_rank;
|
int *weight;
|
||||||
owner_rank = new int[NN];
|
weight = new int[NN];
|
||||||
for (int j = 0; j < NN; j++)
|
memset(weight, 0, sizeof(int) * NN);
|
||||||
owner_rank[j] = -1;
|
|
||||||
|
|
||||||
// Build global-to-local rank translation for Comm_here
|
double *DH, *llb, *uub;
|
||||||
MPI_Group world_group, local_group;
|
DH = new double[dim];
|
||||||
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
|
|
||||||
MPI_Comm_group(Comm_here, &local_group);
|
|
||||||
|
|
||||||
double DH[dim], llb[dim], uub[dim];
|
|
||||||
for (int i = 0; i < dim; i++)
|
for (int i = 0; i < dim; i++)
|
||||||
|
{
|
||||||
DH[i] = getdX(i);
|
DH[i] = getdX(i);
|
||||||
|
}
|
||||||
|
llb = new double[dim];
|
||||||
|
uub = new double[dim];
|
||||||
|
|
||||||
for (int j = 0; j < NN; j++) // run along points
|
for (int j = 0; j < NN; j++) // run along points
|
||||||
{
|
{
|
||||||
@@ -831,6 +622,12 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
bool flag = true;
|
bool flag = true;
|
||||||
for (int i = 0; i < dim; i++)
|
for (int i = 0; i < dim; i++)
|
||||||
{
|
{
|
||||||
|
// NOTE: our dividing structure is (exclude ghost)
|
||||||
|
// -1 0
|
||||||
|
// 1 2
|
||||||
|
// so (0,1) does not belong to any part for vertex structure
|
||||||
|
// here we put (0,0.5) to left part and (0.5,1) to right part
|
||||||
|
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
|
||||||
#ifdef Vertex
|
#ifdef Vertex
|
||||||
#ifdef Cell
|
#ifdef Cell
|
||||||
#error Both Cell and Vertex are defined
|
#error Both Cell and Vertex are defined
|
||||||
@@ -855,7 +652,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
if (flag)
|
if (flag)
|
||||||
{
|
{
|
||||||
notfind = false;
|
notfind = false;
|
||||||
owner_rank[j] = BP->rank;
|
|
||||||
if (myrank == BP->rank)
|
if (myrank == BP->rank)
|
||||||
{
|
{
|
||||||
//---> interpolation
|
//---> interpolation
|
||||||
@@ -863,11 +659,14 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
int k = 0;
|
int k = 0;
|
||||||
while (varl) // run along variables
|
while (varl) // run along variables
|
||||||
{
|
{
|
||||||
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
|
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
|
||||||
|
// pox,ordn,varl->data->SoA,Symmetry);
|
||||||
|
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
|
||||||
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
|
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
|
||||||
varl = varl->next;
|
varl = varl->next;
|
||||||
k++;
|
k++;
|
||||||
}
|
}
|
||||||
|
weight[j] = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (Bp == ble)
|
if (Bp == ble)
|
||||||
@@ -876,35 +675,97 @@ void Patch::Interp_Points(MyList<var> *VarList,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect unique global owner ranks and translate to local ranks in Comm_here
|
MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
// Then broadcast each owner's points via MPI_Bcast on Comm_here
|
int *Weight;
|
||||||
{
|
Weight = new int[NN];
|
||||||
int j = 0;
|
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, Comm_here);
|
||||||
while (j < NN)
|
|
||||||
{
|
|
||||||
int cur_owner_global = owner_rank[j];
|
|
||||||
if (cur_owner_global < 0)
|
|
||||||
{
|
|
||||||
// Point not found — skip (error check disabled for sub-communicator levels)
|
|
||||||
j++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// Translate global rank to local rank in Comm_here
|
|
||||||
int cur_owner_local;
|
|
||||||
MPI_Group_translate_ranks(world_group, 1, &cur_owner_global, local_group, &cur_owner_local);
|
|
||||||
|
|
||||||
// Find contiguous run of points with the same owner
|
// misc::tillherecheck("print me");
|
||||||
int jstart = j;
|
// if(lmyrank == 0) cout<<"myrank = "<<myrank<<"print me"<<endl;
|
||||||
while (j < NN && owner_rank[j] == cur_owner_global)
|
|
||||||
j++;
|
for (int i = 0; i < NN; i++)
|
||||||
int count = (j - jstart) * num_var;
|
{
|
||||||
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner_local, Comm_here);
|
if (Weight[i] > 1)
|
||||||
|
{
|
||||||
|
if (lmyrank == 0)
|
||||||
|
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
|
||||||
|
for (int j = 0; j < num_var; j++)
|
||||||
|
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
|
||||||
}
|
}
|
||||||
|
#if 0 // for not involved levels, this may fail
|
||||||
|
else if(Weight[i] == 0 && lmyrank == 0)
|
||||||
|
{
|
||||||
|
cout<<"ERROR: Patch::Interp_Points fails to find point (";
|
||||||
|
for(int j=0;j<dim;j++)
|
||||||
|
{
|
||||||
|
cout<<XX[j][i];
|
||||||
|
if(j<dim-1) cout<<",";
|
||||||
|
else cout<<")";
|
||||||
|
}
|
||||||
|
cout<<" on Patch (";
|
||||||
|
for(int j=0;j<dim;j++)
|
||||||
|
{
|
||||||
|
cout<<bbox[j]<<"+"<<lli[j]*getdX(j);
|
||||||
|
if(j<dim-1) cout<<",";
|
||||||
|
else cout<<")--";
|
||||||
|
}
|
||||||
|
cout<<"(";
|
||||||
|
for(int j=0;j<dim;j++)
|
||||||
|
{
|
||||||
|
cout<<bbox[dim+j]<<"-"<<uui[j]*getdX(j);
|
||||||
|
if(j<dim-1) cout<<",";
|
||||||
|
else cout<<")"<<endl;
|
||||||
|
}
|
||||||
|
#if 0
|
||||||
|
checkBlock();
|
||||||
|
#else
|
||||||
|
cout<<"splited domains:"<<endl;
|
||||||
|
{
|
||||||
|
MyList<Block> *Bp=blb;
|
||||||
|
while(Bp)
|
||||||
|
{
|
||||||
|
Block *BP=Bp->data;
|
||||||
|
|
||||||
|
for(int i=0;i<dim;i++)
|
||||||
|
{
|
||||||
|
#ifdef Vertex
|
||||||
|
#ifdef Cell
|
||||||
|
#error Both Cell and Vertex are defined
|
||||||
|
#endif
|
||||||
|
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +(ghost_width-0.5)*DH[i];
|
||||||
|
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-(ghost_width-0.5)*DH[i];
|
||||||
|
#else
|
||||||
|
#ifdef Cell
|
||||||
|
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +ghost_width*DH[i];
|
||||||
|
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-ghost_width*DH[i];
|
||||||
|
#else
|
||||||
|
#error Not define Vertex nor Cell
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
cout<<"(";
|
||||||
|
for(int j=0;j<dim;j++)
|
||||||
|
{
|
||||||
|
cout<<llb[j]<<":"<<uub[j];
|
||||||
|
if(j<dim-1) cout<<",";
|
||||||
|
else cout<<")"<<endl;
|
||||||
|
}
|
||||||
|
if(Bp == ble) break;
|
||||||
|
Bp=Bp->next;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
MPI_Abort(MPI_COMM_WORLD,1);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
MPI_Group_free(&world_group);
|
delete[] shellf;
|
||||||
MPI_Group_free(&local_group);
|
delete[] weight;
|
||||||
delete[] owner_rank;
|
delete[] Weight;
|
||||||
|
delete[] DH;
|
||||||
|
delete[] llb;
|
||||||
|
delete[] uub;
|
||||||
}
|
}
|
||||||
void Patch::checkBlock()
|
void Patch::checkBlock()
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -39,10 +39,6 @@ public:
|
|||||||
|
|
||||||
bool Find_Point(double *XX);
|
bool Find_Point(double *XX);
|
||||||
|
|
||||||
void Interp_Points(MyList<var> *VarList,
|
|
||||||
int NN, double **XX,
|
|
||||||
double *Shellf, int Symmetry,
|
|
||||||
int Nmin_consumer, int Nmax_consumer);
|
|
||||||
void Interp_Points(MyList<var> *VarList,
|
void Interp_Points(MyList<var> *VarList,
|
||||||
int NN, double **XX,
|
int NN, double **XX,
|
||||||
double *Shellf, int Symmetry, MPI_Comm Comm_here);
|
double *Shellf, int Symmetry, MPI_Comm Comm_here);
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -81,43 +81,53 @@ namespace Parallel
|
|||||||
int Symmetry);
|
int Symmetry);
|
||||||
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
|
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
|
||||||
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
|
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
|
||||||
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
|
|
||||||
|
|
||||||
struct SyncCache {
|
// Async Sync: overlap MPI communication with computation
|
||||||
bool valid;
|
struct TransferState
|
||||||
int cpusize;
|
{
|
||||||
MyList<gridseg> **combined_src;
|
|
||||||
MyList<gridseg> **combined_dst;
|
|
||||||
int *send_lengths;
|
|
||||||
int *recv_lengths;
|
|
||||||
double **send_bufs;
|
|
||||||
double **recv_bufs;
|
|
||||||
int *send_buf_caps;
|
|
||||||
int *recv_buf_caps;
|
|
||||||
MPI_Request *reqs;
|
MPI_Request *reqs;
|
||||||
MPI_Status *stats;
|
MPI_Status *stats;
|
||||||
int max_reqs;
|
|
||||||
bool lengths_valid;
|
|
||||||
SyncCache();
|
|
||||||
void invalidate();
|
|
||||||
void destroy();
|
|
||||||
};
|
|
||||||
|
|
||||||
void Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache);
|
|
||||||
void transfer_cached(MyList<gridseg> **src, MyList<gridseg> **dst,
|
|
||||||
MyList<var> *VarList1, MyList<var> *VarList2,
|
|
||||||
int Symmetry, SyncCache &cache);
|
|
||||||
|
|
||||||
struct AsyncSyncState {
|
|
||||||
int req_no;
|
int req_no;
|
||||||
bool active;
|
double **send_data;
|
||||||
AsyncSyncState() : req_no(0), active(false) {}
|
double **rec_data;
|
||||||
|
int cpusize;
|
||||||
|
MyList<gridseg> **transfer_src;
|
||||||
|
MyList<gridseg> **transfer_dst;
|
||||||
|
MyList<gridseg> **src;
|
||||||
|
MyList<gridseg> *dst;
|
||||||
|
MyList<var> *VarList1;
|
||||||
|
MyList<var> *VarList2;
|
||||||
|
int Symmetry;
|
||||||
|
bool owns_gsl; // true if this state owns and should free the GSLs
|
||||||
};
|
};
|
||||||
|
struct SyncHandle
|
||||||
|
{
|
||||||
|
TransferState *states;
|
||||||
|
int num_states;
|
||||||
|
};
|
||||||
|
SyncHandle *SyncBegin(Patch *Pat, MyList<var> *VarList, int Symmetry);
|
||||||
|
SyncHandle *SyncBegin(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
|
||||||
|
void SyncEnd(SyncHandle *handle);
|
||||||
|
|
||||||
void Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
|
// Cached GSL plan: pre-build grid segment lists once, reuse across multiple Sync calls
|
||||||
SyncCache &cache, AsyncSyncState &state);
|
struct SyncPlanEntry
|
||||||
void Sync_finish(SyncCache &cache, AsyncSyncState &state,
|
{
|
||||||
MyList<var> *VarList, int Symmetry);
|
int cpusize;
|
||||||
|
MyList<gridseg> **transfer_src;
|
||||||
|
MyList<gridseg> **transfer_dst;
|
||||||
|
MyList<gridseg> **src;
|
||||||
|
MyList<gridseg> *dst;
|
||||||
|
};
|
||||||
|
struct SyncPlan
|
||||||
|
{
|
||||||
|
SyncPlanEntry *entries;
|
||||||
|
int num_entries;
|
||||||
|
int Symmetry;
|
||||||
|
};
|
||||||
|
SyncPlan *SyncPreparePlan(MyList<Patch> *PatL, int Symmetry);
|
||||||
|
void SyncFreePlan(SyncPlan *plan);
|
||||||
|
SyncHandle *SyncBeginWithPlan(SyncPlan *plan, MyList<var> *VarList);
|
||||||
|
SyncHandle *SyncBeginWithPlan(SyncPlan *plan, MyList<var> *VarList1, MyList<var> *VarList2);
|
||||||
void OutBdLow2Hi(Patch *Patc, Patch *Patf,
|
void OutBdLow2Hi(Patch *Patc, Patch *Patf,
|
||||||
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
||||||
int Symmetry);
|
int Symmetry);
|
||||||
@@ -130,15 +140,6 @@ namespace Parallel
|
|||||||
void OutBdLow2Himix(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
void OutBdLow2Himix(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
||||||
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
||||||
int Symmetry);
|
int Symmetry);
|
||||||
void Restrict_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
|
||||||
MyList<var> *VarList1, MyList<var> *VarList2,
|
|
||||||
int Symmetry, SyncCache &cache);
|
|
||||||
void OutBdLow2Hi_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
|
||||||
MyList<var> *VarList1, MyList<var> *VarList2,
|
|
||||||
int Symmetry, SyncCache &cache);
|
|
||||||
void OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
|
|
||||||
MyList<var> *VarList1, MyList<var> *VarList2,
|
|
||||||
int Symmetry, SyncCache &cache);
|
|
||||||
void Prolong(Patch *Patc, Patch *Patf,
|
void Prolong(Patch *Patc, Patch *Patf,
|
||||||
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
|
||||||
int Symmetry);
|
int Symmetry);
|
||||||
|
|||||||
@@ -186,6 +186,12 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
int ERROR = 0;
|
int ERROR = 0;
|
||||||
|
|
||||||
MyList<ss_patch> *sPp;
|
MyList<ss_patch> *sPp;
|
||||||
|
|
||||||
|
// Pre-build grid segment lists once for this level's patches.
|
||||||
|
// These are reused across predictor + 3 corrector SyncBegin calls,
|
||||||
|
// avoiding O(cpusize * blocks^2) rebuild each time.
|
||||||
|
Parallel::SyncPlan *sync_plan = Parallel::SyncPreparePlan(GH->PatL[lev], Symmetry);
|
||||||
|
|
||||||
// Predictor
|
// Predictor
|
||||||
MyList<Patch> *Pp = GH->PatL[lev];
|
MyList<Patch> *Pp = GH->PatL[lev];
|
||||||
while (Pp)
|
while (Pp)
|
||||||
@@ -321,7 +327,26 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// Start async ghost zone exchange - overlaps with error check and Shell computation
|
||||||
|
Parallel::SyncHandle *sync_pre = Parallel::SyncBeginWithPlan(sync_plan, SynchList_pre);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -339,9 +364,9 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
||||||
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
||||||
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
||||||
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -453,16 +478,26 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req_pre;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_pre);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_pre;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
if (sync_pre) Parallel::SyncEnd(sync_pre);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -475,30 +510,12 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_pre, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -688,7 +705,27 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// Start async ghost zone exchange - overlaps with error check and Shell computation
|
||||||
|
Parallel::SyncHandle *sync_cor = Parallel::SyncBeginWithPlan(sync_plan, SynchList_cor);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -829,16 +866,27 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req_cor;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_cor;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
if (sync_cor) Parallel::SyncEnd(sync_cor);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -851,30 +899,11 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -1031,6 +1060,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
Porg0[ithBH][2] = Porg1[ithBH][2];
|
Porg0[ithBH][2] = Porg1[ithBH][2];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Parallel::SyncFreePlan(sync_plan);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
// for constraint preserving boundary (CPBC)
|
// for constraint preserving boundary (CPBC)
|
||||||
@@ -1064,6 +1095,10 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
int ERROR = 0;
|
int ERROR = 0;
|
||||||
|
|
||||||
MyList<ss_patch> *sPp;
|
MyList<ss_patch> *sPp;
|
||||||
|
|
||||||
|
// Pre-build grid segment lists once for this level's patches.
|
||||||
|
Parallel::SyncPlan *sync_plan = Parallel::SyncPreparePlan(GH->PatL[lev], Symmetry);
|
||||||
|
|
||||||
// Predictor
|
// Predictor
|
||||||
MyList<Patch> *Pp = GH->PatL[lev];
|
MyList<Patch> *Pp = GH->PatL[lev];
|
||||||
while (Pp)
|
while (Pp)
|
||||||
@@ -1241,7 +1276,22 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// check error information
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -1516,15 +1566,28 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// Start async ghost zone exchange - overlaps with error check
|
||||||
MPI_Request err_req_pre;
|
Parallel::SyncHandle *sync_pre = Parallel::SyncBeginWithPlan(sync_plan, SynchList_pre);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_pre);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_pre;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
if (sync_pre) Parallel::SyncEnd(sync_pre);
|
||||||
|
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
{
|
{
|
||||||
@@ -1536,8 +1599,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1586,22 +1649,6 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_pre, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -1823,7 +1870,23 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// check error information
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -2069,15 +2132,29 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// Start async ghost zone exchange - overlaps with error check
|
||||||
MPI_Request err_req_cor;
|
Parallel::SyncHandle *sync_cor = Parallel::SyncBeginWithPlan(sync_plan, SynchList_cor);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_cor;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
if (sync_cor) Parallel::SyncEnd(sync_cor);
|
||||||
|
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
{
|
{
|
||||||
@@ -2089,8 +2166,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2127,23 +2204,6 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
// end smooth
|
// end smooth
|
||||||
#endif
|
#endif
|
||||||
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -2320,6 +2380,8 @@ void Z4c_class::Step(int lev, int YN)
|
|||||||
DG_List->clearList();
|
DG_List->clearList();
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
Parallel::SyncFreePlan(sync_plan);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
#undef MRBD
|
#undef MRBD
|
||||||
|
|||||||
@@ -730,12 +730,6 @@ void bssn_class::Initialize()
|
|||||||
PhysTime = StartTime;
|
PhysTime = StartTime;
|
||||||
Setup_Black_Hole_position();
|
Setup_Black_Hole_position();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize sync caches (per-level, for predictor and corrector)
|
|
||||||
sync_cache_pre = new Parallel::SyncCache[GH->levels];
|
|
||||||
sync_cache_cor = new Parallel::SyncCache[GH->levels];
|
|
||||||
sync_cache_rp_coarse = new Parallel::SyncCache[GH->levels];
|
|
||||||
sync_cache_rp_fine = new Parallel::SyncCache[GH->levels];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//================================================================================================
|
//================================================================================================
|
||||||
@@ -987,32 +981,6 @@ bssn_class::~bssn_class()
|
|||||||
delete Azzz;
|
delete Azzz;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Destroy sync caches before GH
|
|
||||||
if (sync_cache_pre)
|
|
||||||
{
|
|
||||||
for (int i = 0; i < GH->levels; i++)
|
|
||||||
sync_cache_pre[i].destroy();
|
|
||||||
delete[] sync_cache_pre;
|
|
||||||
}
|
|
||||||
if (sync_cache_cor)
|
|
||||||
{
|
|
||||||
for (int i = 0; i < GH->levels; i++)
|
|
||||||
sync_cache_cor[i].destroy();
|
|
||||||
delete[] sync_cache_cor;
|
|
||||||
}
|
|
||||||
if (sync_cache_rp_coarse)
|
|
||||||
{
|
|
||||||
for (int i = 0; i < GH->levels; i++)
|
|
||||||
sync_cache_rp_coarse[i].destroy();
|
|
||||||
delete[] sync_cache_rp_coarse;
|
|
||||||
}
|
|
||||||
if (sync_cache_rp_fine)
|
|
||||||
{
|
|
||||||
for (int i = 0; i < GH->levels; i++)
|
|
||||||
sync_cache_rp_fine[i].destroy();
|
|
||||||
delete[] sync_cache_rp_fine;
|
|
||||||
}
|
|
||||||
|
|
||||||
delete GH;
|
delete GH;
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
delete SH;
|
delete SH;
|
||||||
@@ -2213,7 +2181,6 @@ void bssn_class::Evolve(int Steps)
|
|||||||
GH->Regrid(Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid(Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_mon, StartTime, dT_mon / 2), ErrorMonitor);
|
fgt(PhysTime - dT_mon, StartTime, dT_mon / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (REGLEV == 0 && (PSTR == 1 || PSTR == 2))
|
#if (REGLEV == 0 && (PSTR == 1 || PSTR == 2))
|
||||||
@@ -2429,7 +2396,6 @@ void bssn_class::RecursiveStep(int lev)
|
|||||||
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2608,7 +2574,6 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2775,7 +2740,6 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2790,7 +2754,6 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2809,7 +2772,6 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -2825,7 +2787,6 @@ void bssn_class::ParallelStep()
|
|||||||
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
|
||||||
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
SynchList_cor, OldStateList, StateList, SynchList_pre,
|
||||||
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
|
||||||
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
|
|
||||||
|
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
// a_stream.str("");
|
// a_stream.str("");
|
||||||
@@ -3074,6 +3035,12 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
int ERROR = 0;
|
int ERROR = 0;
|
||||||
|
|
||||||
MyList<ss_patch> *sPp;
|
MyList<ss_patch> *sPp;
|
||||||
|
|
||||||
|
// Pre-build grid segment lists once for this level's patches.
|
||||||
|
// These are reused across predictor + 3 corrector SyncBegin calls,
|
||||||
|
// avoiding O(cpusize * blocks^2) rebuild each time.
|
||||||
|
Parallel::SyncPlan *sync_plan = Parallel::SyncPreparePlan(GH->PatL[lev], Symmetry);
|
||||||
|
|
||||||
// Predictor
|
// Predictor
|
||||||
MyList<Patch> *Pp = GH->PatL[lev];
|
MyList<Patch> *Pp = GH->PatL[lev];
|
||||||
while (Pp)
|
while (Pp)
|
||||||
@@ -3197,7 +3164,26 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
|
||||||
|
// Start async ghost zone exchange - overlaps with error check and Shell computation
|
||||||
|
Parallel::SyncHandle *sync_pre = Parallel::SyncBeginWithPlan(sync_plan, SynchList_pre);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -3215,9 +3201,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
||||||
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
||||||
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
||||||
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -3341,16 +3327,27 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_pre;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
if (sync_pre) Parallel::SyncEnd(sync_pre);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -3363,29 +3360,12 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (MAPBH == 0)
|
#if (MAPBH == 0)
|
||||||
// for black hole position
|
// for black hole position
|
||||||
@@ -3561,7 +3541,28 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// Start async ghost zone exchange - overlaps with error check and Shell computation
|
||||||
|
Parallel::SyncHandle *sync_cor = Parallel::SyncBeginWithPlan(sync_plan, SynchList_cor);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -3579,9 +3580,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
||||||
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
||||||
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
||||||
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
||||||
#elif (AGM == 1)
|
#elif (AGM == 1)
|
||||||
if (iter_count == 3)
|
if (iter_count == 3)
|
||||||
@@ -3701,16 +3702,28 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req_cor;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#"
|
||||||
|
<< iter_count << " variables at t = "
|
||||||
|
<< PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_cor;
|
// Complete async ghost zone exchange
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
if (sync_cor) Parallel::SyncEnd(sync_cor);
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -3723,31 +3736,12 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (MAPBH == 0)
|
#if (MAPBH == 0)
|
||||||
// for black hole position
|
// for black hole position
|
||||||
@@ -3920,6 +3914,8 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
Porg0[ithBH][2] = Porg1[ithBH][2];
|
Porg0[ithBH][2] = Porg1[ithBH][2];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Parallel::SyncFreePlan(sync_plan);
|
||||||
}
|
}
|
||||||
|
|
||||||
//================================================================================================
|
//================================================================================================
|
||||||
@@ -4059,7 +4055,22 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// check error information
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -4077,15 +4088,15 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
|
||||||
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
|
||||||
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
|
||||||
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (f_compute_rhs_bssn_ss(cg->shape, TRK4, cg->X[0], cg->X[1], cg->X[2],
|
if (f_compute_rhs_bssn_ss(cg->shape, TRK4, cg->X[0], cg->X[1], cg->X[2],
|
||||||
cg->fgfs[fngfs + ShellPatch::gx],
|
cg->fgfs[fngfs + ShellPatch::gx],
|
||||||
cg->fgfs[fngfs + ShellPatch::gy],
|
cg->fgfs[fngfs + ShellPatch::gy],
|
||||||
cg->fgfs[fngfs + ShellPatch::gz],
|
cg->fgfs[fngfs + ShellPatch::gz],
|
||||||
cg->fgfs[fngfs + ShellPatch::drhodx],
|
cg->fgfs[fngfs + ShellPatch::drhodx],
|
||||||
cg->fgfs[fngfs + ShellPatch::drhody],
|
cg->fgfs[fngfs + ShellPatch::drhody],
|
||||||
@@ -4200,16 +4211,25 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = "
|
||||||
|
<< PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_pre;
|
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -4222,27 +4242,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@@ -4405,7 +4407,23 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
Pp = Pp->next;
|
Pp = Pp->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
|
// check error information
|
||||||
|
{
|
||||||
|
int erh = ERROR;
|
||||||
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime
|
||||||
|
<< ", lev = " << lev << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
// evolve Shell Patches
|
// evolve Shell Patches
|
||||||
@@ -4423,9 +4441,9 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
#if (AGM == 0)
|
#if (AGM == 0)
|
||||||
f_enforce_ga(cg->shape,
|
f_enforce_ga(cg->shape,
|
||||||
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
|
||||||
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
|
||||||
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
|
||||||
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
|
||||||
#elif (AGM == 1)
|
#elif (AGM == 1)
|
||||||
if (iter_count == 3)
|
if (iter_count == 3)
|
||||||
@@ -4545,16 +4563,25 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req_cor;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::AsyncSyncState async_cor;
|
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
||||||
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
#ifdef WithShell
|
||||||
if (lev == 0)
|
if (lev == 0)
|
||||||
@@ -4567,30 +4594,11 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
|
|
||||||
|
|
||||||
#ifdef WithShell
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime
|
|
||||||
<< ", lev = " << lev << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -4830,6 +4838,12 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
int ERROR = 0;
|
int ERROR = 0;
|
||||||
|
|
||||||
MyList<ss_patch> *sPp;
|
MyList<ss_patch> *sPp;
|
||||||
|
|
||||||
|
// Pre-build grid segment lists once for this level's patches.
|
||||||
|
// These are reused across predictor + 3 corrector SyncBegin calls,
|
||||||
|
// avoiding O(cpusize * blocks^2) rebuild each time.
|
||||||
|
Parallel::SyncPlan *sync_plan = Parallel::SyncPreparePlan(GH->PatL[lev], Symmetry);
|
||||||
|
|
||||||
// Predictor
|
// Predictor
|
||||||
MyList<Patch> *Pp = GH->PatL[lev];
|
MyList<Patch> *Pp = GH->PatL[lev];
|
||||||
while (Pp)
|
while (Pp)
|
||||||
@@ -4956,21 +4970,17 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");
|
||||||
|
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// Start async ghost zone exchange - overlaps with error check and BH position
|
||||||
MPI_Request err_req;
|
Parallel::SyncHandle *sync_pre = Parallel::SyncBeginWithPlan(sync_plan, SynchList_pre);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
|
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev]);
|
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
if (ERROR)
|
||||||
{
|
{
|
||||||
|
Parallel::SyncEnd(sync_pre); sync_pre = 0;
|
||||||
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
|
||||||
if (myrank == 0)
|
if (myrank == 0)
|
||||||
{
|
{
|
||||||
@@ -4980,6 +4990,11 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
|
||||||
|
|
||||||
|
// Complete async ghost zone exchange
|
||||||
|
if (sync_pre) Parallel::SyncEnd(sync_pre);
|
||||||
|
|
||||||
#if (MAPBH == 0)
|
#if (MAPBH == 0)
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -5157,34 +5172,35 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector error check");
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector error check");
|
||||||
|
|
||||||
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
|
// Start async ghost zone exchange - overlaps with error check and BH position
|
||||||
MPI_Request err_req_cor;
|
Parallel::SyncHandle *sync_cor = Parallel::SyncBeginWithPlan(sync_plan, SynchList_cor);
|
||||||
|
|
||||||
|
// check error information (overlaps with MPI transfer)
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
|
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev]);
|
|
||||||
|
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
|
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
if (ERROR)
|
||||||
{
|
{
|
||||||
|
Parallel::SyncEnd(sync_cor); sync_cor = 0;
|
||||||
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
if (myrank == 0)
|
if (myrank == 0)
|
||||||
{
|
{
|
||||||
if (ErrorMonitor->outfile)
|
if (ErrorMonitor->outfile)
|
||||||
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
|
||||||
<< " variables at t = " << PhysTime
|
<< " variables at t = " << PhysTime
|
||||||
<< ", lev = " << lev << endl;
|
<< ", lev = " << lev << endl;
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
|
||||||
|
|
||||||
|
// Complete async ghost zone exchange
|
||||||
|
if (sync_cor) Parallel::SyncEnd(sync_cor);
|
||||||
|
|
||||||
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
|
||||||
|
|
||||||
#if (MAPBH == 0)
|
#if (MAPBH == 0)
|
||||||
// for black hole position
|
// for black hole position
|
||||||
if (BH_num > 0 && lev == GH->levels - 1)
|
if (BH_num > 0 && lev == GH->levels - 1)
|
||||||
@@ -5297,6 +5313,8 @@ void bssn_class::Step(int lev, int YN)
|
|||||||
|
|
||||||
// if(myrank==GH->start_rank[lev]) cout<<GH->mylev<<endl;
|
// if(myrank==GH->start_rank[lev]) cout<<GH->mylev<<endl;
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"complet GH Step");
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"complet GH Step");
|
||||||
|
|
||||||
|
Parallel::SyncFreePlan(sync_plan);
|
||||||
}
|
}
|
||||||
|
|
||||||
//================================================================================================
|
//================================================================================================
|
||||||
@@ -5468,11 +5486,21 @@ void bssn_class::SHStep()
|
|||||||
#if (PSTR == 1 || PSTR == 2)
|
#if (PSTR == 1 || PSTR == 2)
|
||||||
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor's error check");
|
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor's error check");
|
||||||
#endif
|
#endif
|
||||||
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -5484,25 +5512,12 @@ void bssn_class::SHStep()
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// corrector
|
// corrector
|
||||||
for (iter_count = 1; iter_count < 4; iter_count++)
|
for (iter_count = 1; iter_count < 4; iter_count++)
|
||||||
{
|
{
|
||||||
@@ -5645,11 +5660,21 @@ void bssn_class::SHStep()
|
|||||||
sPp = sPp->next;
|
sPp = sPp->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
|
// check error information
|
||||||
MPI_Request err_req_cor;
|
|
||||||
{
|
{
|
||||||
int erh = ERROR;
|
int erh = ERROR;
|
||||||
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
|
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
}
|
||||||
|
if (ERROR)
|
||||||
|
{
|
||||||
|
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
||||||
|
if (myrank == 0)
|
||||||
|
{
|
||||||
|
if (ErrorMonitor->outfile)
|
||||||
|
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
||||||
|
<< " variables at t = " << PhysTime << endl;
|
||||||
|
MPI_Abort(MPI_COMM_WORLD, 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -5661,26 +5686,12 @@ void bssn_class::SHStep()
|
|||||||
{
|
{
|
||||||
prev_clock = curr_clock;
|
prev_clock = curr_clock;
|
||||||
curr_clock = clock();
|
curr_clock = clock();
|
||||||
cout << " Shell stuff synchronization used "
|
cout << " Shell stuff synchronization used "
|
||||||
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
|
||||||
<< " seconds! " << endl;
|
<< " seconds! " << endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complete non-blocking error reduction and check
|
|
||||||
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
|
|
||||||
if (ERROR)
|
|
||||||
{
|
|
||||||
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
|
|
||||||
if (myrank == 0)
|
|
||||||
{
|
|
||||||
if (ErrorMonitor->outfile)
|
|
||||||
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
|
|
||||||
<< " variables at t = " << PhysTime << endl;
|
|
||||||
MPI_Abort(MPI_COMM_WORLD, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sPp = SH->PatL;
|
sPp = SH->PatL;
|
||||||
while (sPp)
|
while (sPp)
|
||||||
{
|
{
|
||||||
@@ -5809,7 +5820,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
|
|||||||
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
|
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
|
||||||
|
|
||||||
#if (PSTR == 1 || PSTR == 2)
|
#if (PSTR == 1 || PSTR == 2)
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
@@ -5819,11 +5830,21 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
|
||||||
@@ -5860,7 +5881,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
|
|||||||
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
|
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
|
||||||
|
|
||||||
#if (PSTR == 1 || PSTR == 2)
|
#if (PSTR == 1 || PSTR == 2)
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
@@ -5870,11 +5891,21 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SL, SL, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SL, SL, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
|
||||||
@@ -5888,7 +5919,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
|
Parallel::Sync(GH->PatL[lev], SL, Symmetry);
|
||||||
|
|
||||||
#if (PSTR == 1 || PSTR == 2)
|
#if (PSTR == 1 || PSTR == 2)
|
||||||
// a_stream.clear();
|
// a_stream.clear();
|
||||||
@@ -5946,14 +5977,24 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
|
|||||||
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SynchList_pre, GH->rsul[lev], Symmetry);
|
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SynchList_pre, GH->rsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SL, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SL,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SL, GH->bdsul[lev], Symmetry);
|
||||||
@@ -5968,21 +6009,31 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
|
|||||||
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->rsul[lev], Symmetry);
|
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->rsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SL, SL, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SL, SL, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SL,SL,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->bdsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
|
Parallel::Sync(GH->PatL[lev], SL, Symmetry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -6033,14 +6084,24 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
|
|||||||
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, SynchList_pre, GH->rsul[lev], Symmetry);
|
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, SynchList_pre, GH->rsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
|
||||||
@@ -6057,21 +6118,31 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
|
|||||||
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, GH->rsul[lev], Symmetry);
|
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, GH->rsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
|
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -6101,11 +6172,21 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, SynchList_pre, SynchList_cor, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],SynchList_pre,SynchList_cor,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_pre, SynchList_cor, GH->bdsul[lev], Symmetry);
|
||||||
@@ -6114,11 +6195,21 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
|
|||||||
else // no time refinement levels and for all same time levels
|
else // no time refinement levels and for all same time levels
|
||||||
{
|
{
|
||||||
#if (RPB == 0)
|
#if (RPB == 0)
|
||||||
|
Ppc = GH->PatL[lev - 1];
|
||||||
|
while (Ppc)
|
||||||
|
{
|
||||||
|
Pp = GH->PatL[lev];
|
||||||
|
while (Pp)
|
||||||
|
{
|
||||||
#if (MIXOUTB == 0)
|
#if (MIXOUTB == 0)
|
||||||
Parallel::OutBdLow2Hi(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Hi(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
|
||||||
#elif (MIXOUTB == 1)
|
#elif (MIXOUTB == 1)
|
||||||
Parallel::OutBdLow2Himix(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, Symmetry);
|
Parallel::OutBdLow2Himix(Ppc->data, Pp->data, StateList, SynchList_cor, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
|
Pp = Pp->next;
|
||||||
|
}
|
||||||
|
Ppc = Ppc->next;
|
||||||
|
}
|
||||||
#elif (RPB == 1)
|
#elif (RPB == 1)
|
||||||
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
|
// Parallel::OutBdLow2Hi_bam(GH->PatL[lev-1],GH->PatL[lev],StateList,SynchList_cor,Symmetry);
|
||||||
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
|
Parallel::OutBdLow2Hi_bam(GH->PatL[lev - 1], GH->PatL[lev], StateList, SynchList_cor, GH->bdsul[lev], Symmetry);
|
||||||
@@ -6134,10 +6225,10 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
|
|||||||
#else
|
#else
|
||||||
Parallel::Restrict_after(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, Symmetry);
|
Parallel::Restrict_after(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, Symmetry);
|
||||||
#endif
|
#endif
|
||||||
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
|
Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
|
||||||
}
|
}
|
||||||
|
|
||||||
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
|
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#undef MIXOUTB
|
#undef MIXOUTB
|
||||||
|
|||||||
@@ -126,11 +126,6 @@ public:
|
|||||||
MyList<var> *OldStateList, *DumpList;
|
MyList<var> *OldStateList, *DumpList;
|
||||||
MyList<var> *ConstraintList;
|
MyList<var> *ConstraintList;
|
||||||
|
|
||||||
Parallel::SyncCache *sync_cache_pre; // per-level cache for predictor sync
|
|
||||||
Parallel::SyncCache *sync_cache_cor; // per-level cache for corrector sync
|
|
||||||
Parallel::SyncCache *sync_cache_rp_coarse; // RestrictProlong sync on PatL[lev-1]
|
|
||||||
Parallel::SyncCache *sync_cache_rp_fine; // RestrictProlong sync on PatL[lev]
|
|
||||||
|
|
||||||
monitor *ErrorMonitor, *Psi4Monitor, *BHMonitor, *MAPMonitor;
|
monitor *ErrorMonitor, *Psi4Monitor, *BHMonitor, *MAPMonitor;
|
||||||
monitor *ConVMonitor;
|
monitor *ConVMonitor;
|
||||||
surface_integral *Waveshell;
|
surface_integral *Waveshell;
|
||||||
|
|||||||
@@ -10,21 +10,20 @@ filein = -I/usr/include/ -I${MKLROOT}/include
|
|||||||
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
|
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
|
||||||
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl
|
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl
|
||||||
|
|
||||||
## Aggressive optimization flags + PGO Phase 2 (profile-guided optimization)
|
## Aggressive optimization flags:
|
||||||
## -fprofile-instr-use: use collected profile data to guide optimization decisions
|
## -O3: Maximum optimization
|
||||||
## (branch prediction, basic block layout, inlining, loop unrolling)
|
## -xHost: Optimize for the host CPU architecture (Intel/AMD compatible)
|
||||||
PROFDATA = ../../pgo_profile/default.profdata
|
## -fp-model fast=2: Aggressive floating-point optimizations
|
||||||
|
## -fma: Enable fused multiply-add instructions
|
||||||
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
||||||
-fprofile-instr-use=$(PROFDATA) \
|
|
||||||
-Dfortran3 -Dnewc -I${MKLROOT}/include
|
-Dfortran3 -Dnewc -I${MKLROOT}/include
|
||||||
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
||||||
-fprofile-instr-use=$(PROFDATA) \
|
|
||||||
-align array64byte -fpp -I${MKLROOT}/include
|
-align array64byte -fpp -I${MKLROOT}/include
|
||||||
f90 = ifx
|
f90 = ifx
|
||||||
f77 = ifx
|
f77 = ifx
|
||||||
CXX = icpx
|
CXX = icpx
|
||||||
CC = icx
|
CC = icx
|
||||||
CLINKER = mpiicpx
|
CLINKER = mpiicpx
|
||||||
|
|
||||||
Cu = nvcc
|
Cu = nvcc
|
||||||
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include
|
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include
|
||||||
|
|||||||
@@ -220,9 +220,16 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
pox[2][n] = rex * nz_g[n];
|
pox[2][n] = rex * nz_g[n];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
double *shellf;
|
||||||
|
shellf = new double[n_tot * InList];
|
||||||
|
|
||||||
|
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
|
||||||
|
|
||||||
int mp, Lp, Nmin, Nmax;
|
int mp, Lp, Nmin, Nmax;
|
||||||
|
|
||||||
mp = n_tot / cpusize;
|
mp = n_tot / cpusize;
|
||||||
Lp = n_tot - cpusize * mp;
|
Lp = n_tot - cpusize * mp;
|
||||||
|
|
||||||
if (Lp > myrank)
|
if (Lp > myrank)
|
||||||
{
|
{
|
||||||
Nmin = myrank * mp + myrank;
|
Nmin = myrank * mp + myrank;
|
||||||
@@ -234,11 +241,6 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
Nmax = Nmin + mp - 1;
|
Nmax = Nmin + mp - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
double *shellf;
|
|
||||||
shellf = new double[n_tot * InList];
|
|
||||||
|
|
||||||
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
|
|
||||||
|
|
||||||
//|~~~~~> Integrate the dot product of Dphi with the surface normal.
|
//|~~~~~> Integrate the dot product of Dphi with the surface normal.
|
||||||
|
|
||||||
double *RP_out, *IP_out;
|
double *RP_out, *IP_out;
|
||||||
@@ -361,17 +363,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -563,17 +556,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, Comm_here);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -751,17 +735,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH, var *Rpsi4
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -1009,17 +984,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -1453,17 +1419,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -1897,17 +1854,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2092,17 +2040,8 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch2 *GH, var *
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2287,17 +2226,8 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch *GH, var *R
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
@@ -2384,9 +2314,25 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
pox[2][n] = rex * nz_g[n];
|
pox[2][n] = rex * nz_g[n];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
double *shellf;
|
||||||
|
shellf = new double[n_tot * InList];
|
||||||
|
|
||||||
|
// we have assumed there is only one box on this level,
|
||||||
|
// so we do not need loop boxes
|
||||||
|
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
|
||||||
|
|
||||||
|
double Mass_out = 0;
|
||||||
|
double ang_outx, ang_outy, ang_outz;
|
||||||
|
double p_outx, p_outy, p_outz;
|
||||||
|
ang_outx = ang_outy = ang_outz = 0.0;
|
||||||
|
p_outx = p_outy = p_outz = 0.0;
|
||||||
|
const double f1o8 = 0.125;
|
||||||
|
|
||||||
int mp, Lp, Nmin, Nmax;
|
int mp, Lp, Nmin, Nmax;
|
||||||
|
|
||||||
mp = n_tot / cpusize;
|
mp = n_tot / cpusize;
|
||||||
Lp = n_tot - cpusize * mp;
|
Lp = n_tot - cpusize * mp;
|
||||||
|
|
||||||
if (Lp > myrank)
|
if (Lp > myrank)
|
||||||
{
|
{
|
||||||
Nmin = myrank * mp + myrank;
|
Nmin = myrank * mp + myrank;
|
||||||
@@ -2398,20 +2344,6 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
Nmax = Nmin + mp - 1;
|
Nmax = Nmin + mp - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
double *shellf;
|
|
||||||
shellf = new double[n_tot * InList];
|
|
||||||
|
|
||||||
// we have assumed there is only one box on this level,
|
|
||||||
// so we do not need loop boxes
|
|
||||||
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
|
|
||||||
|
|
||||||
double Mass_out = 0;
|
|
||||||
double ang_outx, ang_outy, ang_outz;
|
|
||||||
double p_outx, p_outy, p_outz;
|
|
||||||
ang_outx = ang_outy = ang_outz = 0.0;
|
|
||||||
p_outx = p_outy = p_outz = 0.0;
|
|
||||||
const double f1o8 = 0.125;
|
|
||||||
|
|
||||||
double Chi, Psi;
|
double Chi, Psi;
|
||||||
double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
|
double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
|
||||||
double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
|
double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
|
||||||
@@ -2532,13 +2464,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
|
||||||
double scalar_in[7];
|
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
|
||||||
}
|
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -2801,13 +2735,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
|
||||||
double scalar_in[7];
|
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, Comm_here);
|
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
|
||||||
}
|
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
|
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
|
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -3084,13 +3020,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, ShellPatch *GH, var *c
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
|
|
||||||
double scalar_in[7];
|
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
|
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
|
|
||||||
}
|
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
|
|
||||||
#ifdef GaussInt
|
#ifdef GaussInt
|
||||||
mass = mass * rex * rex * dphi * factor;
|
mass = mass * rex * rex * dphi * factor;
|
||||||
@@ -3669,17 +3607,8 @@ void surface_integral::surf_Wave(double rex, cgh *GH, ShellPatch *SH,
|
|||||||
}
|
}
|
||||||
//|------+ Communicate and sum the results from each processor.
|
//|------+ Communicate and sum the results from each processor.
|
||||||
|
|
||||||
{
|
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP_out = new double[2 * NN];
|
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
||||||
double *RPIP = new double[2 * NN];
|
|
||||||
memcpy(RPIP_out, RP_out, NN * sizeof(double));
|
|
||||||
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
|
|
||||||
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
|
|
||||||
memcpy(RP, RPIP, NN * sizeof(double));
|
|
||||||
memcpy(IP, RPIP + NN, NN * sizeof(double));
|
|
||||||
delete[] RPIP_out;
|
|
||||||
delete[] RPIP;
|
|
||||||
}
|
|
||||||
|
|
||||||
//|------= Free memory.
|
//|------= Free memory.
|
||||||
|
|
||||||
|
|||||||
@@ -15,13 +15,12 @@ import time
|
|||||||
## taskset ensures all child processes inherit the CPU affinity mask
|
## taskset ensures all child processes inherit the CPU affinity mask
|
||||||
## This forces make and all compiler processes to use only nohz_full cores (4-55, 60-111)
|
## This forces make and all compiler processes to use only nohz_full cores (4-55, 60-111)
|
||||||
## Format: taskset -c 4-55,60-111 ensures processes only run on these cores
|
## Format: taskset -c 4-55,60-111 ensures processes only run on these cores
|
||||||
#NUMACTL_CPU_BIND = "taskset -c 0-111"
|
NUMACTL_CPU_BIND = "taskset -c 0-111"
|
||||||
NUMACTL_CPU_BIND = "taskset -c 16-47,64-95"
|
|
||||||
|
|
||||||
## Build parallelism configuration
|
## Build parallelism configuration
|
||||||
## Use nohz_full cores (4-55, 60-111) for compilation: 52 + 52 = 104 cores
|
## Use nohz_full cores (4-55, 60-111) for compilation: 52 + 52 = 104 cores
|
||||||
## Set make -j to utilize available cores for faster builds
|
## Set make -j to utilize available cores for faster builds
|
||||||
BUILD_JOBS = 96
|
BUILD_JOBS = 104
|
||||||
|
|
||||||
|
|
||||||
##################################################################
|
##################################################################
|
||||||
@@ -118,7 +117,6 @@ def run_ABE():
|
|||||||
|
|
||||||
if (input_data.GPU_Calculation == "no"):
|
if (input_data.GPU_Calculation == "no"):
|
||||||
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
||||||
#mpi_command = " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
|
|
||||||
mpi_command_outfile = "ABE_out.log"
|
mpi_command_outfile = "ABE_out.log"
|
||||||
elif (input_data.GPU_Calculation == "yes"):
|
elif (input_data.GPU_Calculation == "yes"):
|
||||||
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
|
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
|
||||||
@@ -160,8 +158,7 @@ def run_TwoPunctureABE():
|
|||||||
print( )
|
print( )
|
||||||
|
|
||||||
## Define the command to run
|
## Define the command to run
|
||||||
#TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
|
TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
|
||||||
TwoPuncture_command = " ./TwoPunctureABE"
|
|
||||||
TwoPuncture_command_outfile = "TwoPunctureABE_out.log"
|
TwoPuncture_command_outfile = "TwoPunctureABE_out.log"
|
||||||
|
|
||||||
## Execute the command with subprocess.Popen and stream output
|
## Execute the command with subprocess.Popen and stream output
|
||||||
|
|||||||
@@ -1,97 +0,0 @@
|
|||||||
# AMSS-NCKU PGO Profile Analysis Report
|
|
||||||
|
|
||||||
## 1. Profiling Environment
|
|
||||||
|
|
||||||
| Item | Value |
|
|
||||||
|------|-------|
|
|
||||||
| Compiler | Intel oneAPI DPC++/C++ 2025.3.0 (icpx/ifx) |
|
|
||||||
| Instrumentation Flag | `-fprofile-instr-generate` |
|
|
||||||
| Optimization Level (instrumented) | `-O2 -xHost -fma` |
|
|
||||||
| MPI Processes | 1 (single process to avoid MPI+instrumentation deadlock) |
|
|
||||||
| Profile File | `default_9725750769337483397_0.profraw` (327 KB) |
|
|
||||||
| Merged Profile | `default.profdata` (394 KB) |
|
|
||||||
| llvm-profdata | `/home/intel/oneapi/compiler/2025.3/bin/compiler/llvm-profdata` |
|
|
||||||
|
|
||||||
## 2. Reduced Simulation Parameters (for profiling run)
|
|
||||||
|
|
||||||
| Parameter | Production Value | Profiling Value |
|
|
||||||
|-----------|-----------------|-----------------|
|
|
||||||
| MPI_processes | 64 | 1 |
|
|
||||||
| grid_level | 9 | 4 |
|
|
||||||
| static_grid_level | 5 | 3 |
|
|
||||||
| static_grid_number | 96 | 24 |
|
|
||||||
| moving_grid_number | 48 | 16 |
|
|
||||||
| largest_box_xyz_max | 320^3 | 160^3 |
|
|
||||||
| Final_Evolution_Time | 1000.0 | 10.0 |
|
|
||||||
| Evolution_Step_Number | 10,000,000 | 1,000 |
|
|
||||||
| Detector_Number | 12 | 2 |
|
|
||||||
|
|
||||||
## 3. Profile Summary
|
|
||||||
|
|
||||||
| Metric | Value |
|
|
||||||
|--------|-------|
|
|
||||||
| Total instrumented functions | 1,392 |
|
|
||||||
| Functions with non-zero counts | 117 (8.4%) |
|
|
||||||
| Functions with zero counts | 1,275 (91.6%) |
|
|
||||||
| Maximum function entry count | 386,459,248 |
|
|
||||||
| Maximum internal block count | 370,477,680 |
|
|
||||||
| Total block count | 4,198,023,118 |
|
|
||||||
|
|
||||||
## 4. Top 20 Hotspot Functions
|
|
||||||
|
|
||||||
| Rank | Total Count | Max Block Count | Function | Category |
|
|
||||||
|------|------------|-----------------|----------|----------|
|
|
||||||
| 1 | 1,241,601,732 | 370,477,680 | `polint_` | Interpolation |
|
|
||||||
| 2 | 755,994,435 | 230,156,640 | `prolong3_` | Grid prolongation |
|
|
||||||
| 3 | 667,964,095 | 3,697,792 | `compute_rhs_bssn_` | BSSN RHS evolution |
|
|
||||||
| 4 | 539,736,051 | 386,459,248 | `symmetry_bd_` | Symmetry boundary |
|
|
||||||
| 5 | 277,310,808 | 53,170,728 | `lopsided_` | Lopsided FD stencil |
|
|
||||||
| 6 | 155,534,488 | 94,535,040 | `decide3d_` | 3D grid decision |
|
|
||||||
| 7 | 119,267,712 | 19,266,048 | `rungekutta4_rout_` | RK4 time integrator |
|
|
||||||
| 8 | 91,574,616 | 48,824,160 | `kodis_` | Kreiss-Oliger dissipation |
|
|
||||||
| 9 | 67,555,389 | 43,243,680 | `fderivs_` | Finite differences |
|
|
||||||
| 10 | 55,296,000 | 42,246,144 | `misc::fact(int)` | Factorial utility |
|
|
||||||
| 11 | 43,191,071 | 27,663,328 | `fdderivs_` | 2nd-order FD derivatives |
|
|
||||||
| 12 | 36,233,965 | 22,429,440 | `restrict3_` | Grid restriction |
|
|
||||||
| 13 | 24,698,512 | 17,231,520 | `polin3_` | Polynomial interpolation |
|
|
||||||
| 14 | 22,962,942 | 20,968,768 | `copy_` | Data copy |
|
|
||||||
| 15 | 20,135,696 | 17,259,168 | `Ansorg::barycentric(...)` | Spectral interpolation |
|
|
||||||
| 16 | 14,650,224 | 7,224,768 | `Ansorg::barycentric_omega(...)` | Spectral weights |
|
|
||||||
| 17 | 13,242,296 | 2,871,920 | `global_interp_` | Global interpolation |
|
|
||||||
| 18 | 12,672,000 | 7,734,528 | `sommerfeld_rout_` | Sommerfeld boundary |
|
|
||||||
| 19 | 6,872,832 | 1,880,064 | `sommerfeld_routbam_` | Sommerfeld boundary (BAM) |
|
|
||||||
| 20 | 5,709,900 | 2,809,632 | `l2normhelper_` | L2 norm computation |
|
|
||||||
|
|
||||||
## 5. Hotspot Category Breakdown
|
|
||||||
|
|
||||||
Top 20 functions account for ~98% of total execution counts:
|
|
||||||
|
|
||||||
| Category | Functions | Combined Count | Share |
|
|
||||||
|----------|-----------|---------------|-------|
|
|
||||||
| Interpolation / Prolongation / Restriction | polint_, prolong3_, restrict3_, polin3_, global_interp_, Ansorg::* | ~2,093M | ~50% |
|
|
||||||
| BSSN RHS + FD stencils | compute_rhs_bssn_, lopsided_, fderivs_, fdderivs_ | ~1,056M | ~25% |
|
|
||||||
| Boundary conditions | symmetry_bd_, sommerfeld_rout_, sommerfeld_routbam_ | ~559M | ~13% |
|
|
||||||
| Time integration | rungekutta4_rout_ | ~119M | ~3% |
|
|
||||||
| Dissipation | kodis_ | ~92M | ~2% |
|
|
||||||
| Utilities | misc::fact, decide3d_, copy_, l2normhelper_ | ~240M | ~6% |
|
|
||||||
|
|
||||||
## 6. Conclusions
|
|
||||||
|
|
||||||
1. **Profile data is valid**: 1,392 functions instrumented, 117 exercised with ~4.2 billion total counts.
|
|
||||||
2. **Hotspot concentration is high**: Top 5 functions alone account for ~83% of all counts, which is ideal for PGO — the compiler has strong branch/layout optimization targets.
|
|
||||||
3. **Fortran numerical kernels dominate**: `polint_`, `prolong3_`, `compute_rhs_bssn_`, `symmetry_bd_`, `lopsided_` are all Fortran routines in the inner evolution loop. PGO will optimize their branch prediction and basic block layout.
|
|
||||||
4. **91.6% of functions have zero counts**: These are code paths for unused features (GPU, BSSN-EScalar, BSSN-EM, Z4C, etc.). PGO will deprioritize them, improving instruction cache utilization.
|
|
||||||
5. **Profile is representative**: Despite the reduced grid size, the code path coverage matches production — the same kernels (RHS, prolongation, restriction, boundary) are exercised. PGO branch probabilities from this profile will transfer well to full-scale runs.
|
|
||||||
|
|
||||||
## 7. PGO Phase 2 Usage
|
|
||||||
|
|
||||||
To apply the profile, use the following flags in `makefile.inc`:
|
|
||||||
|
|
||||||
```makefile
|
|
||||||
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
|
||||||
-fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
|
|
||||||
-Dfortran3 -Dnewc -I${MKLROOT}/include
|
|
||||||
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
|
|
||||||
-fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
|
|
||||||
-align array64byte -fpp -I${MKLROOT}/include
|
|
||||||
```
|
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Reference in New Issue
Block a user