Compare commits

..

2 Commits

Author SHA1 Message Date
6796384bf4 taskset setting updated 2026-02-07 22:24:02 +08:00
c974a88d6d Pool fh work arrays in compute_rhs_bssn to eliminate allocation churn
Add _fh variants of fderivs, fdderivs, kodis, and lopsided that accept
a caller-provided fh work array instead of allocating one internally.
Declare two shared work arrays in compute_rhs_bssn (fh_work2 for
symmetry_bd(2) callers, fh_work3 for symmetry_bd(3) callers) and pass
them to all ~84 subroutine calls, eliminating ~77 redundant automatic
array allocations (~591 MB churn per RHS call, ~2.3 GB per timestep).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 21:49:12 +08:00
53 changed files with 1444 additions and 2901 deletions
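The pooling described in the second commit message lives in the Fortran RHS routines (compute_rhs_bssn, fderivs, fdderivs, kodis, lopsided); the sketch below only illustrates the same caller-provided-scratch idea in C++, and fderivs_fh, fh_work, and compute_rhs here are hypothetical stand-ins rather than names taken from the sources.

#include <vector>

// Illustration only: an "_fh" variant takes a caller-owned scratch buffer
// instead of declaring an automatic work array on every call.
void fderivs_fh(const std::vector<double> &f, std::vector<double> &dfdx,
                std::vector<double> &fh_work)
{
    if (fh_work.size() < f.size())
        fh_work.resize(f.size());      // sized once, reused afterwards
    // ... build the symmetry-extended copy of f in fh_work, differentiate into dfdx ...
}

void compute_rhs(const std::vector<double> &f, std::vector<double> &dfdx)
{
    std::vector<double> fh_work;       // one shared scratch for the whole RHS
    fderivs_fh(f, dfdx, fh_work);      // first call sizes it
    // ... the remaining derivative/dissipation calls reuse the same fh_work ...
}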

View File

@@ -16,7 +16,7 @@ import numpy
File_directory = "GW150914" ## output file directory
Output_directory = "binary_output" ## binary data file directory
## The file directory name should not be too long
MPI_processes = 1 ## number of processes (MPI removed, single-process mode)
MPI_processes = 64 ## number of mpi processes used in the simulation
GPU_Calculation = "no" ## Use GPU or not
## (prefer "no" in the current version, because the GPU part may have bugs when integrated in this Python interface)

View File

@@ -20,11 +20,7 @@ using namespace std;
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "macrodef.h"

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#define PI M_PI

View File

@@ -2,11 +2,7 @@
#ifndef BLOCK_H
#define BLOCK_H
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "macrodef.h" //need dim here; Vertex or Cell
#include "var.h"
#include "MyList.h"

View File

@@ -4,11 +4,7 @@
#include <stdarg.h>
#include <string.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "myglobal.h"

View File

@@ -341,9 +341,8 @@ void Patch::Interp_Points(MyList<var> *VarList,
double *Shellf, int Symmetry)
{
// NOTE: we do not Synchronize variables here, make sure of that before calling this routine
int myrank, nprocs;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
int ordn = 2 * ghost_width;
MyList<var> *varl;
@@ -355,18 +354,24 @@ void Patch::Interp_Points(MyList<var> *VarList,
varl = varl->next;
}
memset(Shellf, 0, sizeof(double) * NN * num_var);
double *shellf;
shellf = new double[NN * num_var];
memset(shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] records which MPI rank owns point j
// All ranks traverse the same block list so they all agree on ownership
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
// we use weight to monitor code, later some day we can move it for optimization
int *weight;
weight = new int[NN];
memset(weight, 0, sizeof(int) * NN);
double *DH, *llb, *uub;
DH = new double[dim];
double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
{
DH[i] = getdX(i);
}
llb = new double[dim];
uub = new double[dim];
for (int j = 0; j < NN; j++) // run along points
{
@@ -398,6 +403,12 @@ void Patch::Interp_Points(MyList<var> *VarList,
bool flag = true;
for (int i = 0; i < dim; i++)
{
// NOTE: our dividing structure is (exclude ghost)
// -1 0
// 1 2
// so (0,1) does not belong to any part for vertex structure
// here we put (0,0.5) to left part and (0.5,1) to right part
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
@@ -422,7 +433,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
@@ -430,11 +440,14 @@ void Patch::Interp_Points(MyList<var> *VarList,
int k = 0;
while (varl) // run along variables
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
// pox,ordn,varl->data->SoA,Symmetry);
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
weight[j] = 1;
}
}
if (Bp == ble)
@@ -443,327 +456,103 @@ void Patch::Interp_Points(MyList<var> *VarList,
}
}
// Replace MPI_Allreduce with per-owner MPI_Bcast:
// Group consecutive points by owner rank and broadcast each group.
// Since each point's data is non-zero only on the owner rank,
// Bcast from owner is equivalent to Allreduce(MPI_SUM) but much cheaper.
MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
int *Weight;
Weight = new int[NN];
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
// misc::tillherecheck("print me");
for (int i = 0; i < NN; i++)
{
int j = 0;
while (j < NN)
if (Weight[i] > 1)
{
int cur_owner = owner_rank[j];
if (cur_owner < 0)
{
if (myrank == 0)
{
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int d = 0; d < dim; d++)
{
cout << XX[d][j];
if (d < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int d = 0; d < dim; d++)
{
cout << bbox[d] << "+" << lli[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int d = 0; d < dim; d++)
{
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
j++;
continue;
}
// Find contiguous run of points with the same owner
int jstart = j;
while (j < NN && owner_rank[j] == cur_owner)
j++;
int count = (j - jstart) * num_var;
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
if (myrank == 0)
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
for (int j = 0; j < num_var; j++)
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
}
}
delete[] owner_rank;
}
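The per-owner broadcast shown above works because every rank walks the same block list and therefore computes the same owner_rank[] values, and because Shellf is zero everywhere except on the owning rank. A minimal stand-alone sketch of the run grouping (the unfound-point error path is omitted):

#include <mpi.h>

// Shellf holds valid data for point j only on owner_rank[j], so a Bcast rooted
// at the owner reproduces Allreduce(MPI_SUM) over zero-initialized buffers
// while moving each point only once per receiver.
void bcast_by_owner(double *Shellf, const int *owner_rank,
                    int NN, int num_var, MPI_Comm comm)
{
    int j = 0;
    while (j < NN)
    {
        int root = owner_rank[j];
        int jstart = j;
        while (j < NN && owner_rank[j] == root)   // contiguous run, same owner
            j++;
        MPI_Bcast(Shellf + jstart * num_var, (j - jstart) * num_var,
                  MPI_DOUBLE, root, comm);
    }
}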
void Patch::Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry,
int Nmin_consumer, int Nmax_consumer)
{
// Targeted point-to-point overload: each owner sends each point only to
// the one rank that needs it for integration (consumer), reducing
// communication volume by ~nprocs times compared to the Bcast version.
int myrank, nprocs;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
int ordn = 2 * ghost_width;
MyList<var> *varl;
int num_var = 0;
varl = VarList;
while (varl)
{
num_var++;
varl = varl->next;
}
memset(Shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] records which MPI rank owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
// --- Interpolation phase (identical to original) ---
for (int j = 0; j < NN; j++)
{
double pox[dim];
for (int i = 0; i < dim; i++)
{
pox[i] = XX[i][j];
if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; k++)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
MyList<Block> *Bp = blb;
bool notfind = true;
while (notfind && Bp)
{
Block *BP = Bp->data;
bool flag = true;
for (int i = 0; i < dim; i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
if (XX[i][j] - llb[i] < -DH[i] / 2 || XX[i][j] - uub[i] > DH[i] / 2)
{
flag = false;
break;
}
}
if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
varl = VarList;
int k = 0;
while (varl)
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
}
}
if (Bp == ble)
break;
Bp = Bp->next;
}
}
// --- Error check for unfound points ---
for (int j = 0; j < NN; j++)
{
if (owner_rank[j] < 0 && myrank == 0)
else if (Weight[i] == 0 && myrank == 0)
{
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int d = 0; d < dim; d++)
for (int j = 0; j < dim; j++)
{
cout << XX[d][j];
if (d < dim - 1)
cout << XX[j][i];
if (j < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int d = 0; d < dim; d++)
for (int j = 0; j < dim; j++)
{
cout << bbox[d] << "+" << lli[d] * DH[d];
if (d < dim - 1)
cout << bbox[j] << "+" << lli[j] * getdX(j);
if (j < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int d = 0; d < dim; d++)
for (int j = 0; j < dim; j++)
{
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
if (d < dim - 1)
cout << bbox[dim + j] << "-" << uui[j] * getdX(j);
if (j < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
#if 0
checkBlock();
#else
cout << "splited domains:" << endl;
{
MyList<Block> *Bp = blb;
while (Bp)
{
Block *BP = Bp->data;
for (int i = 0; i < dim; i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + (ghost_width - 0.5) * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - (ghost_width - 0.5) * DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i], bbox[i], DH[i] / 2)) ? BP->bbox[i] + lli[i] * DH[i] : BP->bbox[i] + ghost_width * DH[i];
uub[i] = (feq(BP->bbox[dim + i], bbox[dim + i], DH[i] / 2)) ? BP->bbox[dim + i] - uui[i] * DH[i] : BP->bbox[dim + i] - ghost_width * DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
}
cout << "(";
for (int j = 0; j < dim; j++)
{
cout << llb[j] << ":" << uub[j];
if (j < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
if (Bp == ble)
break;
Bp = Bp->next;
}
}
#endif
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// --- Targeted point-to-point communication phase ---
// Compute consumer_rank[j] using the same deterministic formula as surface_integral
int *consumer_rank = new int[NN];
{
int mp = NN / nprocs;
int Lp = NN - nprocs * mp;
for (int j = 0; j < NN; j++)
{
if (j < Lp * (mp + 1))
consumer_rank[j] = j / (mp + 1);
else
consumer_rank[j] = Lp + (j - Lp * (mp + 1)) / mp;
}
}
// Count sends and recvs per rank
int *send_count = new int[nprocs];
int *recv_count = new int[nprocs];
memset(send_count, 0, sizeof(int) * nprocs);
memset(recv_count, 0, sizeof(int) * nprocs);
for (int j = 0; j < NN; j++)
{
int own = owner_rank[j];
int con = consumer_rank[j];
if (own == con)
continue; // local — no communication needed
if (own == myrank)
send_count[con]++;
if (con == myrank)
recv_count[own]++;
}
// Build send buffers: for each destination rank, pack (index, data) pairs
// Each entry: 1 int (point index j) + num_var doubles
int total_send = 0, total_recv = 0;
int *send_offset = new int[nprocs];
int *recv_offset = new int[nprocs];
for (int r = 0; r < nprocs; r++)
{
send_offset[r] = total_send;
total_send += send_count[r];
recv_offset[r] = total_recv;
total_recv += recv_count[r];
}
// Pack send buffers: each message contains (j, data[0..num_var-1]) per point
int stride = 1 + num_var; // 1 double for index + num_var doubles for data
double *sendbuf = new double[total_send * stride];
double *recvbuf = new double[total_recv * stride];
// Temporary counters for packing
int *pack_pos = new int[nprocs];
memset(pack_pos, 0, sizeof(int) * nprocs);
for (int j = 0; j < NN; j++)
{
int own = owner_rank[j];
int con = consumer_rank[j];
if (own != myrank || con == myrank)
continue;
int pos = (send_offset[con] + pack_pos[con]) * stride;
sendbuf[pos] = (double)j; // point index
for (int v = 0; v < num_var; v++)
sendbuf[pos + 1 + v] = Shellf[j * num_var + v];
pack_pos[con]++;
}
// Post non-blocking recvs and sends
int n_req = 0;
for (int r = 0; r < nprocs; r++)
{
if (recv_count[r] > 0) n_req++;
if (send_count[r] > 0) n_req++;
}
MPI_Request *reqs = new MPI_Request[n_req];
int req_idx = 0;
for (int r = 0; r < nprocs; r++)
{
if (recv_count[r] > 0)
{
MPI_Irecv(recvbuf + recv_offset[r] * stride,
recv_count[r] * stride, MPI_DOUBLE,
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
}
}
for (int r = 0; r < nprocs; r++)
{
if (send_count[r] > 0)
{
MPI_Isend(sendbuf + send_offset[r] * stride,
send_count[r] * stride, MPI_DOUBLE,
r, 0, MPI_COMM_WORLD, &reqs[req_idx++]);
}
}
if (n_req > 0)
MPI_Waitall(n_req, reqs, MPI_STATUSES_IGNORE);
// Unpack recv buffers into Shellf
for (int i = 0; i < total_recv; i++)
{
int pos = i * stride;
int j = (int)recvbuf[pos];
for (int v = 0; v < num_var; v++)
Shellf[j * num_var + v] = recvbuf[pos + 1 + v];
}
delete[] reqs;
delete[] sendbuf;
delete[] recvbuf;
delete[] pack_pos;
delete[] send_offset;
delete[] recv_offset;
delete[] send_count;
delete[] recv_count;
delete[] consumer_rank;
delete[] owner_rank;
delete[] shellf;
delete[] weight;
delete[] Weight;
delete[] DH;
delete[] llb;
delete[] uub;
}
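The consumer_rank formula in the point-to-point overload is a plain block partition: the first Lp ranks consume mp+1 points each and the rest consume mp. A self-contained sketch with a worked example, assuming NN >= nprocs as the formula itself does:

// Deterministic block partition: ranks 0..Lp-1 consume mp+1 points each,
// the remaining ranks consume mp points each (requires NN >= nprocs so mp >= 1).
int consumer_of(int j, int NN, int nprocs)
{
    int mp = NN / nprocs;          // base points per rank
    int Lp = NN - nprocs * mp;     // ranks that take one extra point
    if (j < Lp * (mp + 1))
        return j / (mp + 1);
    return Lp + (j - Lp * (mp + 1)) / mp;
}
// Example: NN = 10, nprocs = 4 -> mp = 2, Lp = 2;
// points 0..2 -> rank 0, 3..5 -> rank 1, 6..7 -> rank 2, 8..9 -> rank 3.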
void Patch::Interp_Points(MyList<var> *VarList,
int NN, double **XX,
@@ -784,22 +573,24 @@ void Patch::Interp_Points(MyList<var> *VarList,
varl = varl->next;
}
memset(Shellf, 0, sizeof(double) * NN * num_var);
double *shellf;
shellf = new double[NN * num_var];
memset(shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] stores the global rank that owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
// we use weight to monitor code, later some day we can move it for optimization
int *weight;
weight = new int[NN];
memset(weight, 0, sizeof(int) * NN);
// Build global-to-local rank translation for Comm_here
MPI_Group world_group, local_group;
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
MPI_Comm_group(Comm_here, &local_group);
double *DH, *llb, *uub;
DH = new double[dim];
double DH[dim], llb[dim], uub[dim];
for (int i = 0; i < dim; i++)
{
DH[i] = getdX(i);
}
llb = new double[dim];
uub = new double[dim];
for (int j = 0; j < NN; j++) // run along points
{
@@ -831,6 +622,12 @@ void Patch::Interp_Points(MyList<var> *VarList,
bool flag = true;
for (int i = 0; i < dim; i++)
{
// NOTE: our dividing structure is (exclude ghost)
// -1 0
// 1 2
// so (0,1) does not belong to any part for vertex structure
// here we put (0,0.5) to left part and (0.5,1) to right part
// BUT for cell structure the bbox is (-1.5,0.5) and (0.5,2.5), there is no missing region at all
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
@@ -855,7 +652,6 @@ void Patch::Interp_Points(MyList<var> *VarList,
if (flag)
{
notfind = false;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
@@ -863,11 +659,14 @@ void Patch::Interp_Points(MyList<var> *VarList,
int k = 0;
while (varl) // run along variables
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
// shellf[j*num_var+k] = Parallel::global_interp(dim,BP->shape,BP->X,BP->fgfs[varl->data->sgfn],
// pox,ordn,varl->data->SoA,Symmetry);
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
weight[j] = 1;
}
}
if (Bp == ble)
@@ -876,35 +675,97 @@ void Patch::Interp_Points(MyList<var> *VarList,
}
}
// Collect unique global owner ranks and translate to local ranks in Comm_here
// Then broadcast each owner's points via MPI_Bcast on Comm_here
{
int j = 0;
while (j < NN)
{
int cur_owner_global = owner_rank[j];
if (cur_owner_global < 0)
{
// Point not found — skip (error check disabled for sub-communicator levels)
j++;
continue;
}
// Translate global rank to local rank in Comm_here
int cur_owner_local;
MPI_Group_translate_ranks(world_group, 1, &cur_owner_global, local_group, &cur_owner_local);
MPI_Allreduce(shellf, Shellf, NN * num_var, MPI_DOUBLE, MPI_SUM, Comm_here);
int *Weight;
Weight = new int[NN];
MPI_Allreduce(weight, Weight, NN, MPI_INT, MPI_SUM, Comm_here);
// Find contiguous run of points with the same owner
int jstart = j;
while (j < NN && owner_rank[j] == cur_owner_global)
j++;
int count = (j - jstart) * num_var;
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner_local, Comm_here);
// misc::tillherecheck("print me");
// if(lmyrank == 0) cout<<"myrank = "<<myrank<<"print me"<<endl;
for (int i = 0; i < NN; i++)
{
if (Weight[i] > 1)
{
if (lmyrank == 0)
cout << "WARNING: Patch::Interp_Points meets multiple weight" << endl;
for (int j = 0; j < num_var; j++)
Shellf[j + i * num_var] = Shellf[j + i * num_var] / Weight[i];
}
#if 0 // for not involved levels, this may fail
else if(Weight[i] == 0 && lmyrank == 0)
{
cout<<"ERROR: Patch::Interp_Points fails to find point (";
for(int j=0;j<dim;j++)
{
cout<<XX[j][i];
if(j<dim-1) cout<<",";
else cout<<")";
}
cout<<" on Patch (";
for(int j=0;j<dim;j++)
{
cout<<bbox[j]<<"+"<<lli[j]*getdX(j);
if(j<dim-1) cout<<",";
else cout<<")--";
}
cout<<"(";
for(int j=0;j<dim;j++)
{
cout<<bbox[dim+j]<<"-"<<uui[j]*getdX(j);
if(j<dim-1) cout<<",";
else cout<<")"<<endl;
}
#if 0
checkBlock();
#else
cout<<"splited domains:"<<endl;
{
MyList<Block> *Bp=blb;
while(Bp)
{
Block *BP=Bp->data;
for(int i=0;i<dim;i++)
{
#ifdef Vertex
#ifdef Cell
#error Both Cell and Vertex are defined
#endif
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +(ghost_width-0.5)*DH[i];
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-(ghost_width-0.5)*DH[i];
#else
#ifdef Cell
llb[i] = (feq(BP->bbox[i] ,bbox[i] ,DH[i]/2)) ? BP->bbox[i]+lli[i]*DH[i] : BP->bbox[i] +ghost_width*DH[i];
uub[i] = (feq(BP->bbox[dim+i],bbox[dim+i],DH[i]/2)) ? BP->bbox[dim+i]-uui[i]*DH[i] : BP->bbox[dim+i]-ghost_width*DH[i];
#else
#error Not define Vertex nor Cell
#endif
#endif
}
cout<<"(";
for(int j=0;j<dim;j++)
{
cout<<llb[j]<<":"<<uub[j];
if(j<dim-1) cout<<",";
else cout<<")"<<endl;
}
if(Bp == ble) break;
Bp=Bp->next;
}
}
#endif
MPI_Abort(MPI_COMM_WORLD,1);
}
#endif
}
MPI_Group_free(&world_group);
MPI_Group_free(&local_group);
delete[] owner_rank;
delete[] shellf;
delete[] weight;
delete[] Weight;
delete[] DH;
delete[] llb;
delete[] uub;
}
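In the Comm_here overload, owner ranks are recorded in MPI_COMM_WORLD numbering while the broadcast root must be given in the sub-communicator's own numbering, hence the group translation. A minimal sketch of that translation step:

#include <mpi.h>

// Returns the rank of world_rank inside comm_here, or MPI_UNDEFINED if the
// owner is not a member of the sub-communicator.
int world_to_local(int world_rank, MPI_Comm comm_here)
{
    MPI_Group world_group, local_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Comm_group(comm_here, &local_group);
    int local_rank;
    MPI_Group_translate_ranks(world_group, 1, &world_rank, local_group, &local_rank);
    MPI_Group_free(&world_group);
    MPI_Group_free(&local_group);
    return local_rank;
}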
void Patch::checkBlock()
{

View File

@@ -2,11 +2,7 @@
#ifndef PATCH_H
#define PATCH_H
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "var.h"
@@ -43,10 +39,6 @@ public:
bool Find_Point(double *XX);
void Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry,
int Nmin_consumer, int Nmax_consumer);
void Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry, MPI_Comm Comm_here);

View File

@@ -8,11 +8,7 @@
#include <limits.h>
#include <float.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "util_Table.h"
#include "cctk.h"

View File

@@ -23,11 +23,7 @@ using namespace std;
#include <complex.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"

View File

@@ -23,11 +23,7 @@ using namespace std;
#include <complex.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"

View File

@@ -3756,484 +3756,6 @@ void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
delete[] transfer_src;
delete[] transfer_dst;
}
// Merged Sync: collect all intra-patch and inter-patch grid segment lists,
// then issue a single transfer() call instead of N+1 separate ones.
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
MyList<Parallel::gridseg> **combined_src = new MyList<Parallel::gridseg> *[cpusize];
MyList<Parallel::gridseg> **combined_dst = new MyList<Parallel::gridseg> *[cpusize];
for (int node = 0; node < cpusize; node++)
combined_src[node] = combined_dst[node] = 0;
// Phase A: Intra-patch ghost exchange segments
MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
if (tsrc)
{
if (combined_src[node])
combined_src[node]->catList(tsrc);
else
combined_src[node] = tsrc;
}
if (tdst)
{
if (combined_dst[node])
combined_dst[node]->catList(tdst);
else
combined_dst[node] = tdst;
}
if (src_owned)
src_owned->destroyList();
}
if (dst_ghost)
dst_ghost->destroyList();
Pp = Pp->next;
}
// Phase B: Inter-patch buffer exchange segments
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
if (tsrc)
{
if (combined_src[node])
combined_src[node]->catList(tsrc);
else
combined_src[node] = tsrc;
}
if (tdst)
{
if (combined_dst[node])
combined_dst[node]->catList(tdst);
else
combined_dst[node] = tdst;
}
if (src_owned)
src_owned->destroyList();
}
if (dst_buffer)
dst_buffer->destroyList();
// Phase C: Single transfer
transfer(combined_src, combined_dst, VarList, VarList, Symmetry);
// Phase D: Cleanup
for (int node = 0; node < cpusize; node++)
{
if (combined_src[node])
combined_src[node]->destroyList();
if (combined_dst[node])
combined_dst[node]->destroyList();
}
delete[] combined_src;
delete[] combined_dst;
}
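The point of Sync_merged is to concatenate the per-rank segment lists from every patch plus the inter-patch buffer pass and then call transfer() once, instead of paying message latency once per patch. A schematic sketch of that merge-then-exchange-once shape, with Parallel::gridseg and transfer() reduced to placeholders:

#include <vector>

struct Segment { /* placeholder for Parallel::gridseg */ };

// Collect every source's per-rank segments into one combined bucket per rank,
// then perform a single exchange instead of one per source.
void merged_exchange(const std::vector<std::vector<std::vector<Segment>>> &per_source,
                     int nprocs)
{
    std::vector<std::vector<Segment>> combined(nprocs);
    for (const auto &source : per_source)          // patches + buffer pass
        for (int node = 0; node < nprocs; node++)
            combined[node].insert(combined[node].end(),
                                  source[node].begin(), source[node].end());
    // single communication step instead of one per source:
    // transfer(combined, ...);
}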
// SyncCache constructor
Parallel::SyncCache::SyncCache()
: valid(false), cpusize(0), combined_src(0), combined_dst(0),
send_lengths(0), recv_lengths(0), send_bufs(0), recv_bufs(0),
send_buf_caps(0), recv_buf_caps(0), reqs(0), stats(0), max_reqs(0)
{
}
// SyncCache invalidate: free grid segment lists but keep buffers
void Parallel::SyncCache::invalidate()
{
if (!valid)
return;
for (int i = 0; i < cpusize; i++)
{
if (combined_src[i])
combined_src[i]->destroyList();
if (combined_dst[i])
combined_dst[i]->destroyList();
combined_src[i] = combined_dst[i] = 0;
send_lengths[i] = recv_lengths[i] = 0;
}
valid = false;
}
// SyncCache destroy: free everything
void Parallel::SyncCache::destroy()
{
invalidate();
if (combined_src) delete[] combined_src;
if (combined_dst) delete[] combined_dst;
if (send_lengths) delete[] send_lengths;
if (recv_lengths) delete[] recv_lengths;
if (send_buf_caps) delete[] send_buf_caps;
if (recv_buf_caps) delete[] recv_buf_caps;
for (int i = 0; i < cpusize; i++)
{
if (send_bufs && send_bufs[i]) delete[] send_bufs[i];
if (recv_bufs && recv_bufs[i]) delete[] recv_bufs[i];
}
if (send_bufs) delete[] send_bufs;
if (recv_bufs) delete[] recv_bufs;
if (reqs) delete[] reqs;
if (stats) delete[] stats;
combined_src = combined_dst = 0;
send_lengths = recv_lengths = 0;
send_buf_caps = recv_buf_caps = 0;
send_bufs = recv_bufs = 0;
reqs = 0; stats = 0;
cpusize = 0; max_reqs = 0;
}
// transfer_cached: reuse pre-allocated buffers from SyncCache
void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel::gridseg> **dst,
MyList<var> *VarList1, MyList<var> *VarList2,
int Symmetry, SyncCache &cache)
{
int myrank;
MPI_Comm_size(MPI_COMM_WORLD, &cache.cpusize);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
int req_no = 0;
int node;
for (node = 0; node < cpusize; node++)
{
if (node == myrank)
{
int length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
cache.recv_lengths[node] = length;
if (length > 0)
{
if (length > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[length];
cache.recv_buf_caps[node] = length;
}
data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
}
}
else
{
// send
int slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
cache.send_lengths[node] = slength;
if (slength > 0)
{
if (slength > cache.send_buf_caps[node])
{
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
cache.send_bufs[node] = new double[slength];
cache.send_buf_caps[node] = slength;
}
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
}
// recv
int rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
cache.recv_lengths[node] = rlength;
if (rlength > 0)
{
if (rlength > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no++);
}
}
}
MPI_Waitall(req_no, cache.reqs, cache.stats);
for (node = 0; node < cpusize; node++)
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
}
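transfer_cached never shrinks a message buffer: a send or receive buffer is reallocated only when a larger length is requested, so after the first few calls the steady state does no heap allocation for communication. A compact sketch of that grow-only policy:

// Grow-only buffer: reallocate only past the high-water mark, reuse otherwise.
struct GrowBuf
{
    double *data = nullptr;
    int cap = 0;
    GrowBuf() = default;
    GrowBuf(const GrowBuf &) = delete;             // no accidental double free
    GrowBuf &operator=(const GrowBuf &) = delete;
    double *ensure(int length)
    {
        if (length > cap)
        {
            delete[] data;                          // delete on nullptr is a no-op
            data = new double[length];
            cap = length;
        }
        return data;
    }
    ~GrowBuf() { delete[] data; }
};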
// Sync_cached: build grid segment lists on first call, reuse on subsequent calls
void Parallel::Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache)
{
if (!cache.valid)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
cache.cpusize = cpusize;
// Allocate cache arrays if needed
if (!cache.combined_src)
{
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
cache.send_lengths = new int[cpusize];
cache.recv_lengths = new int[cpusize];
cache.send_bufs = new double *[cpusize];
cache.recv_bufs = new double *[cpusize];
cache.send_buf_caps = new int[cpusize];
cache.recv_buf_caps = new int[cpusize];
for (int i = 0; i < cpusize; i++)
{
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
}
cache.max_reqs = 2 * cpusize;
cache.reqs = new MPI_Request[cache.max_reqs];
cache.stats = new MPI_Status[cache.max_reqs];
}
for (int node = 0; node < cpusize; node++)
{
cache.combined_src[node] = cache.combined_dst[node] = 0;
cache.send_lengths[node] = cache.recv_lengths[node] = 0;
}
// Build intra-patch segments (same as Sync_merged Phase A)
MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_ghost) dst_ghost->destroyList();
Pp = Pp->next;
}
// Build inter-patch segments (same as Sync_merged Phase B)
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_buffer) dst_buffer->destroyList();
cache.valid = true;
}
// Use cached lists with buffer-reusing transfer
transfer_cached(cache.combined_src, cache.combined_dst, VarList, VarList, Symmetry, cache);
}
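Sync_cached is a build-once plan: the segment lists are constructed on the first call and reused until something (a regrid) calls invalidate(). A minimal sketch of the validity-flag pattern, with the segment machinery elided:

// Build-once cache with explicit invalidation: the expensive list construction
// runs only when the grid layout has changed (cache.valid == false).
struct CachedPlan
{
    bool valid = false;
    // ... segment lists, message buffers ...
    void invalidate() { valid = false; }   // called after every regrid
};

void sync_with_cache(CachedPlan &cache)
{
    if (!cache.valid)
    {
        // ... rebuild segment lists from the current patch layout ...
        cache.valid = true;
    }
    // ... reuse cached lists and buffers for the exchange ...
}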
// Sync_start: pack and post MPI_Isend/Irecv, return immediately
void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
SyncCache &cache, AsyncSyncState &state)
{
// Ensure cache is built
if (!cache.valid)
{
// Build cache (same logic as Sync_cached)
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
cache.cpusize = cpusize;
if (!cache.combined_src)
{
cache.combined_src = new MyList<Parallel::gridseg> *[cpusize];
cache.combined_dst = new MyList<Parallel::gridseg> *[cpusize];
cache.send_lengths = new int[cpusize];
cache.recv_lengths = new int[cpusize];
cache.send_bufs = new double *[cpusize];
cache.recv_bufs = new double *[cpusize];
cache.send_buf_caps = new int[cpusize];
cache.recv_buf_caps = new int[cpusize];
for (int i = 0; i < cpusize; i++)
{
cache.send_bufs[i] = cache.recv_bufs[i] = 0;
cache.send_buf_caps[i] = cache.recv_buf_caps[i] = 0;
}
cache.max_reqs = 2 * cpusize;
cache.reqs = new MPI_Request[cache.max_reqs];
cache.stats = new MPI_Status[cache.max_reqs];
}
for (int node = 0; node < cpusize; node++)
{
cache.combined_src[node] = cache.combined_dst[node] = 0;
cache.send_lengths[node] = cache.recv_lengths[node] = 0;
}
MyList<Patch> *Pp = PatL;
while (Pp)
{
Patch *Pat = Pp->data;
MyList<Parallel::gridseg> *dst_ghost = build_ghost_gsl(Pat);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl0(Pat, node);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_ghost, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_ghost) dst_ghost->destroyList();
Pp = Pp->next;
}
MyList<Parallel::gridseg> *dst_buffer = build_buffer_gsl(PatL);
for (int node = 0; node < cpusize; node++)
{
MyList<Parallel::gridseg> *src_owned = build_owned_gsl(PatL, node, 5, Symmetry);
MyList<Parallel::gridseg> *tsrc = 0, *tdst = 0;
build_gstl(src_owned, dst_buffer, &tsrc, &tdst);
if (tsrc)
{
if (cache.combined_src[node])
cache.combined_src[node]->catList(tsrc);
else
cache.combined_src[node] = tsrc;
}
if (tdst)
{
if (cache.combined_dst[node])
cache.combined_dst[node]->catList(tdst);
else
cache.combined_dst[node] = tdst;
}
if (src_owned) src_owned->destroyList();
}
if (dst_buffer) dst_buffer->destroyList();
cache.valid = true;
}
// Now pack and post async MPI operations
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
state.req_no = 0;
state.active = true;
MyList<Parallel::gridseg> **src = cache.combined_src;
MyList<Parallel::gridseg> **dst = cache.combined_dst;
for (int node = 0; node < cpusize; node++)
{
if (node == myrank)
{
int length = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
cache.recv_lengths[node] = length;
if (length > 0)
{
if (length > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[length];
cache.recv_buf_caps[node] = length;
}
data_packer(cache.recv_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
}
}
else
{
int slength = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
cache.send_lengths[node] = slength;
if (slength > 0)
{
if (slength > cache.send_buf_caps[node])
{
if (cache.send_bufs[node]) delete[] cache.send_bufs[node];
cache.send_bufs[node] = new double[slength];
cache.send_buf_caps[node] = slength;
}
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
int rlength = data_packer(0, src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);
cache.recv_lengths[node] = rlength;
if (rlength > 0)
{
if (rlength > cache.recv_buf_caps[node])
{
if (cache.recv_bufs[node]) delete[] cache.recv_bufs[node];
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
}
}
}
// Sync_finish: wait for async MPI operations and unpack
void Parallel::Sync_finish(SyncCache &cache, AsyncSyncState &state,
MyList<var> *VarList, int Symmetry)
{
if (!state.active)
return;
MPI_Waitall(state.req_no, cache.reqs, cache.stats);
int cpusize = cache.cpusize;
MyList<Parallel::gridseg> **src = cache.combined_src;
MyList<Parallel::gridseg> **dst = cache.combined_dst;
for (int node = 0; node < cpusize; node++)
if (cache.recv_bufs[node] && cache.recv_lengths[node] > 0)
data_packer(cache.recv_bufs[node], src[node], dst[node], node, UNPACK, VarList, VarList, Symmetry);
state.active = false;
}
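Sync_start and Sync_finish split one synchronization into a posting phase and a completion phase so other work can run while messages are in flight. A minimal single-peer sketch of the split-phase idea; packing and unpacking stand in for data_packer:

#include <mpi.h>

void split_phase_exchange(double *sendbuf, int scount,
                          double *recvbuf, int rcount,
                          int peer, MPI_Comm comm)
{
    MPI_Request reqs[2];
    MPI_Irecv(recvbuf, rcount, MPI_DOUBLE, peer, 0, comm, &reqs[0]);
    MPI_Isend(sendbuf, scount, MPI_DOUBLE, peer, 0, comm, &reqs[1]);

    // ... overlapped work: e.g. RHS evaluation on points that need no ghost data ...

    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
    // ... unpack recvbuf into ghost zones ...
}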
// collect buffer grid segments or blocks for the periodic boundary condition of given patch
// ---------------------------------------------------
// |con | |con |

View File

@@ -81,42 +81,6 @@ namespace Parallel
int Symmetry);
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
struct SyncCache {
bool valid;
int cpusize;
MyList<gridseg> **combined_src;
MyList<gridseg> **combined_dst;
int *send_lengths;
int *recv_lengths;
double **send_bufs;
double **recv_bufs;
int *send_buf_caps;
int *recv_buf_caps;
MPI_Request *reqs;
MPI_Status *stats;
int max_reqs;
SyncCache();
void invalidate();
void destroy();
};
void Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache);
void transfer_cached(MyList<gridseg> **src, MyList<gridseg> **dst,
MyList<var> *VarList1, MyList<var> *VarList2,
int Symmetry, SyncCache &cache);
struct AsyncSyncState {
int req_no;
bool active;
AsyncSyncState() : req_no(0), active(false) {}
};
void Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
SyncCache &cache, AsyncSyncState &state);
void Sync_finish(SyncCache &cache, AsyncSyncState &state,
MyList<var> *VarList, int Symmetry);
void OutBdLow2Hi(Patch *Patc, Patch *Patf,
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /* target */,
int Symmetry);

View File

@@ -2,11 +2,7 @@
#ifndef SHELLPATCH_H
#define SHELLPATCH_H
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "Block.h"
#include "Parallel.h"

File diff suppressed because it is too large

View File

@@ -1,8 +1,7 @@
#ifndef TWO_PUNCTURES_H
#define TWO_PUNCTURES_H
#include <omp.h>
#define StencilSize 19
#define N_PlaneRelax 1
#define NRELAX 200
@@ -33,7 +32,7 @@ private:
int npoints_A, npoints_B, npoints_phi;
double target_M_plus, target_M_minus;
double admMass;
double adm_tol;
@@ -43,17 +42,32 @@ private:
int ntotal;
// ===== Precomputed spectral derivative matrices =====
double *D1_A, *D2_A;
double *D1_B, *D2_B;
double *DF1_phi, *DF2_phi;
// ===== Pre-allocated workspace for LineRelax (per-thread) =====
int max_threads;
double **ws_diag_be, **ws_e_be, **ws_f_be, **ws_b_be, **ws_x_be;
double **ws_l_be, **ws_u_be, **ws_d_be, **ws_y_be;
double **ws_diag_al, **ws_e_al, **ws_f_al, **ws_b_al, **ws_x_al;
double **ws_l_al, **ws_u_al, **ws_d_al, **ws_y_al;
// Pre-allocated workspace buffers for hot-path allocation elimination
// LineRelax_be workspace (sized for n2)
double *ws_diag_be, *ws_e_be, *ws_f_be, *ws_b_be, *ws_x_be;
// LineRelax_al workspace (sized for n1)
double *ws_diag_al, *ws_e_al, *ws_f_al, *ws_b_al, *ws_x_al;
// ThomasAlgorithm workspace (sized for max(n1,n2))
double *ws_thomas_y;
// JFD_times_dv workspace (sized for nvar)
double *ws_jfd_values;
derivs ws_jfd_dU, ws_jfd_U;
// chebft_Zeros workspace (sized for max(n1,n2,n3)+1)
double *ws_cheb_c;
// fourft workspace (sized for max(n1,n2,n3)/2+1 each)
double *ws_four_a, *ws_four_b;
// Derivatives_AB3 workspace
double *ws_deriv_p, *ws_deriv_dp, *ws_deriv_d2p;
double *ws_deriv_q, *ws_deriv_dq;
double *ws_deriv_r, *ws_deriv_dr;
int *ws_deriv_indx;
// F_of_v workspace
double *ws_fov_sources;
double *ws_fov_values;
derivs ws_fov_U;
// J_times_dv workspace
double *ws_jtdv_values;
derivs ws_jtdv_dU, ws_jtdv_U;
struct parameters
{
@@ -71,28 +85,6 @@ public:
int Newtonmaxit);
~TwoPunctures();
// 02/07: New/modified methods
void allocate_workspace();
void free_workspace();
void precompute_derivative_matrices();
void build_cheb_deriv_matrices(int n, double *D1, double *D2);
void build_fourier_deriv_matrices(int N, double *DF1, double *DF2);
void Derivatives_AB3_MatMul(int nvar, int n1, int n2, int n3, derivs v);
void ThomasAlgorithm_ws(int N, double *b, double *a, double *c, double *x, double *q,
double *l, double *u_ws, double *d, double *y);
void LineRelax_be_omp(double *dv,
int const i, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols,
double **JFD, int tid);
void LineRelax_al_omp(double *dv,
int const j, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols,
int **cols, double **JFD, int tid);
void relax_omp(double *dv, int const nvar, int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols, double **JFD);
void Solve();
void set_initial_guess(derivs v);
int index(int i, int j, int k, int l, int a, int b, int c, int d);
@@ -151,11 +143,23 @@ public:
double BY_KKofxyz(double x, double y, double z);
void SetMatrix_JFD(int nvar, int n1, int n2, int n3, derivs u, int *ncols, int **cols, double **Matrix);
void J_times_dv(int nvar, int n1, int n2, int n3, derivs dv, double *Jdv, derivs u);
void relax(double *dv, int const nvar, int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols, double **JFD);
void LineRelax_be(double *dv,
int const i, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols, int **cols,
double **JFD);
void JFD_times_dv(int i, int j, int k, int nvar, int n1, int n2,
int n3, derivs dv, derivs u, double *values);
void LinEquations(double A, double B, double X, double R,
double x, double r, double phi,
double y, double z, derivs dU, derivs U, double *values);
void LineRelax_al(double *dv,
int const j, int const k, int const nvar,
int const n1, int const n2, int const n3,
double const *rhs, int const *ncols,
int **cols, double **JFD);
void ThomasAlgorithm(int N, double *b, double *a, double *c, double *x, double *q);
void Save(char *fname);
// provided by Vasileios Paschalidis (vpaschal@illinois.edu)
@@ -164,4 +168,4 @@ public:
void SpecCoef(parameters par, int ivar, double *v, double *cf);
};
#endif /* TWO_PUNCTURES_H */
#endif /* TWO_PUNCTURES_H */

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "cgh.h"
#include "ShellPatch.h"

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "cgh.h"
#include "ShellPatch.h"

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "cgh.h"
#include "ShellPatch.h"

View File

@@ -730,12 +730,6 @@ void bssn_class::Initialize()
PhysTime = StartTime;
Setup_Black_Hole_position();
}
// Initialize sync caches (per-level, for predictor and corrector)
sync_cache_pre = new Parallel::SyncCache[GH->levels];
sync_cache_cor = new Parallel::SyncCache[GH->levels];
sync_cache_rp_coarse = new Parallel::SyncCache[GH->levels];
sync_cache_rp_fine = new Parallel::SyncCache[GH->levels];
}
//================================================================================================
@@ -987,32 +981,6 @@ bssn_class::~bssn_class()
delete Azzz;
#endif
// Destroy sync caches before GH
if (sync_cache_pre)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_pre[i].destroy();
delete[] sync_cache_pre;
}
if (sync_cache_cor)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_cor[i].destroy();
delete[] sync_cache_cor;
}
if (sync_cache_rp_coarse)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_rp_coarse[i].destroy();
delete[] sync_cache_rp_coarse;
}
if (sync_cache_rp_fine)
{
for (int i = 0; i < GH->levels; i++)
sync_cache_rp_fine[i].destroy();
delete[] sync_cache_rp_fine;
}
delete GH;
#ifdef WithShell
delete SH;
@@ -2213,7 +2181,6 @@ void bssn_class::Evolve(int Steps)
GH->Regrid(Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_mon, StartTime, dT_mon / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif
#if (REGLEV == 0 && (PSTR == 1 || PSTR == 2))
@@ -2429,7 +2396,6 @@ void bssn_class::RecursiveStep(int lev)
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif
}
@@ -2608,7 +2574,6 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(GH->mylev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
#endif
}
@@ -2775,7 +2740,6 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev + 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_levp1, StartTime, dT_levp1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
// a_stream.clear();
// a_stream.str("");
@@ -2790,7 +2754,6 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_lev / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
// a_stream.clear();
// a_stream.str("");
@@ -2809,7 +2772,6 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
// a_stream.clear();
// a_stream.str("");
@@ -2825,7 +2787,6 @@ void bssn_class::ParallelStep()
GH->Regrid_Onelevel(lev - 1, Symmetry, BH_num, Porgbr, Porg0,
SynchList_cor, OldStateList, StateList, SynchList_pre,
fgt(PhysTime - dT_lev, StartTime, dT_levm1 / 2), ErrorMonitor);
for (int il = 0; il < GH->levels; il++) { sync_cache_pre[il].invalidate(); sync_cache_cor[il].invalidate(); sync_cache_rp_coarse[il].invalidate(); sync_cache_rp_fine[il].invalidate(); }
// a_stream.clear();
// a_stream.str("");
@@ -3197,7 +3158,21 @@ void bssn_class::Step(int lev, int YN)
}
Pp = Pp->next;
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#ifdef WithShell
// evolve Shell Patches
@@ -3215,9 +3190,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
#endif
@@ -3341,16 +3316,25 @@ void bssn_class::Step(int lev, int YN)
#endif
}
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
Parallel::AsyncSyncState async_pre;
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
#ifdef WithShell
if (lev == 0)
@@ -3363,29 +3347,12 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime << ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
#if (MAPBH == 0)
// for black hole position
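One side of the error-check hunks above overlaps the NaN-flag reduction with the ghost-zone synchronization by starting MPI_Iallreduce and completing it only after the sync. A minimal sketch of that overlap pattern, with the overlapped work left as a comment:

#include <mpi.h>

void check_error_overlapped(int local_error /* 1 if this rank saw a NaN */)
{
    int global_error = 0;
    MPI_Request req;
    MPI_Iallreduce(&local_error, &global_error, 1, MPI_INT, MPI_SUM,
                   MPI_COMM_WORLD, &req);

    // ... overlapped work: ghost-zone synchronization, shell-patch sync, ...

    MPI_Wait(&req, MPI_STATUS_IGNORE);
    if (global_error)
        MPI_Abort(MPI_COMM_WORLD, 1);   // dump/diagnostics would precede this
}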
@@ -3561,7 +3528,24 @@ void bssn_class::Step(int lev, int YN)
Pp = Pp->next;
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#ifdef WithShell
// evolve Shell Patches
@@ -3579,9 +3563,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
#elif (AGM == 1)
if (iter_count == 3)
@@ -3701,16 +3685,26 @@ void bssn_class::Step(int lev, int YN)
sPp = sPp->next;
}
}
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#"
<< iter_count << " variables at t = "
<< PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
Parallel::AsyncSyncState async_cor;
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
#ifdef WithShell
if (lev == 0)
@@ -3723,31 +3717,12 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
#if (MAPBH == 0)
// for black hole position
@@ -4059,7 +4034,22 @@ void bssn_class::Step(int lev, int YN)
}
Pp = Pp->next;
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#ifdef WithShell
// evolve Shell Patches
@@ -4077,15 +4067,15 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gxx0->sgfn], cg->fgfs[gxy0->sgfn], cg->fgfs[gxz0->sgfn],
cg->fgfs[gyy0->sgfn], cg->fgfs[gyz0->sgfn], cg->fgfs[gzz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Axx0->sgfn], cg->fgfs[Axy0->sgfn], cg->fgfs[Axz0->sgfn],
cg->fgfs[Ayy0->sgfn], cg->fgfs[Ayz0->sgfn], cg->fgfs[Azz0->sgfn]);
#endif
if (f_compute_rhs_bssn_ss(cg->shape, TRK4, cg->X[0], cg->X[1], cg->X[2],
cg->fgfs[fngfs + ShellPatch::gx],
cg->fgfs[fngfs + ShellPatch::gy],
cg->fgfs[fngfs + ShellPatch::gx],
cg->fgfs[fngfs + ShellPatch::gy],
cg->fgfs[fngfs + ShellPatch::gz],
cg->fgfs[fngfs + ShellPatch::drhodx],
cg->fgfs[fngfs + ShellPatch::drhody],
@@ -4200,16 +4190,25 @@ void bssn_class::Step(int lev, int YN)
}
#endif
}
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = "
<< PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
Parallel::AsyncSyncState async_pre;
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
#ifdef WithShell
if (lev == 0)
@@ -4222,27 +4221,9 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
@@ -4405,7 +4386,23 @@ void bssn_class::Step(int lev, int YN)
Pp = Pp->next;
}
// NOTE: error check deferred to after Shell Patch computation to reduce MPI_Allreduce calls
// check error information
{
int erh = ERROR;
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#ifdef WithShell
// evolve Shell Patches
@@ -4423,9 +4420,9 @@ void bssn_class::Step(int lev, int YN)
{
#if (AGM == 0)
f_enforce_ga(cg->shape,
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gxx->sgfn], cg->fgfs[gxy->sgfn], cg->fgfs[gxz->sgfn],
cg->fgfs[gyy->sgfn], cg->fgfs[gyz->sgfn], cg->fgfs[gzz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Axx->sgfn], cg->fgfs[Axy->sgfn], cg->fgfs[Axz->sgfn],
cg->fgfs[Ayy->sgfn], cg->fgfs[Ayz->sgfn], cg->fgfs[Azz->sgfn]);
#elif (AGM == 1)
if (iter_count == 3)
@@ -4545,16 +4542,25 @@ void bssn_class::Step(int lev, int YN)
sPp = sPp->next;
}
}
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
Parallel::AsyncSyncState async_cor;
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
#ifdef WithShell
if (lev == 0)
@@ -4567,30 +4573,11 @@ void bssn_class::Step(int lev, int YN)
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
#endif
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
#ifdef WithShell
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#endif
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -4956,19 +4943,11 @@ void bssn_class::Step(int lev, int YN)
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Predictor rhs calculation");
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
}
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
Parallel::Sync_cached(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev]);
// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
@@ -4980,6 +4959,10 @@ void bssn_class::Step(int lev, int YN)
}
}
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor sync");
Parallel::Sync(GH->PatL[lev], SynchList_pre, Symmetry);
#if (MAPBH == 0)
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -5157,34 +5140,30 @@ void bssn_class::Step(int lev, int YN)
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector error check");
// Non-blocking error reduction overlapped with Sync to hide Allreduce latency
MPI_Request err_req_cor;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev], &err_req_cor);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, GH->Commlev[lev]);
}
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev]);
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Corrector sync");
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"after Corrector sync");
#if (MAPBH == 0)
// for black hole position
if (BH_num > 0 && lev == GH->levels - 1)
@@ -5468,11 +5447,21 @@ void bssn_class::SHStep()
#if (PSTR == 1 || PSTR == 2)
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Predictor's error check");
#endif
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
MPI_Request err_req;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
{
@@ -5484,25 +5473,12 @@ void bssn_class::SHStep()
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
// Complete non-blocking error reduction and check
MPI_Wait(&err_req, MPI_STATUS_IGNORE);
if (ERROR)
{
SH->Dump_Data(StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables on Shell Patches at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// corrector
for (iter_count = 1; iter_count < 4; iter_count++)
{
@@ -5645,11 +5621,21 @@ void bssn_class::SHStep()
sPp = sPp->next;
}
}
// Non-blocking error reduction overlapped with Synch to hide Allreduce latency
MPI_Request err_req_cor;
// check error information
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
MPI_Allreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
{
@@ -5661,26 +5647,12 @@ void bssn_class::SHStep()
{
prev_clock = curr_clock;
curr_clock = clock();
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
cout << " Shell stuff synchronization used "
<< (double)(curr_clock - prev_clock) / ((double)CLOCKS_PER_SEC)
<< " seconds! " << endl;
}
}
// Complete non-blocking error reduction and check
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
SH->Dump_Data(SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN on Shell Patches in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
sPp = SH->PatL;
while (sPp)
{
@@ -5809,7 +5781,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5870,7 +5842,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
// misc::tillherecheck(GH->Commlev[GH->mylev],GH->start_rank[GH->mylev],a_stream.str());
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5908,7 +5880,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB,
#endif
}
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
Parallel::Sync(GH->PatL[lev], SL, Symmetry);
#if (PSTR == 1 || PSTR == 2)
// a_stream.clear();
@@ -5966,7 +5938,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SynchList_pre, GH->rsul[lev], Symmetry);
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -5998,7 +5970,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SL, SL, GH->rsul[lev], Symmetry);
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], SL, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], SL, Symmetry);
#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -6022,7 +5994,7 @@ void bssn_class::RestrictProlong_aux(int lev, int YN, bool BB,
#endif
}
Parallel::Sync_cached(GH->PatL[lev], SL, Symmetry, sync_cache_rp_fine[lev]);
Parallel::Sync(GH->PatL[lev], SL, Symmetry);
}
}
@@ -6073,7 +6045,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, SynchList_pre, GH->rsul[lev], Symmetry);
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], SynchList_pre, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], SynchList_pre, Symmetry);
#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -6107,7 +6079,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
Parallel::Restrict_bam(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, GH->rsul[lev], Symmetry);
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
#if (RPB == 0)
Ppc = GH->PatL[lev - 1];
@@ -6131,7 +6103,7 @@ void bssn_class::RestrictProlong(int lev, int YN, bool BB)
#endif
}
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
}
}
@@ -6214,10 +6186,10 @@ void bssn_class::ProlongRestrict(int lev, int YN, bool BB)
#else
Parallel::Restrict_after(GH->PatL[lev - 1], GH->PatL[lev], SynchList_cor, StateList, Symmetry);
#endif
Parallel::Sync_cached(GH->PatL[lev - 1], StateList, Symmetry, sync_cache_rp_coarse[lev]);
Parallel::Sync(GH->PatL[lev - 1], StateList, Symmetry);
}
Parallel::Sync_cached(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_rp_fine[lev]);
Parallel::Sync(GH->PatL[lev], SynchList_cor, Symmetry);
}
}
#undef MIXOUTB
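The Step()/SHStep() hunks above drop the cached, non-blocking synchronization path (Sync_cached, Sync_start/Sync_finish, MPI_Iallreduce + MPI_Wait) and restore a plain Sync with an immediate, blocking NaN check after each RK4 substep. That check is just a sum-reduction of a per-rank error flag followed by a dump and abort. A minimal sketch of the pattern, written in Fortran MPI bindings to match the other sketches in this comparison (the routines above are C++, and the name check_nan_flag is hypothetical):

    subroutine check_nan_flag(local_error, comm)
      use mpi
      implicit none
      integer, intent(in) :: local_error   ! nonzero if this rank found a NaN in its blocks
      integer, intent(in) :: comm          ! per-level communicator, or MPI_COMM_WORLD for shells
      integer :: global_error, myrank, ierr
      call MPI_Comm_rank(comm, myrank, ierr)
      ! blocking reduction: every rank learns whether any rank saw a NaN
      call MPI_Allreduce(local_error, global_error, 1, MPI_INTEGER, MPI_SUM, comm, ierr)
      if (global_error > 0) then
        ! in the real code the offending level is dumped first (Dump_Data);
        ! rank 0 then aborts, which terminates every rank in the job
        if (myrank == 0) call MPI_Abort(comm, 1, ierr)
      end if
    end subroutine check_nan_flag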

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "macrodef.h"
#include "cgh.h"
@@ -130,11 +126,6 @@ public:
MyList<var> *OldStateList, *DumpList;
MyList<var> *ConstraintList;
Parallel::SyncCache *sync_cache_pre; // per-level cache for predictor sync
Parallel::SyncCache *sync_cache_cor; // per-level cache for corrector sync
Parallel::SyncCache *sync_cache_rp_coarse; // RestrictProlong sync on PatL[lev-1]
Parallel::SyncCache *sync_cache_rp_fine; // RestrictProlong sync on PatL[lev]
monitor *ErrorMonitor, *Psi4Monitor, *BHMonitor, *MAPMonitor;
monitor *ConVMonitor;
surface_integral *Waveshell;

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "macrodef.h"
#include "cgh.h"

View File

@@ -83,6 +83,10 @@
real*8, dimension(ex(1),ex(2),ex(3)) :: gupxx,gupxy,gupxz
real*8, dimension(ex(1),ex(2),ex(3)) :: gupyy,gupyz,gupzz
! shared work arrays (memory pool) to avoid repeated allocation in subroutines
real*8, dimension(-1:ex(1),-1:ex(2),-1:ex(3)) :: fh_work2 ! for fderivs_fh/fdderivs_fh (ghost_width=2)
real*8, dimension(-2:ex(1),-2:ex(2),-2:ex(3)) :: fh_work3 ! for kodis_fh/lopsided_fh (ghost_width=3)
real*8,dimension(3) ::SSS,AAS,ASA,SAA,ASS,SAS,SSA
real*8 :: dX, dY, dZ, PI
real*8, parameter :: ZEO = 0.d0,ONE = 1.D0, TWO = 2.D0, FOUR = 4.D0
@@ -151,22 +155,22 @@
gyy = dyy + ONE
gzz = dzz + ONE
call fderivs(ex,betax,betaxx,betaxy,betaxz,X,Y,Z,ANTI, SYM, SYM,Symmetry,Lev)
call fderivs(ex,betay,betayx,betayy,betayz,X,Y,Z, SYM,ANTI, SYM,Symmetry,Lev)
call fderivs(ex,betaz,betazx,betazy,betazz,X,Y,Z, SYM, SYM,ANTI,Symmetry,Lev)
call fderivs_fh(ex,betax,betaxx,betaxy,betaxz,X,Y,Z,ANTI, SYM, SYM,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,betay,betayx,betayy,betayz,X,Y,Z, SYM,ANTI, SYM,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,betaz,betazx,betazy,betazz,X,Y,Z, SYM, SYM,ANTI,Symmetry,Lev,fh_work2)
div_beta = betaxx + betayy + betazz
call fderivs(ex,chi,chix,chiy,chiz,X,Y,Z,SYM,SYM,SYM,symmetry,Lev)
call fderivs_fh(ex,chi,chix,chiy,chiz,X,Y,Z,SYM,SYM,SYM,symmetry,Lev,fh_work2)
chi_rhs = F2o3 *chin1*( alpn1 * trK - div_beta ) !rhs for chi
call fderivs(ex,dxx,gxxx,gxxy,gxxz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev)
call fderivs(ex,gxy,gxyx,gxyy,gxyz,X,Y,Z,ANTI,ANTI,SYM ,Symmetry,Lev)
call fderivs(ex,gxz,gxzx,gxzy,gxzz,X,Y,Z,ANTI,SYM ,ANTI,Symmetry,Lev)
call fderivs(ex,dyy,gyyx,gyyy,gyyz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev)
call fderivs(ex,gyz,gyzx,gyzy,gyzz,X,Y,Z,SYM ,ANTI,ANTI,Symmetry,Lev)
call fderivs(ex,dzz,gzzx,gzzy,gzzz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev)
call fderivs_fh(ex,dxx,gxxx,gxxy,gxxz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,gxy,gxyx,gxyy,gxyz,X,Y,Z,ANTI,ANTI,SYM ,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,gxz,gxzx,gxzy,gxzz,X,Y,Z,ANTI,SYM ,ANTI,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,dyy,gyyx,gyyy,gyyz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,gyz,gyzx,gyzy,gyzz,X,Y,Z,SYM ,ANTI,ANTI,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,dzz,gzzx,gzzy,gzzz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,Lev,fh_work2)
gxx_rhs = - TWO * alpn1 * Axx - F2o3 * gxx * div_beta + &
TWO *( gxx * betaxx + gxy * betayx + gxz * betazx)
@@ -284,8 +288,8 @@
(gupyy * gupzz + gupyz * gupyz)* Ayz
! Right hand side for Gam^i without shift terms...
call fderivs(ex,Lap,Lapx,Lapy,Lapz,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fderivs(ex,trK,Kx,Ky,Kz,X,Y,Z,SYM,SYM,SYM,symmetry,Lev)
call fderivs_fh(ex,Lap,Lapx,Lapy,Lapz,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,trK,Kx,Ky,Kz,X,Y,Z,SYM,SYM,SYM,symmetry,Lev,fh_work2)
Gamx_rhs = - TWO * ( Lapx * Rxx + Lapy * Rxy + Lapz * Rxz ) + &
TWO * alpn1 * ( &
@@ -314,12 +318,12 @@
Gamzxx * Rxx + Gamzyy * Ryy + Gamzzz * Rzz + &
TWO * ( Gamzxy * Rxy + Gamzxz * Rxz + Gamzyz * Ryz ) )
call fdderivs(ex,betax,gxxx,gxyx,gxzx,gyyx,gyzx,gzzx,&
X,Y,Z,ANTI,SYM, SYM ,Symmetry,Lev)
call fdderivs(ex,betay,gxxy,gxyy,gxzy,gyyy,gyzy,gzzy,&
X,Y,Z,SYM ,ANTI,SYM ,Symmetry,Lev)
call fdderivs(ex,betaz,gxxz,gxyz,gxzz,gyyz,gyzz,gzzz,&
X,Y,Z,SYM ,SYM, ANTI,Symmetry,Lev)
call fdderivs_fh(ex,betax,gxxx,gxyx,gxzx,gyyx,gyzx,gzzx,&
X,Y,Z,ANTI,SYM, SYM ,Symmetry,Lev,fh_work2)
call fdderivs_fh(ex,betay,gxxy,gxyy,gxzy,gyyy,gyzy,gzzy,&
X,Y,Z,SYM ,ANTI,SYM ,Symmetry,Lev,fh_work2)
call fdderivs_fh(ex,betaz,gxxz,gxyz,gxzz,gyyz,gyzz,gzzz,&
X,Y,Z,SYM ,SYM, ANTI,Symmetry,Lev,fh_work2)
fxx = gxxx + gxyy + gxzz
fxy = gxyx + gyyy + gyzz
@@ -332,9 +336,9 @@
Gamza = gupxx * Gamzxx + gupyy * Gamzyy + gupzz * Gamzzz + &
TWO*( gupxy * Gamzxy + gupxz * Gamzxz + gupyz * Gamzyz )
call fderivs(ex,Gamx,Gamxx,Gamxy,Gamxz,X,Y,Z,ANTI,SYM ,SYM ,Symmetry,Lev)
call fderivs(ex,Gamy,Gamyx,Gamyy,Gamyz,X,Y,Z,SYM ,ANTI,SYM ,Symmetry,Lev)
call fderivs(ex,Gamz,Gamzx,Gamzy,Gamzz,X,Y,Z,SYM ,SYM ,ANTI,Symmetry,Lev)
call fderivs_fh(ex,Gamx,Gamxx,Gamxy,Gamxz,X,Y,Z,ANTI,SYM ,SYM ,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,Gamy,Gamyx,Gamyy,Gamyz,X,Y,Z,SYM ,ANTI,SYM ,Symmetry,Lev,fh_work2)
call fderivs_fh(ex,Gamz,Gamzx,Gamzy,Gamzz,X,Y,Z,SYM ,SYM ,ANTI,Symmetry,Lev,fh_work2)
Gamx_rhs = Gamx_rhs + F2o3 * Gamxa * div_beta - &
Gamxa * betaxx - Gamya * betaxy - Gamza * betaxz + &
@@ -377,27 +381,27 @@
gzzz = gxz * Gamxzz + gyz * Gamyzz + gzz * Gamzzz
!compute Ricci tensor for tilted metric
call fdderivs(ex,dxx,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev)
call fdderivs_fh(ex,dxx,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev,fh_work2)
Rxx = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
call fdderivs(ex,dyy,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev)
call fdderivs_fh(ex,dyy,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev,fh_work2)
Ryy = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
call fdderivs(ex,dzz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev)
call fdderivs_fh(ex,dzz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,SYM ,SYM ,symmetry,Lev,fh_work2)
Rzz = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
call fdderivs(ex,gxy,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,ANTI, ANTI,SYM ,symmetry,Lev)
call fdderivs_fh(ex,gxy,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,ANTI, ANTI,SYM ,symmetry,Lev,fh_work2)
Rxy = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
call fdderivs(ex,gxz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,ANTI ,SYM ,ANTI,symmetry,Lev)
call fdderivs_fh(ex,gxz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,ANTI ,SYM ,ANTI,symmetry,Lev,fh_work2)
Rxz = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
call fdderivs(ex,gyz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,ANTI ,ANTI,symmetry,Lev)
call fdderivs_fh(ex,gyz,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM ,ANTI ,ANTI,symmetry,Lev,fh_work2)
Ryz = gupxx * fxx + gupyy * fyy + gupzz * fzz + &
( gupxy * fxy + gupxz * fxz + gupyz * fyz ) * TWO
@@ -602,7 +606,7 @@
Gamxzz * gxzy + Gamyzz * gyzy + Gamzzz * gzzy + &
Gamxyz * gzzx + Gamyyz * gzzy + Gamzyz * gzzz )
!covariant second derivative of chi respect to tilted metric
call fdderivs(ex,chi,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fdderivs_fh(ex,chi,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
fxx = fxx - Gamxxx * chix - Gamyxx * chiy - Gamzxx * chiz
fxy = fxy - Gamxxy * chix - Gamyxy * chiy - Gamzxy * chiz
@@ -628,8 +632,8 @@
Ryz = Ryz + (fyz - chiy*chiz/chin1/TWO + gyz * f)/chin1/TWO
! covariant second derivatives of the lapse respect to physical metric
call fdderivs(ex,Lap,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z, &
SYM,SYM,SYM,symmetry,Lev)
call fdderivs_fh(ex,Lap,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z, &
SYM,SYM,SYM,symmetry,Lev,fh_work2)
gxxx = (gupxx * chix + gupxy * chiy + gupxz * chiz)/chin1
gxxy = (gupxy * chix + gupyy * chiy + gupyz * chiz)/chin1
@@ -812,7 +816,7 @@
betay_rhs = FF*dtSfy
betaz_rhs = FF*dtSfz
call fderivs(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fderivs_fh(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
reta = gupxx * dtSfx_rhs * dtSfx_rhs + gupyy * dtSfy_rhs * dtSfy_rhs + gupzz * dtSfz_rhs * dtSfz_rhs + &
TWO * (gupxy * dtSfx_rhs * dtSfy_rhs + gupxz * dtSfx_rhs * dtSfz_rhs + gupyz * dtSfy_rhs * dtSfz_rhs)
reta = 1.31d0/2*dsqrt(reta/chin1)/(1-dsqrt(chin1))**2
@@ -824,7 +828,7 @@
betay_rhs = FF*dtSfy
betaz_rhs = FF*dtSfz
call fderivs(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fderivs_fh(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
reta = gupxx * dtSfx_rhs * dtSfx_rhs + gupyy * dtSfy_rhs * dtSfy_rhs + gupzz * dtSfz_rhs * dtSfz_rhs + &
TWO * (gupxy * dtSfx_rhs * dtSfy_rhs + gupxz * dtSfx_rhs * dtSfz_rhs + gupyz * dtSfy_rhs * dtSfz_rhs)
reta = 1.31d0/2*dsqrt(reta/chin1)/(1-chin1)**2
@@ -832,7 +836,7 @@
dtSfy_rhs = Gamy_rhs - reta*dtSfy
dtSfz_rhs = Gamz_rhs - reta*dtSfz
#elif (GAUGE == 4)
call fderivs(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fderivs_fh(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
reta = gupxx * dtSfx_rhs * dtSfx_rhs + gupyy * dtSfy_rhs * dtSfy_rhs + gupzz * dtSfz_rhs * dtSfz_rhs + &
TWO * (gupxy * dtSfx_rhs * dtSfy_rhs + gupxz * dtSfx_rhs * dtSfz_rhs + gupyz * dtSfy_rhs * dtSfz_rhs)
reta = 1.31d0/2*dsqrt(reta/chin1)/(1-dsqrt(chin1))**2
@@ -844,7 +848,7 @@
dtSfy_rhs = ZEO
dtSfz_rhs = ZEO
#elif (GAUGE == 5)
call fderivs(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev)
call fderivs_fh(ex,chi,dtSfx_rhs,dtSfy_rhs,dtSfz_rhs,X,Y,Z,SYM,SYM,SYM,Symmetry,Lev,fh_work2)
reta = gupxx * dtSfx_rhs * dtSfx_rhs + gupyy * dtSfy_rhs * dtSfy_rhs + gupzz * dtSfz_rhs * dtSfz_rhs + &
TWO * (gupxy * dtSfx_rhs * dtSfy_rhs + gupxz * dtSfx_rhs * dtSfz_rhs + gupyz * dtSfy_rhs * dtSfz_rhs)
reta = 1.31d0/2*dsqrt(reta/chin1)/(1-chin1)**2
@@ -947,51 +951,51 @@
!!!!!!!!!advection term part
call lopsided(ex,X,Y,Z,gxx,gxx_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided(ex,X,Y,Z,gxy,gxy_rhs,betax,betay,betaz,Symmetry,AAS)
call lopsided(ex,X,Y,Z,gxz,gxz_rhs,betax,betay,betaz,Symmetry,ASA)
call lopsided(ex,X,Y,Z,gyy,gyy_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided(ex,X,Y,Z,gyz,gyz_rhs,betax,betay,betaz,Symmetry,SAA)
call lopsided(ex,X,Y,Z,gzz,gzz_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided_fh(ex,X,Y,Z,gxx,gxx_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided_fh(ex,X,Y,Z,gxy,gxy_rhs,betax,betay,betaz,Symmetry,AAS,fh_work3)
call lopsided_fh(ex,X,Y,Z,gxz,gxz_rhs,betax,betay,betaz,Symmetry,ASA,fh_work3)
call lopsided_fh(ex,X,Y,Z,gyy,gyy_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided_fh(ex,X,Y,Z,gyz,gyz_rhs,betax,betay,betaz,Symmetry,SAA,fh_work3)
call lopsided_fh(ex,X,Y,Z,gzz,gzz_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided(ex,X,Y,Z,Axx,Axx_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided(ex,X,Y,Z,Axy,Axy_rhs,betax,betay,betaz,Symmetry,AAS)
call lopsided(ex,X,Y,Z,Axz,Axz_rhs,betax,betay,betaz,Symmetry,ASA)
call lopsided(ex,X,Y,Z,Ayy,Ayy_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided(ex,X,Y,Z,Ayz,Ayz_rhs,betax,betay,betaz,Symmetry,SAA)
call lopsided(ex,X,Y,Z,Azz,Azz_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided_fh(ex,X,Y,Z,Axx,Axx_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided_fh(ex,X,Y,Z,Axy,Axy_rhs,betax,betay,betaz,Symmetry,AAS,fh_work3)
call lopsided_fh(ex,X,Y,Z,Axz,Axz_rhs,betax,betay,betaz,Symmetry,ASA,fh_work3)
call lopsided_fh(ex,X,Y,Z,Ayy,Ayy_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided_fh(ex,X,Y,Z,Ayz,Ayz_rhs,betax,betay,betaz,Symmetry,SAA,fh_work3)
call lopsided_fh(ex,X,Y,Z,Azz,Azz_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided(ex,X,Y,Z,chi,chi_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided(ex,X,Y,Z,trK,trK_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided_fh(ex,X,Y,Z,chi,chi_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided_fh(ex,X,Y,Z,trK,trK_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
call lopsided(ex,X,Y,Z,Gamx,Gamx_rhs,betax,betay,betaz,Symmetry,ASS)
call lopsided(ex,X,Y,Z,Gamy,Gamy_rhs,betax,betay,betaz,Symmetry,SAS)
call lopsided(ex,X,Y,Z,Gamz,Gamz_rhs,betax,betay,betaz,Symmetry,SSA)
call lopsided_fh(ex,X,Y,Z,Gamx,Gamx_rhs,betax,betay,betaz,Symmetry,ASS,fh_work3)
call lopsided_fh(ex,X,Y,Z,Gamy,Gamy_rhs,betax,betay,betaz,Symmetry,SAS,fh_work3)
call lopsided_fh(ex,X,Y,Z,Gamz,Gamz_rhs,betax,betay,betaz,Symmetry,SSA,fh_work3)
!!
call lopsided(ex,X,Y,Z,Lap,Lap_rhs,betax,betay,betaz,Symmetry,SSS)
call lopsided_fh(ex,X,Y,Z,Lap,Lap_rhs,betax,betay,betaz,Symmetry,SSS,fh_work3)
#if (GAUGE == 0 || GAUGE == 1 || GAUGE == 2 || GAUGE == 3 || GAUGE == 4 || GAUGE == 5 || GAUGE == 6 || GAUGE == 7)
call lopsided(ex,X,Y,Z,betax,betax_rhs,betax,betay,betaz,Symmetry,ASS)
call lopsided(ex,X,Y,Z,betay,betay_rhs,betax,betay,betaz,Symmetry,SAS)
call lopsided(ex,X,Y,Z,betaz,betaz_rhs,betax,betay,betaz,Symmetry,SSA)
call lopsided_fh(ex,X,Y,Z,betax,betax_rhs,betax,betay,betaz,Symmetry,ASS,fh_work3)
call lopsided_fh(ex,X,Y,Z,betay,betay_rhs,betax,betay,betaz,Symmetry,SAS,fh_work3)
call lopsided_fh(ex,X,Y,Z,betaz,betaz_rhs,betax,betay,betaz,Symmetry,SSA,fh_work3)
#endif
#if (GAUGE == 0 || GAUGE == 2 || GAUGE == 3 || GAUGE == 6 || GAUGE == 7)
call lopsided(ex,X,Y,Z,dtSfx,dtSfx_rhs,betax,betay,betaz,Symmetry,ASS)
call lopsided(ex,X,Y,Z,dtSfy,dtSfy_rhs,betax,betay,betaz,Symmetry,SAS)
call lopsided(ex,X,Y,Z,dtSfz,dtSfz_rhs,betax,betay,betaz,Symmetry,SSA)
call lopsided_fh(ex,X,Y,Z,dtSfx,dtSfx_rhs,betax,betay,betaz,Symmetry,ASS,fh_work3)
call lopsided_fh(ex,X,Y,Z,dtSfy,dtSfy_rhs,betax,betay,betaz,Symmetry,SAS,fh_work3)
call lopsided_fh(ex,X,Y,Z,dtSfz,dtSfz_rhs,betax,betay,betaz,Symmetry,SSA,fh_work3)
#endif
if(eps>0)then
! usual Kreiss-Oliger dissipation
call kodis(ex,X,Y,Z,chi,chi_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,trK,trK_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,dxx,gxx_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,gxy,gxy_rhs,AAS,Symmetry,eps)
call kodis(ex,X,Y,Z,gxz,gxz_rhs,ASA,Symmetry,eps)
call kodis(ex,X,Y,Z,dyy,gyy_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,gyz,gyz_rhs,SAA,Symmetry,eps)
call kodis(ex,X,Y,Z,dzz,gzz_rhs,SSS,Symmetry,eps)
call kodis_fh(ex,X,Y,Z,chi,chi_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,trK,trK_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,dxx,gxx_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,gxy,gxy_rhs,AAS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,gxz,gxz_rhs,ASA,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,dyy,gyy_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,gyz,gyz_rhs,SAA,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,dzz,gzz_rhs,SSS,Symmetry,eps,fh_work3)
#if 0
#define i 42
#define j 40
@@ -1005,7 +1009,7 @@ endif
#undef k
!!stop
#endif
call kodis(ex,X,Y,Z,Axx,Axx_rhs,SSS,Symmetry,eps)
call kodis_fh(ex,X,Y,Z,Axx,Axx_rhs,SSS,Symmetry,eps,fh_work3)
#if 0
#define i 42
#define j 40
@@ -1019,25 +1023,25 @@ endif
#undef k
!!stop
#endif
call kodis(ex,X,Y,Z,Axy,Axy_rhs,AAS,Symmetry,eps)
call kodis(ex,X,Y,Z,Axz,Axz_rhs,ASA,Symmetry,eps)
call kodis(ex,X,Y,Z,Ayy,Ayy_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,Ayz,Ayz_rhs,SAA,Symmetry,eps)
call kodis(ex,X,Y,Z,Azz,Azz_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,Gamx,Gamx_rhs,ASS,Symmetry,eps)
call kodis(ex,X,Y,Z,Gamy,Gamy_rhs,SAS,Symmetry,eps)
call kodis(ex,X,Y,Z,Gamz,Gamz_rhs,SSA,Symmetry,eps)
call kodis_fh(ex,X,Y,Z,Axy,Axy_rhs,AAS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Axz,Axz_rhs,ASA,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Ayy,Ayy_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Ayz,Ayz_rhs,SAA,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Azz,Azz_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Gamx,Gamx_rhs,ASS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Gamy,Gamy_rhs,SAS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,Gamz,Gamz_rhs,SSA,Symmetry,eps,fh_work3)
#if 1
!! bam does not apply dissipation on gauge variables
call kodis(ex,X,Y,Z,Lap,Lap_rhs,SSS,Symmetry,eps)
call kodis(ex,X,Y,Z,betax,betax_rhs,ASS,Symmetry,eps)
call kodis(ex,X,Y,Z,betay,betay_rhs,SAS,Symmetry,eps)
call kodis(ex,X,Y,Z,betaz,betaz_rhs,SSA,Symmetry,eps)
call kodis_fh(ex,X,Y,Z,Lap,Lap_rhs,SSS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,betax,betax_rhs,ASS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,betay,betay_rhs,SAS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,betaz,betaz_rhs,SSA,Symmetry,eps,fh_work3)
#if (GAUGE == 0 || GAUGE == 2 || GAUGE == 3 || GAUGE == 6 || GAUGE == 7)
call kodis(ex,X,Y,Z,dtSfx,dtSfx_rhs,ASS,Symmetry,eps)
call kodis(ex,X,Y,Z,dtSfy,dtSfy_rhs,SAS,Symmetry,eps)
call kodis(ex,X,Y,Z,dtSfz,dtSfz_rhs,SSA,Symmetry,eps)
call kodis_fh(ex,X,Y,Z,dtSfx,dtSfx_rhs,ASS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,dtSfy,dtSfy_rhs,SAS,Symmetry,eps,fh_work3)
call kodis_fh(ex,X,Y,Z,dtSfz,dtSfz_rhs,SSA,Symmetry,eps,fh_work3)
#endif
#endif
@@ -1078,12 +1082,12 @@ endif
! mov_Res_j = gupkj*(-1/chi d_k chi*A_ij + D_k A_ij) - 2/3 d_j trK - 8 PI s_j where D respect to physical metric
! store D_i A_jk - 1/chi d_i chi*A_jk in gjk_i
call fderivs(ex,Axx,gxxx,gxxy,gxxz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0)
call fderivs(ex,Axy,gxyx,gxyy,gxyz,X,Y,Z,ANTI,ANTI,SYM ,Symmetry,0)
call fderivs(ex,Axz,gxzx,gxzy,gxzz,X,Y,Z,ANTI,SYM ,ANTI,Symmetry,0)
call fderivs(ex,Ayy,gyyx,gyyy,gyyz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0)
call fderivs(ex,Ayz,gyzx,gyzy,gyzz,X,Y,Z,SYM ,ANTI,ANTI,Symmetry,0)
call fderivs(ex,Azz,gzzx,gzzy,gzzz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0)
call fderivs_fh(ex,Axx,gxxx,gxxy,gxxz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0,fh_work2)
call fderivs_fh(ex,Axy,gxyx,gxyy,gxyz,X,Y,Z,ANTI,ANTI,SYM ,Symmetry,0,fh_work2)
call fderivs_fh(ex,Axz,gxzx,gxzy,gxzz,X,Y,Z,ANTI,SYM ,ANTI,Symmetry,0,fh_work2)
call fderivs_fh(ex,Ayy,gyyx,gyyy,gyyz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0,fh_work2)
call fderivs_fh(ex,Ayz,gyzx,gyzy,gyzz,X,Y,Z,SYM ,ANTI,ANTI,Symmetry,0,fh_work2)
call fderivs_fh(ex,Azz,gzzx,gzzy,gzzz,X,Y,Z,SYM ,SYM ,SYM ,Symmetry,0,fh_work2)
gxxx = gxxx - ( Gamxxx * Axx + Gamyxx * Axy + Gamzxx * Axz &
+ Gamxxx * Axx + Gamyxx * Axy + Gamzxx * Axz) - chix*Axx/chin1
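The compute_rhs_bssn hunks above thread two shared halo buffers (fh_work2, fh_work3) through every derivative, advection, and dissipation call, so the call sites stop allocating their own automatic arrays. A minimal sketch of that calling convention under the same declarations; the routine name demo_rhs and its single-variable body are hypothetical, and SYM = 1.d0 stands in for the code's symmetry flags:

    subroutine demo_rhs(ex, X, Y, Z, chi, chi_rhs, betax, betay, betaz, Symmetry, Lev, eps)
      implicit none
      integer, intent(in) :: ex(1:3), Symmetry, Lev
      real*8, intent(in)  :: X(ex(1)), Y(ex(2)), Z(ex(3)), eps
      real*8, dimension(ex(1),ex(2),ex(3)), intent(in)    :: chi, betax, betay, betaz
      real*8, dimension(ex(1),ex(2),ex(3)), intent(inout) :: chi_rhs
      real*8, dimension(ex(1),ex(2),ex(3)) :: chix, chiy, chiz
      ! one buffer per ghost width, declared once per RHS evaluation:
      real*8, dimension(-1:ex(1),-1:ex(2),-1:ex(3)) :: work2  ! symmetry_bd(2) callers
      real*8, dimension(-2:ex(1),-2:ex(2),-2:ex(3)) :: work3  ! symmetry_bd(3) callers
      real*8, dimension(3) :: SSS
      real*8, parameter :: SYM = 1.d0
      SSS = SYM
      ! every *_fh call reuses the caller's buffer instead of allocating its own
      call fderivs_fh (ex, chi, chix, chiy, chiz, X, Y, Z, SYM, SYM, SYM, Symmetry, Lev, work2)
      call lopsided_fh(ex, X, Y, Z, chi, chi_rhs, betax, betay, betaz, Symmetry, SSS, work3)
      if (eps > 0.d0) call kodis_fh(ex, X, Y, Z, chi, chi_rhs, SSS, Symmetry, eps, work3)
    end subroutine demo_rhs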

View File

@@ -20,11 +20,7 @@ using namespace std;
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "macrodef.h"
#include "misc.h"

View File

@@ -2,11 +2,7 @@
#ifndef CGH_H
#define CGH_H
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "MyList.h"
#include "MPatch.h"
#include "macrodef.h"

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <time.h>
#include <stdlib.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "var.h"
#include "MyList.h"

View File

@@ -69,7 +69,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -152,7 +151,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -220,7 +218,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -285,7 +282,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -375,7 +371,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -474,7 +469,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -537,7 +531,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -601,7 +594,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -665,7 +657,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -728,7 +719,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -790,7 +780,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -877,7 +866,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1009,7 +997,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1116,6 +1103,103 @@
end subroutine fderivs
!-----------------------------------------------------------------------------
! fderivs variant: reuses caller-provided fh work array (memory pool)
!-----------------------------------------------------------------------------
subroutine fderivs_fh(ex,f,fx,fy,fz,X,Y,Z,SYM1,SYM2,SYM3, &
symmetry,onoff,fh)
implicit none
integer, intent(in ):: ex(1:3),symmetry,onoff
real*8, dimension(ex(1),ex(2),ex(3)), intent(in ):: f
real*8, dimension(ex(1),ex(2),ex(3)), intent(out):: fx,fy,fz
real*8, intent(in) :: X(ex(1)),Y(ex(2)),Z(ex(3))
real*8, intent(in ):: SYM1,SYM2,SYM3
real*8,dimension(-1:ex(1),-1:ex(2),-1:ex(3)),intent(inout):: fh
real*8 :: dX,dY,dZ
real*8, dimension(3) :: SoA
integer :: imin,jmin,kmin,imax,jmax,kmax,i,j,k
real*8 :: d12dx,d12dy,d12dz,d2dx,d2dy,d2dz
integer, parameter :: NO_SYMM = 0, EQ_SYMM = 1, OCTANT = 2
real*8, parameter :: ZEO=0.d0,ONE=1.d0
real*8, parameter :: TWO=2.d0,EIT=8.d0
real*8, parameter :: F12=1.2d1
dX = X(2)-X(1)
dY = Y(2)-Y(1)
dZ = Z(2)-Z(1)
imax = ex(1)
jmax = ex(2)
kmax = ex(3)
imin = 1
jmin = 1
kmin = 1
if(Symmetry > NO_SYMM .and. dabs(Z(1)) < dZ) kmin = -1
if(Symmetry > EQ_SYMM .and. dabs(X(1)) < dX) imin = -1
if(Symmetry > EQ_SYMM .and. dabs(Y(1)) < dY) jmin = -1
SoA(1) = SYM1
SoA(2) = SYM2
SoA(3) = SYM3
call symmetry_bd(2,ex,f,fh,SoA)
d12dx = ONE/F12/dX
d12dy = ONE/F12/dY
d12dz = ONE/F12/dZ
d2dx = ONE/TWO/dX
d2dy = ONE/TWO/dY
d2dz = ONE/TWO/dZ
fx = ZEO
fy = ZEO
fz = ZEO
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
#if 0
if(i+2 <= imax .and. i-2 >= imin)then
fx(i,j,k)=d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
elseif(i+1 <= imax .and. i-1 >= imin)then
fx(i,j,k)=d2dx*(-fh(i-1,j,k)+fh(i+1,j,k))
endif
if(j+2 <= jmax .and. j-2 >= jmin)then
fy(i,j,k)=d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
elseif(j+1 <= jmax .and. j-1 >= jmin)then
fy(i,j,k)=d2dy*(-fh(i,j-1,k)+fh(i,j+1,k))
endif
if(k+2 <= kmax .and. k-2 >= kmin)then
fz(i,j,k)=d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
elseif(k+1 <= kmax .and. k-1 >= kmin)then
fz(i,j,k)=d2dz*(-fh(i,j,k-1)+fh(i,j,k+1))
endif
#else
if(i+2 <= imax .and. i-2 >= imin .and. &
j+2 <= jmax .and. j-2 >= jmin .and. &
k+2 <= kmax .and. k-2 >= kmin) then
fx(i,j,k)=d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
fy(i,j,k)=d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
fz(i,j,k)=d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
elseif(i+1 <= imax .and. i-1 >= imin .and. &
j+1 <= jmax .and. j-1 >= jmin .and. &
k+1 <= kmax .and. k-1 >= kmin) then
fx(i,j,k)=d2dx*(-fh(i-1,j,k)+fh(i+1,j,k))
fy(i,j,k)=d2dy*(-fh(i,j-1,k)+fh(i,j+1,k))
fz(i,j,k)=d2dz*(-fh(i,j,k-1)+fh(i,j,k+1))
endif
#endif
enddo
enddo
enddo
return
end subroutine fderivs_fh
!-----------------------------------------------------------------------------
!
! single derivatives dx
!
@@ -1164,7 +1248,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1241,7 +1324,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1312,7 +1394,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1417,7 +1498,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1593,7 +1673,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1661,7 +1740,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1731,7 +1809,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1801,7 +1878,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1872,7 +1948,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1941,7 +2016,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -1963,6 +2037,162 @@
end subroutine fddyz
!-----------------------------------------------------------------------------
! fdderivs variant: reuses caller-provided fh work array (memory pool)
!-----------------------------------------------------------------------------
subroutine fdderivs_fh(ex,f,fxx,fxy,fxz,fyy,fyz,fzz,X,Y,Z, &
SYM1,SYM2,SYM3,symmetry,onoff,fh)
implicit none
integer, intent(in ):: ex(1:3),symmetry,onoff
real*8, dimension(ex(1),ex(2),ex(3)),intent(in ):: f
real*8, dimension(ex(1),ex(2),ex(3)),intent(out):: fxx,fxy,fxz,fyy,fyz,fzz
real*8, intent(in ):: X(ex(1)),Y(ex(2)),Z(ex(3))
real*8, intent(in ):: SYM1,SYM2,SYM3
real*8,dimension(-1:ex(1),-1:ex(2),-1:ex(3)),intent(inout):: fh
real*8 :: dX,dY,dZ
real*8, dimension(3) :: SoA
integer :: imin,jmin,kmin,imax,jmax,kmax,i,j,k
real*8 :: Sdxdx,Sdydy,Sdzdz,Fdxdx,Fdydy,Fdzdz
real*8 :: Sdxdy,Sdxdz,Sdydz,Fdxdy,Fdxdz,Fdydz
integer, parameter :: NO_SYMM = 0, EQ_SYMM = 1, OCTANT = 2
real*8, parameter :: ZEO=0.d0, ONE=1.d0, TWO=2.d0, F1o4=2.5d-1
real*8, parameter :: F8=8.d0, F16=1.6d1, F30=3.d1
real*8, parameter :: F1o12=ONE/1.2d1, F1o144=ONE/1.44d2
dX = X(2)-X(1)
dY = Y(2)-Y(1)
dZ = Z(2)-Z(1)
imax = ex(1)
jmax = ex(2)
kmax = ex(3)
imin = 1
jmin = 1
kmin = 1
if(Symmetry > NO_SYMM .and. dabs(Z(1)) < dZ) kmin = -1
if(Symmetry > EQ_SYMM .and. dabs(X(1)) < dX) imin = -1
if(Symmetry > EQ_SYMM .and. dabs(Y(1)) < dY) jmin = -1
SoA(1) = SYM1
SoA(2) = SYM2
SoA(3) = SYM3
call symmetry_bd(2,ex,f,fh,SoA)
Sdxdx = ONE /( dX * dX )
Sdydy = ONE /( dY * dY )
Sdzdz = ONE /( dZ * dZ )
Fdxdx = F1o12 /( dX * dX )
Fdydy = F1o12 /( dY * dY )
Fdzdz = F1o12 /( dZ * dZ )
Sdxdy = F1o4 /( dX * dY )
Sdxdz = F1o4 /( dX * dZ )
Sdydz = F1o4 /( dY * dZ )
Fdxdy = F1o144 /( dX * dY )
Fdxdz = F1o144 /( dX * dZ )
Fdydz = F1o144 /( dY * dZ )
fxx = ZEO
fyy = ZEO
fzz = ZEO
fxy = ZEO
fxz = ZEO
fyz = ZEO
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
#if 0
if(i+2 <= imax .and. i-2 >= imin)then
fxx(i,j,k) = Fdxdx*(-fh(i-2,j,k)+F16*fh(i-1,j,k)-F30*fh(i,j,k) &
-fh(i+2,j,k)+F16*fh(i+1,j,k) )
elseif(i+1 <= imax .and. i-1 >= imin)then
fxx(i,j,k) = Sdxdx*(fh(i-1,j,k)-TWO*fh(i,j,k)+fh(i+1,j,k))
endif
if(j+2 <= jmax .and. j-2 >= jmin)then
fyy(i,j,k) = Fdydy*(-fh(i,j-2,k)+F16*fh(i,j-1,k)-F30*fh(i,j,k) &
-fh(i,j+2,k)+F16*fh(i,j+1,k) )
elseif(j+1 <= jmax .and. j-1 >= jmin)then
fyy(i,j,k) = Sdydy*(fh(i,j-1,k)-TWO*fh(i,j,k)+fh(i,j+1,k))
endif
if(k+2 <= kmax .and. k-2 >= kmin)then
fzz(i,j,k) = Fdzdz*(-fh(i,j,k-2)+F16*fh(i,j,k-1)-F30*fh(i,j,k) &
-fh(i,j,k+2)+F16*fh(i,j,k+1) )
elseif(k+1 <= kmax .and. k-1 >= kmin)then
fzz(i,j,k) = Sdzdz*(fh(i,j,k-1)-TWO*fh(i,j,k)+fh(i,j,k+1))
endif
if(i+2 <= imax .and. i-2 >= imin .and. j+2 <= jmax .and. j-2 >= jmin)then
fxy(i,j,k) = Fdxdy*( (fh(i-2,j-2,k)-F8*fh(i-1,j-2,k)+F8*fh(i+1,j-2,k)-fh(i+2,j-2,k)) &
-F8 *(fh(i-2,j-1,k)-F8*fh(i-1,j-1,k)+F8*fh(i+1,j-1,k)-fh(i+2,j-1,k)) &
+F8 *(fh(i-2,j+1,k)-F8*fh(i-1,j+1,k)+F8*fh(i+1,j+1,k)-fh(i+2,j+1,k)) &
- (fh(i-2,j+2,k)-F8*fh(i-1,j+2,k)+F8*fh(i+1,j+2,k)-fh(i+2,j+2,k)))
elseif(i+1 <= imax .and. i-1 >= imin .and. j+1 <= jmax .and. j-1 >= jmin)then
fxy(i,j,k) = Sdxdy*(fh(i-1,j-1,k)-fh(i+1,j-1,k)-fh(i-1,j+1,k)+fh(i+1,j+1,k))
endif
if(i+2 <= imax .and. i-2 >= imin .and. k+2 <= kmax .and. k-2 >= kmin)then
fxz(i,j,k) = Fdxdz*( (fh(i-2,j,k-2)-F8*fh(i-1,j,k-2)+F8*fh(i+1,j,k-2)-fh(i+2,j,k-2)) &
-F8 *(fh(i-2,j,k-1)-F8*fh(i-1,j,k-1)+F8*fh(i+1,j,k-1)-fh(i+2,j,k-1)) &
+F8 *(fh(i-2,j,k+1)-F8*fh(i-1,j,k+1)+F8*fh(i+1,j,k+1)-fh(i+2,j,k+1)) &
- (fh(i-2,j,k+2)-F8*fh(i-1,j,k+2)+F8*fh(i+1,j,k+2)-fh(i+2,j,k+2)))
elseif(i+1 <= imax .and. i-1 >= imin .and. k+1 <= kmax .and. k-1 >= kmin)then
fxz(i,j,k) = Sdxdz*(fh(i-1,j,k-1)-fh(i+1,j,k-1)-fh(i-1,j,k+1)+fh(i+1,j,k+1))
endif
if(j+2 <= jmax .and. j-2 >= jmin .and. k+2 <= kmax .and. k-2 >= kmin)then
fyz(i,j,k) = Fdydz*( (fh(i,j-2,k-2)-F8*fh(i,j-1,k-2)+F8*fh(i,j+1,k-2)-fh(i,j+2,k-2)) &
-F8 *(fh(i,j-2,k-1)-F8*fh(i,j-1,k-1)+F8*fh(i,j+1,k-1)-fh(i,j+2,k-1)) &
+F8 *(fh(i,j-2,k+1)-F8*fh(i,j-1,k+1)+F8*fh(i,j+1,k+1)-fh(i,j+2,k+1)) &
- (fh(i,j-2,k+2)-F8*fh(i,j-1,k+2)+F8*fh(i,j+1,k+2)-fh(i,j+2,k+2)))
elseif(j+1 <= jmax .and. j-1 >= jmin .and. k+1 <= kmax .and. k-1 >= kmin)then
fyz(i,j,k) = Sdydz*(fh(i,j-1,k-1)-fh(i,j+1,k-1)-fh(i,j-1,k+1)+fh(i,j+1,k+1))
endif
#else
! for bam comparison
if(i+2 <= imax .and. i-2 >= imin .and. &
j+2 <= jmax .and. j-2 >= jmin .and. &
k+2 <= kmax .and. k-2 >= kmin) then
fxx(i,j,k) = Fdxdx*(-fh(i-2,j,k)+F16*fh(i-1,j,k)-F30*fh(i,j,k) &
-fh(i+2,j,k)+F16*fh(i+1,j,k) )
fyy(i,j,k) = Fdydy*(-fh(i,j-2,k)+F16*fh(i,j-1,k)-F30*fh(i,j,k) &
-fh(i,j+2,k)+F16*fh(i,j+1,k) )
fzz(i,j,k) = Fdzdz*(-fh(i,j,k-2)+F16*fh(i,j,k-1)-F30*fh(i,j,k) &
-fh(i,j,k+2)+F16*fh(i,j,k+1) )
fxy(i,j,k) = Fdxdy*( (fh(i-2,j-2,k)-F8*fh(i-1,j-2,k)+F8*fh(i+1,j-2,k)-fh(i+2,j-2,k)) &
-F8 *(fh(i-2,j-1,k)-F8*fh(i-1,j-1,k)+F8*fh(i+1,j-1,k)-fh(i+2,j-1,k)) &
+F8 *(fh(i-2,j+1,k)-F8*fh(i-1,j+1,k)+F8*fh(i+1,j+1,k)-fh(i+2,j+1,k)) &
- (fh(i-2,j+2,k)-F8*fh(i-1,j+2,k)+F8*fh(i+1,j+2,k)-fh(i+2,j+2,k)))
fxz(i,j,k) = Fdxdz*( (fh(i-2,j,k-2)-F8*fh(i-1,j,k-2)+F8*fh(i+1,j,k-2)-fh(i+2,j,k-2)) &
-F8 *(fh(i-2,j,k-1)-F8*fh(i-1,j,k-1)+F8*fh(i+1,j,k-1)-fh(i+2,j,k-1)) &
+F8 *(fh(i-2,j,k+1)-F8*fh(i-1,j,k+1)+F8*fh(i+1,j,k+1)-fh(i+2,j,k+1)) &
- (fh(i-2,j,k+2)-F8*fh(i-1,j,k+2)+F8*fh(i+1,j,k+2)-fh(i+2,j,k+2)))
fyz(i,j,k) = Fdydz*( (fh(i,j-2,k-2)-F8*fh(i,j-1,k-2)+F8*fh(i,j+1,k-2)-fh(i,j+2,k-2)) &
-F8 *(fh(i,j-2,k-1)-F8*fh(i,j-1,k-1)+F8*fh(i,j+1,k-1)-fh(i,j+2,k-1)) &
+F8 *(fh(i,j-2,k+1)-F8*fh(i,j-1,k+1)+F8*fh(i,j+1,k+1)-fh(i,j+2,k+1)) &
- (fh(i,j-2,k+2)-F8*fh(i,j-1,k+2)+F8*fh(i,j+1,k+2)-fh(i,j+2,k+2)))
elseif(i+1 <= imax .and. i-1 >= imin .and. &
j+1 <= jmax .and. j-1 >= jmin .and. &
k+1 <= kmax .and. k-1 >= kmin) then
fxx(i,j,k) = Sdxdx*(fh(i-1,j,k)-TWO*fh(i,j,k)+fh(i+1,j,k))
fyy(i,j,k) = Sdydy*(fh(i,j-1,k)-TWO*fh(i,j,k)+fh(i,j+1,k))
fzz(i,j,k) = Sdzdz*(fh(i,j,k-1)-TWO*fh(i,j,k)+fh(i,j,k+1))
fxy(i,j,k) = Sdxdy*(fh(i-1,j-1,k)-fh(i+1,j-1,k)-fh(i-1,j+1,k)+fh(i+1,j+1,k))
fxz(i,j,k) = Sdxdz*(fh(i-1,j,k-1)-fh(i+1,j,k-1)-fh(i-1,j,k+1)+fh(i+1,j,k+1))
fyz(i,j,k) = Sdydz*(fh(i,j-1,k-1)-fh(i,j+1,k-1)-fh(i,j-1,k+1)+fh(i,j+1,k+1))
endif
#endif
enddo
enddo
enddo
return
end subroutine fdderivs_fh
#elif (ghost_width == 4)
! sixth order code
@@ -2034,7 +2264,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2151,7 +2380,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2237,7 +2465,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2314,7 +2541,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2433,7 +2659,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2621,7 +2846,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2694,7 +2918,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2770,7 +2993,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2846,7 +3068,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -2927,7 +3148,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3006,7 +3226,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3114,7 +3333,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3251,7 +3469,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3347,7 +3564,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3432,7 +3648,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3568,7 +3783,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3841,7 +4055,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -3923,7 +4136,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4008,7 +4220,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4093,7 +4304,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4196,7 +4406,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
@@ -4297,7 +4506,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
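Each *_fh routine above keeps the body of its original counterpart and only turns the internally declared halo array into a dummy argument, which is what removes the per-call automatic allocation. A stripped-down sketch of the transformation; deriv_x and deriv_x_fh are hypothetical names, and the ghost filling that symmetry_bd performs is reduced here to a plain interior copy:

    ! before: the halo array is an automatic array, re-created on every call
    subroutine deriv_x(ex, f, fx, dX)
      implicit none
      integer, intent(in) :: ex(1:3)
      real*8, intent(in)  :: f(ex(1),ex(2),ex(3)), dX
      real*8, intent(out) :: fx(ex(1),ex(2),ex(3))
      real*8, dimension(-1:ex(1),-1:ex(2),-1:ex(3)) :: fh   ! allocated here, on every call
      fh = 0.d0
      fh(1:ex(1),1:ex(2),1:ex(3)) = f       ! symmetry_bd(2,...) also fills the ghost layers
      fx = 0.d0
      fx(2:ex(1)-1,:,:) = (fh(3:ex(1),1:ex(2),1:ex(3)) - fh(1:ex(1)-2,1:ex(2),1:ex(3))) / (2.d0*dX)
    end subroutine deriv_x

    ! after: the caller owns the halo array; only the declaration changes
    subroutine deriv_x_fh(ex, f, fx, dX, fh)
      implicit none
      integer, intent(in) :: ex(1:3)
      real*8, intent(in)  :: f(ex(1),ex(2),ex(3)), dX
      real*8, intent(out) :: fx(ex(1),ex(2),ex(3))
      real*8, dimension(-1:ex(1),-1:ex(2),-1:ex(3)), intent(inout) :: fh  ! reused across calls
      fh(1:ex(1),1:ex(2),1:ex(3)) = f
      fx = 0.d0
      fx(2:ex(1)-1,:,:) = (fh(3:ex(1),1:ex(2),1:ex(3)) - fh(1:ex(1)-2,1:ex(2),1:ex(3))) / (2.d0*dX)
    end subroutine deriv_x_fh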

View File

@@ -81,7 +81,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -180,7 +179,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -264,7 +262,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -345,7 +342,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -447,7 +443,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -558,7 +553,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -633,7 +627,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -709,7 +702,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -785,7 +777,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -860,7 +851,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -934,7 +924,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1030,7 +1019,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1146,7 +1134,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1240,7 +1227,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1328,7 +1314,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1445,7 +1430,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1596,7 +1580,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1676,7 +1659,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1758,7 +1740,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1840,7 +1821,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1923,7 +1903,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2004,7 +1983,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2109,7 +2087,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2242,7 +2219,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2345,7 +2321,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2439,7 +2414,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2570,7 +2544,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2770,7 +2743,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2855,7 +2827,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -2943,7 +2914,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3031,7 +3001,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3124,7 +3093,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3215,7 +3183,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3335,7 +3302,6 @@
fy = ZEO
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3488,7 +3454,6 @@
fx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3601,7 +3566,6 @@
fy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3703,7 +3667,6 @@
fz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -3851,7 +3814,6 @@
fxz = ZEO
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4136,7 +4098,6 @@
fxx = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4230,7 +4191,6 @@
fyy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4327,7 +4287,6 @@
fzz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4424,7 +4383,6 @@
fxy = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4539,7 +4497,6 @@
fxz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4652,7 +4609,6 @@
fyz = ZEO
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4723,7 +4679,6 @@ subroutine fderivs_shc(ex,f,fx,fy,fz,crho,sigma,R,SYM1,SYM2,SYM3,Symmetry,Lev,ss
#if 0
integer :: i,j,k
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -4774,7 +4729,6 @@ subroutine fdderivs_shc(ex,f,fxx,fxy,fxz,fyy,fyz,fzz,crho,sigma,R,SYM1,SYM2,SYM3
#if 0
integer :: i,j,k
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

View File

@@ -27,7 +27,6 @@
!~~~~~~>
!$omp parallel do collapse(2) private(i,j,k,lgxx,lgyy,lgzz,ldetg,lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz,ltrA,lscale)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -105,7 +104,6 @@
!~~~~~~>
!$omp parallel do collapse(2) private(i,j,k,lgxx,lgyy,lgzz,lscale,lgxy,lgxz,lgyz,lgupxx,lgupxy,lgupxz,lgupyy,lgupyz,lgupzz,ltrA)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

View File

@@ -6,11 +6,7 @@
#include <stdio.h>
#include <assert.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "util_Table.h"
#include "cctk.h"

View File

@@ -6,11 +6,7 @@
#include <stdio.h>
#include <assert.h>
#include <math.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "cctk.h"

View File

@@ -65,8 +65,7 @@ real*8,intent(in) :: eps
! dx^4
! note the sign (-1)^r-1, now r=2
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -160,8 +159,7 @@ integer, parameter :: NO_SYMM=0, OCTANT=2
call symmetry_bd(3,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -217,6 +215,99 @@ integer, parameter :: NO_SYMM=0, OCTANT=2
end subroutine kodis
!-----------------------------------------------------------------------------
! kodis variant: reuses caller-provided fh work array (memory pool)
!-----------------------------------------------------------------------------
subroutine kodis_fh(ex,X,Y,Z,f,f_rhs,SoA,Symmetry,eps,fh)
implicit none
! argument variables
integer,intent(in) :: Symmetry
integer,dimension(3),intent(in)::ex
real*8, dimension(1:3), intent(in) :: SoA
double precision,intent(in),dimension(ex(1))::X
double precision,intent(in),dimension(ex(2))::Y
double precision,intent(in),dimension(ex(3))::Z
double precision,intent(in),dimension(ex(1),ex(2),ex(3))::f
double precision,intent(inout),dimension(ex(1),ex(2),ex(3))::f_rhs
real*8,intent(in) :: eps
real*8,dimension(-2:ex(1),-2:ex(2),-2:ex(3)),intent(inout):: fh
! local variables
integer :: imin,jmin,kmin,imax,jmax,kmax
integer :: i,j,k
real*8 :: dX,dY,dZ
real*8, parameter :: ONE=1.d0,SIX=6.d0,FIT=1.5d1,TWT=2.d1
real*8,parameter::cof=6.4d1 ! 2^6
integer, parameter :: NO_SYMM=0, OCTANT=2
dX = X(2)-X(1)
dY = Y(2)-Y(1)
dZ = Z(2)-Z(1)
imax = ex(1)
jmax = ex(2)
kmax = ex(3)
imin = 1
jmin = 1
kmin = 1
if(Symmetry > NO_SYMM .and. dabs(Z(1)) < dZ) kmin = -2
if(Symmetry == OCTANT .and. dabs(X(1)) < dX) imin = -2
if(Symmetry == OCTANT .and. dabs(Y(1)) < dY) jmin = -2
call symmetry_bd(3,ex,f,fh,SoA)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
if(i-3 >= imin .and. i+3 <= imax .and. &
j-3 >= jmin .and. j+3 <= jmax .and. &
k-3 >= kmin .and. k+3 <= kmax) then
#if 0
f_rhs(i,j,k) = f_rhs(i,j,k) + eps/dX/cof * ( &
(fh(i-3,j,k)+fh(i+3,j,k)) - &
SIX*(fh(i-2,j,k)+fh(i+2,j,k)) + &
FIT*(fh(i-1,j,k)+fh(i+1,j,k)) - &
TWT* fh(i,j,k) )
f_rhs(i,j,k) = f_rhs(i,j,k) + eps/dY/cof * ( &
(fh(i,j-3,k)+fh(i,j+3,k)) - &
SIX*(fh(i,j-2,k)+fh(i,j+2,k)) + &
FIT*(fh(i,j-1,k)+fh(i,j+1,k)) - &
TWT* fh(i,j,k) )
f_rhs(i,j,k) = f_rhs(i,j,k) + eps/dZ/cof * ( &
(fh(i,j,k-3)+fh(i,j,k+3)) - &
SIX*(fh(i,j,k-2)+fh(i,j,k+2)) + &
FIT*(fh(i,j,k-1)+fh(i,j,k+1)) - &
TWT* fh(i,j,k) )
#else
f_rhs(i,j,k) = f_rhs(i,j,k) + eps/cof *( ( &
(fh(i-3,j,k)+fh(i+3,j,k)) - &
SIX*(fh(i-2,j,k)+fh(i+2,j,k)) + &
FIT*(fh(i-1,j,k)+fh(i+1,j,k)) - &
TWT* fh(i,j,k) )/dX + &
( &
(fh(i,j-3,k)+fh(i,j+3,k)) - &
SIX*(fh(i,j-2,k)+fh(i,j+2,k)) + &
FIT*(fh(i,j-1,k)+fh(i,j+1,k)) - &
TWT* fh(i,j,k) )/dY + &
( &
(fh(i,j,k-3)+fh(i,j,k+3)) - &
SIX*(fh(i,j,k-2)+fh(i,j,k+2)) + &
FIT*(fh(i,j,k-1)+fh(i,j,k+1)) - &
TWT* fh(i,j,k) )/dZ )
#endif
endif
enddo
enddo
enddo
return
end subroutine kodis_fh
#elif (ghost_width == 4)
! sixth order code
!------------------------------------------------------------------------------------------------------------------------------
@@ -275,8 +366,7 @@ real*8,intent(in) :: eps
! dx^8
! note the sign (-1)^r-1, now r=4
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -388,8 +478,7 @@ real*8,intent(in) :: eps
! dx^10
! note the sign (-1)^r-1, now r=5
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
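Reading the coefficients SIX, FIT, TWT and cof = 2**6 directly off the kodis/kodis_fh loop above, the dissipation added to a right-hand side at an interior point is

    \Delta\,\mathrm{rhs}(f)_{ijk} = \frac{\varepsilon}{2^{6}} \sum_{d\in\{x,y,z\}} \frac{1}{\Delta x_d}
        \left( f_{-3} - 6 f_{-2} + 15 f_{-1} - 20 f_{0} + 15 f_{+1} - 6 f_{+2} + f_{+3} \right)_d

where f_{\pm m} is the field value m grid points away along direction d, read from the halo array fh so the stencil can also reach the symmetry ghosts. The seven-point stencil is the sixth difference, so the added term scales as \varepsilon\,\Delta x_d^{5}\,\partial_d^{6} f, the usual Kreiss-Oliger dissipation for this stencil width.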

View File

@@ -80,8 +80,7 @@ real*8,intent(in) :: eps
! dx^4
! note the sign (-1)^r-1, now r=2
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -179,8 +178,7 @@ real*8,intent(in) :: eps
! dx^4
! note the sign (-1)^r-1, now r=2
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -275,8 +273,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(2,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -372,8 +369,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(3,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -514,8 +510,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(3,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -603,8 +598,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(3,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -700,8 +694,7 @@ real*8,intent(in) :: eps
! dx^8
! note the sign (-1)^r-1, now r=4
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -801,8 +794,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(4,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -911,8 +903,7 @@ real*8,intent(in) :: eps
! dx^10
! note the sign (-1)^r-1, now r=5
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)
@@ -1015,8 +1006,7 @@ integer, parameter :: NO_SYMM=0, EQ_SYMM=1, OCTANT=2
call symmetry_stbd(5,ex,f,fh,SoA)
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)
do k=1,ex(3)
do j=1,ex(2)
do i=1,ex(1)

View File

@@ -68,8 +68,7 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction
@@ -234,8 +233,7 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
#if 0
@@ -489,6 +487,160 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
end subroutine lopsided
!-----------------------------------------------------------------------------
! lopsided variant: reuses caller-provided fh work array (memory pool)
!-----------------------------------------------------------------------------
subroutine lopsided_fh(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA,fh)
implicit none
!~~~~~~> Input parameters:
integer, intent(in) :: ex(1:3),Symmetry
real*8, intent(in) :: X(1:ex(1)),Y(1:ex(2)),Z(1:ex(3))
real*8,dimension(ex(1),ex(2),ex(3)),intent(in) :: f,Sfx,Sfy,Sfz
real*8,dimension(ex(1),ex(2),ex(3)),intent(inout):: f_rhs
real*8,dimension(3),intent(in) ::SoA
real*8,dimension(-2:ex(1),-2:ex(2),-2:ex(3)),intent(inout):: fh
!~~~~~~> local variables:
integer :: imin,jmin,kmin,imax,jmax,kmax,i,j,k
real*8 :: dX,dY,dZ
real*8 :: d12dx,d12dy,d12dz,d2dx,d2dy,d2dz
real*8, parameter :: ZEO=0.d0,ONE=1.d0, F3=3.d0
real*8, parameter :: TWO=2.d0,F6=6.0d0,F18=1.8d1
real*8, parameter :: F12=1.2d1, F10=1.d1,EIT=8.d0
integer, parameter :: NO_SYMM = 0, EQ_SYMM = 1, OCTANT = 2
dX = X(2)-X(1)
dY = Y(2)-Y(1)
dZ = Z(2)-Z(1)
d12dx = ONE/F12/dX
d12dy = ONE/F12/dY
d12dz = ONE/F12/dZ
d2dx = ONE/TWO/dX
d2dy = ONE/TWO/dY
d2dz = ONE/TWO/dZ
imax = ex(1)
jmax = ex(2)
kmax = ex(3)
imin = 1
jmin = 1
kmin = 1
if(Symmetry > NO_SYMM .and. dabs(Z(1)) < dZ) kmin = -2
if(Symmetry > EQ_SYMM .and. dabs(X(1)) < dX) imin = -2
if(Symmetry > EQ_SYMM .and. dabs(Y(1)) < dY) jmin = -2
call symmetry_bd(3,ex,f,fh,SoA)
! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
#if 0
!! old code - same as original lopsided
#else
!! new code, 2012dec27, based on bam
! x direction
if(Sfx(i,j,k) > ZEO)then
if(i+3 <= imax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfx(i,j,k)*d12dx*(-F3*fh(i-1,j,k)-F10*fh(i,j,k)+F18*fh(i+1,j,k) &
-F6*fh(i+2,j,k)+ fh(i+3,j,k))
elseif(i+2 <= imax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfx(i,j,k)*d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
elseif(i+1 <= imax)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfx(i,j,k)*d12dx*(-F3*fh(i+1,j,k)-F10*fh(i,j,k)+F18*fh(i-1,j,k) &
-F6*fh(i-2,j,k)+ fh(i-3,j,k))
endif
elseif(Sfx(i,j,k) < ZEO)then
if(i-3 >= imin)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfx(i,j,k)*d12dx*(-F3*fh(i+1,j,k)-F10*fh(i,j,k)+F18*fh(i-1,j,k) &
-F6*fh(i-2,j,k)+ fh(i-3,j,k))
elseif(i-2 >= imin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfx(i,j,k)*d12dx*(fh(i-2,j,k)-EIT*fh(i-1,j,k)+EIT*fh(i+1,j,k)-fh(i+2,j,k))
elseif(i-1 >= imin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfx(i,j,k)*d12dx*(-F3*fh(i-1,j,k)-F10*fh(i,j,k)+F18*fh(i+1,j,k) &
-F6*fh(i+2,j,k)+ fh(i+3,j,k))
endif
endif
! y direction
if(Sfy(i,j,k) > ZEO)then
if(j+3 <= jmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfy(i,j,k)*d12dy*(-F3*fh(i,j-1,k)-F10*fh(i,j,k)+F18*fh(i,j+1,k) &
-F6*fh(i,j+2,k)+ fh(i,j+3,k))
elseif(j+2 <= jmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfy(i,j,k)*d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
elseif(j+1 <= jmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfy(i,j,k)*d12dy*(-F3*fh(i,j+1,k)-F10*fh(i,j,k)+F18*fh(i,j-1,k) &
-F6*fh(i,j-2,k)+ fh(i,j-3,k))
endif
elseif(Sfy(i,j,k) < ZEO)then
if(j-3 >= jmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfy(i,j,k)*d12dy*(-F3*fh(i,j+1,k)-F10*fh(i,j,k)+F18*fh(i,j-1,k) &
-F6*fh(i,j-2,k)+ fh(i,j-3,k))
elseif(j-2 >= jmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfy(i,j,k)*d12dy*(fh(i,j-2,k)-EIT*fh(i,j-1,k)+EIT*fh(i,j+1,k)-fh(i,j+2,k))
elseif(j-1 >= jmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfy(i,j,k)*d12dy*(-F3*fh(i,j-1,k)-F10*fh(i,j,k)+F18*fh(i,j+1,k) &
-F6*fh(i,j+2,k)+ fh(i,j+3,k))
endif
endif
! z direction
if(Sfz(i,j,k) > ZEO)then
if(k+3 <= kmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k-1)-F10*fh(i,j,k)+F18*fh(i,j,k+1) &
-F6*fh(i,j,k+2)+ fh(i,j,k+3))
elseif(k+2 <= kmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfz(i,j,k)*d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
elseif(k+1 <= kmax)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k+1)-F10*fh(i,j,k)+F18*fh(i,j,k-1) &
-F6*fh(i,j,k-2)+ fh(i,j,k-3))
endif
elseif(Sfz(i,j,k) < ZEO)then
if(k-3 >= kmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)- &
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k+1)-F10*fh(i,j,k)+F18*fh(i,j,k-1) &
-F6*fh(i,j,k-2)+ fh(i,j,k-3))
elseif(k-2 >= kmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfz(i,j,k)*d12dz*(fh(i,j,k-2)-EIT*fh(i,j,k-1)+EIT*fh(i,j,k+1)-fh(i,j,k+2))
elseif(k-1 >= kmin)then
f_rhs(i,j,k)=f_rhs(i,j,k)+ &
Sfz(i,j,k)*d12dz*(-F3*fh(i,j,k-1)-F10*fh(i,j,k)+F18*fh(i,j,k+1) &
-F6*fh(i,j,k+2)+ fh(i,j,k+3))
endif
endif
#endif
enddo
enddo
enddo
return
end subroutine lopsided_fh
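!-----------------------------------------------------------------------------
! usage sketch: one caller-owned fh work array reused across pooled calls.
! The routine name, the field name chi and the SoA value below are
! illustrative placeholders, not identifiers taken from this change.
!-----------------------------------------------------------------------------
subroutine advect_fields_pooled(ex,X,Y,Z,chi,chi_rhs,Sfx,Sfy,Sfz,Symmetry)
implicit none
integer, intent(in) :: ex(1:3),Symmetry
real*8, intent(in) :: X(1:ex(1)),Y(1:ex(2)),Z(1:ex(3))
real*8,dimension(ex(1),ex(2),ex(3)),intent(in) :: chi,Sfx,Sfy,Sfz
real*8,dimension(ex(1),ex(2),ex(3)),intent(inout):: chi_rhs
real*8,dimension(3) :: SoA
! single work array shaped like the fh dummy argument above;
! declared once here and passed to every *_fh call, so no per-call allocation
real*8,dimension(-2:ex(1),-2:ex(2),-2:ex(3)) :: fh_work
SoA = 1.d0
call lopsided_fh(ex,X,Y,Z,chi,chi_rhs,Sfx,Sfy,Sfz,Symmetry,SoA,fh_work)
! further advection calls for other fields would reuse the same fh_work
return
end subroutine advect_fields_pooled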
#elif (ghost_width == 4)
! sixth order code
! Compute advection terms in right hand sides of field equations
@@ -560,8 +712,7 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction
@@ -777,8 +928,7 @@ subroutine lopsided(ex,X,Y,Z,f,f_rhs,Sfx,Sfy,Sfz,Symmetry,SoA)
! upper bound set ex-1 only for efficiency,
! the loop body will set ex 0 also
!$omp parallel do collapse(2) private(i,j,k)
do k=1,ex(3)-1
do j=1,ex(2)-1
do i=1,ex(1)-1
! x direction

View File

@@ -16,12 +16,6 @@ include makefile.inc
.cu.o:
$(Cu) $(CUDA_APP_FLAGS) -c $< -o $@ $(CUDA_LIB_PATH)
TwoPunctures.o: TwoPunctures.C
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@
TwoPunctureABE.o: TwoPunctureABE.C
${CXX} $(CXXAPPFLAGS) -qopenmp -c $< -o $@
# Input files
C++FILES = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
cgh.o bssn_class.o surface_integral.o ShellPatch.o\
@@ -95,14 +89,14 @@ $(CUDAFILES): bssn_gpu.h gpu_mem.h gpu_rhsSS_mem.h
misc.o : zbesh.o
# projects
ABE: $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS)
$(CLINKER) $(CXXAPPFLAGS) -qopenmp -o $@ $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(LDLIBS)
ABE: $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(LDLIBS)
ABEGPU: $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(C++FILES_GPU) $(F90FILES) $(F77FILES) $(AHFDOBJS) $(CUDAFILES) $(LDLIBS)
TwoPunctureABE: $(TwoPunctureFILES)
$(CLINKER) $(CXXAPPFLAGS) -qopenmp -o $@ $(TwoPunctureFILES) $(LDLIBS)
$(CLINKER) $(CXXAPPFLAGS) -o $@ $(TwoPunctureFILES) $(LDLIBS)
clean:
rm *.o ABE ABEGPU TwoPunctureABE make.log -f

View File

@@ -6,23 +6,25 @@
## Intel oneAPI version with oneMKL (Optimized for performance)
filein = -I/usr/include/ -I${MKLROOT}/include
## Using OpenMP-threaded MKL for parallel performance
## Using sequential MKL (OpenMP disabled for better single-threaded performance)
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lifcore -limf -lpthread -lm -ldl
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl
## Aggressive optimization flags + PGO Phase 2 (profile-guided optimization)
## -fprofile-instr-use: use collected profile data to guide optimization decisions
## (branch prediction, basic block layout, inlining, loop unrolling)
PROFDATA = /home/amss/AMSS-NCKU/pgo_profile/default.profdata
CXXAPPFLAGS = -O3 -march=native -fp-model fast=2 -fma -ipo -qopenmp \
-DMPI_STUB -Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -march=native -fp-model fast=2 -fma -ipo -qopenmp \
## Aggressive optimization flags:
## -O3: Maximum optimization
## -xHost: Optimize for the host CPU architecture (Intel/AMD compatible)
## -fp-model fast=2: Aggressive floating-point optimizations
## -fma: Enable fused multiply-add instructions
## Note: OpenMP has been disabled (-qopenmp removed) due to performance issues
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
-Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
-align array64byte -fpp -I${MKLROOT}/include
f90 = ifx
f77 = ifx
CXX = icpx
CC = icx
CLINKER = icpx
CLINKER = mpiicpx
Cu = nvcc
CUDA_LIB_PATH = -L/usr/lib/cuda/lib64 -I/usr/include -I/usr/lib/cuda/include

View File

@@ -14,11 +14,7 @@ using namespace std;
#include <string.h>
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "macrodef.h"

View File

@@ -24,11 +24,7 @@ using namespace std;
#include <complex.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
namespace misc
{

View File

@@ -20,11 +20,7 @@ using namespace std;
#endif
#include <time.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
class monitor
{

View File

@@ -1,153 +0,0 @@
#ifndef MPI_STUB_H
#define MPI_STUB_H
/*
* MPI Stub Header — single-process shim for AMSS-NCKU ABE solver.
* Provides all MPI types, constants, and functions used in the codebase
* as no-ops or trivial implementations for nprocs=1, myrank=0.
*/
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <time.h>
/* ── Types ─────────────────────────────────────────────────────────── */
typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
typedef int MPI_Request;
typedef int MPI_Group;
typedef struct MPI_Status {
int MPI_SOURCE;
int MPI_TAG;
int MPI_ERROR;
} MPI_Status;
/* ── Constants ─────────────────────────────────────────────────────── */
#define MPI_COMM_WORLD 0
#define MPI_INT 1
#define MPI_DOUBLE 2
#define MPI_DOUBLE_PRECISION 2
#define MPI_DOUBLE_INT 3
#define MPI_SUM 1
#define MPI_MAX 2
#define MPI_MAXLOC 3
#define MPI_STATUS_IGNORE ((MPI_Status *)0)
#define MPI_STATUSES_IGNORE ((MPI_Status *)0)
#define MPI_MAX_PROCESSOR_NAME 256
/* ── Helper: sizeof for MPI_Datatype ──────────────────────────────── */
static inline size_t mpi_stub_sizeof(MPI_Datatype type) {
switch (type) {
case MPI_INT: return sizeof(int);
case MPI_DOUBLE: return sizeof(double);
case MPI_DOUBLE_INT: return sizeof(double) + sizeof(int);
default: return 0;
}
}
/* ── Init / Finalize ──────────────────────────────────────────────── */
static inline int MPI_Init(int *, char ***) { return 0; }
static inline int MPI_Finalize() { return 0; }
/* ── Communicator queries ─────────────────────────────────────────── */
static inline int MPI_Comm_rank(MPI_Comm, int *rank) { *rank = 0; return 0; }
static inline int MPI_Comm_size(MPI_Comm, int *size) { *size = 1; return 0; }
static inline int MPI_Comm_split(MPI_Comm comm, int, int, MPI_Comm *newcomm) {
*newcomm = comm;
return 0;
}
static inline int MPI_Comm_free(MPI_Comm *) { return 0; }
/* ── Group operations ─────────────────────────────────────────────── */
static inline int MPI_Comm_group(MPI_Comm, MPI_Group *group) {
*group = 0;
return 0;
}
static inline int MPI_Group_translate_ranks(MPI_Group, int n,
const int *ranks1, MPI_Group, int *ranks2) {
for (int i = 0; i < n; ++i) ranks2[i] = ranks1[i];
return 0;
}
static inline int MPI_Group_free(MPI_Group *) { return 0; }
/* ── Collective operations ────────────────────────────────────────── */
static inline int MPI_Allreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm) {
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
return 0;
}
static inline int MPI_Iallreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm,
MPI_Request *request) {
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
*request = 0;
return 0;
}
static inline int MPI_Bcast(void *, int, MPI_Datatype, int, MPI_Comm) {
return 0;
}
static inline int MPI_Barrier(MPI_Comm) { return 0; }
/* ── Point-to-point (never reached with nprocs=1) ─────────────────── */
static inline int MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm) {
return 0;
}
static inline int MPI_Recv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *) {
return 0;
}
static inline int MPI_Isend(const void *, int, MPI_Datatype, int, int, MPI_Comm,
MPI_Request *req) {
*req = 0;
return 0;
}
static inline int MPI_Irecv(void *, int, MPI_Datatype, int, int, MPI_Comm,
MPI_Request *req) {
*req = 0;
return 0;
}
/* ── Completion ───────────────────────────────────────────────────── */
static inline int MPI_Wait(MPI_Request *, MPI_Status *) { return 0; }
static inline int MPI_Waitall(int, MPI_Request *, MPI_Status *) { return 0; }
/* ── Utility ──────────────────────────────────────────────────────── */
static inline int MPI_Abort(MPI_Comm, int error_code) {
std::fprintf(stderr, "MPI_Abort called with error code %d\n", error_code);
std::exit(error_code);
return 0;
}
static inline double MPI_Wtime() {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (double)ts.tv_sec + (double)ts.tv_nsec * 1.0e-9;
}
static inline int MPI_Get_processor_name(char *name, int *resultlen) {
const char *stub_name = "localhost";
std::strcpy(name, stub_name);
*resultlen = (int)std::strlen(stub_name);
return 0;
}
#endif /* MPI_STUB_H */

View File

@@ -24,11 +24,7 @@ using namespace std;
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
namespace parameters
{

View File

@@ -30,11 +30,7 @@ using namespace std;
#include <sys/time.h>
#include <sys/resource.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
/* Real time */
#define TimerSignal SIGALRM

View File

@@ -109,33 +109,23 @@
if( RK4 == 0 ) then
!$omp parallel workshare
f1 = f0 + HLF * dT * f_rhs
!$omp end parallel workshare
elseif(RK4 == 1 ) then
!$omp parallel workshare
f_rhs = f_rhs + TWO * f1
!$omp end parallel workshare
!$omp parallel workshare
f1 = f0 + HLF * dT * f1
!$omp end parallel workshare
elseif(RK4 == 2 ) then
!$omp parallel workshare
f_rhs = f_rhs + TWO * f1
!$omp end parallel workshare
!$omp parallel workshare
f1 = f0 + dT * f1
!$omp end parallel workshare
elseif( RK4 == 3 ) then
!$omp parallel workshare
f1 = f0 +F1o6 * dT *(f1 + f_rhs)
!$omp end parallel workshare
else
@@ -144,7 +134,7 @@
endif
return
end subroutine rungekutta4_rout
!-----------------------------------------------------------------------------
@@ -225,19 +215,15 @@
if( RK4 == 0 ) then
!$omp parallel workshare
f1 = f0 + dT * f_rhs
!$omp end parallel workshare
else
!$omp parallel workshare
f1 = f0 + HLF * dT * (f1+f_rhs)
!$omp end parallel workshare
endif
return
end subroutine icn_rout
!~~~~~~~~~~~~~~~~~~
@@ -253,10 +239,8 @@
real*8, dimension(ex(1),ex(2),ex(3)),intent(in) ::f_rhs
real*8, dimension(ex(1),ex(2),ex(3)),intent(out) ::f1
!$omp parallel workshare
f1 = f0 + dT * f_rhs
!$omp end parallel workshare
return
end subroutine euler_rout

View File

@@ -19,11 +19,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "cgh.h"
#include "ShellPatch.h"

View File

@@ -18,11 +18,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "microdef.h"

View File

@@ -3,11 +3,7 @@
#include <math.h>
#include <string.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "util_Table.h"
#include "cctk.h"

View File

@@ -20,11 +20,7 @@ using namespace std;
#include <math.h>
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "cgh.h"
@@ -224,9 +220,16 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
pox[2][n] = rex * nz_g[n];
}
double *shellf;
shellf = new double[n_tot * InList];
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
int mp, Lp, Nmin, Nmax;
mp = n_tot / cpusize;
Lp = n_tot - cpusize * mp;
if (Lp > myrank)
{
Nmin = myrank * mp + myrank;
@@ -238,11 +241,6 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
Nmax = Nmin + mp - 1;
}
double *shellf;
shellf = new double[n_tot * InList];
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
//|~~~~~> Integrate the dot product of Dphi with the surface normal.
double *RP_out, *IP_out;
@@ -365,17 +363,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -567,17 +556,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, Comm_here);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, Comm_here);
//|------= Free memory.
@@ -755,17 +735,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH, var *Rpsi4
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -1013,17 +984,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -1457,17 +1419,8 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH,
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -1901,17 +1854,8 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH,
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -2096,17 +2040,8 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch2 *GH, var *
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -2291,17 +2226,8 @@ void surface_integral::surf_Wave(double rex, int lev, NullShellPatch *GH, var *R
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.
@@ -2388,9 +2314,25 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
pox[2][n] = rex * nz_g[n];
}
double *shellf;
shellf = new double[n_tot * InList];
// we have assumed there is only one box on this level,
// so we do not need loop boxes
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
double Mass_out = 0;
double ang_outx, ang_outy, ang_outz;
double p_outx, p_outy, p_outz;
ang_outx = ang_outy = ang_outz = 0.0;
p_outx = p_outy = p_outz = 0.0;
const double f1o8 = 0.125;
int mp, Lp, Nmin, Nmax;
mp = n_tot / cpusize;
Lp = n_tot - cpusize * mp;
if (Lp > myrank)
{
Nmin = myrank * mp + myrank;
@@ -2402,20 +2344,6 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
Nmax = Nmin + mp - 1;
}
double *shellf;
shellf = new double[n_tot * InList];
// we have assumed there is only one box on this level,
// so we do not need loop boxes
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
double Mass_out = 0;
double ang_outx, ang_outy, ang_outz;
double p_outx, p_outy, p_outz;
ang_outx = ang_outy = ang_outz = 0.0;
p_outx = p_outy = p_outz = 0.0;
const double f1o8 = 0.125;
double Chi, Psi;
double Gxx, Gxy, Gxz, Gyy, Gyz, Gzz;
double gupxx, gupxy, gupxz, gupyy, gupyz, gupzz;
@@ -2536,13 +2464,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
}
}
{
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
double scalar_in[7];
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
}
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef GaussInt
mass = mass * rex * rex * dphi * factor;
@@ -2805,13 +2735,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
}
}
{
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
double scalar_in[7];
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, Comm_here);
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
}
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, Comm_here);
#ifdef GaussInt
mass = mass * rex * rex * dphi * factor;
@@ -3088,13 +3020,15 @@ void surface_integral::surf_MassPAng(double rex, int lev, ShellPatch *GH, var *c
}
}
{
double scalar_out[7] = {Mass_out, ang_outx, ang_outy, ang_outz, p_outx, p_outy, p_outz};
double scalar_in[7];
MPI_Allreduce(scalar_out, scalar_in, 7, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
mass = scalar_in[0]; sx = scalar_in[1]; sy = scalar_in[2]; sz = scalar_in[3];
px = scalar_in[4]; py = scalar_in[5]; pz = scalar_in[6];
}
MPI_Allreduce(&Mass_out, &mass, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outx, &sx, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outy, &sy, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&ang_outz, &sz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outx, &px, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outy, &py, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&p_outz, &pz, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef GaussInt
mass = mass * rex * rex * dphi * factor;
@@ -3673,17 +3607,8 @@ void surface_integral::surf_Wave(double rex, cgh *GH, ShellPatch *SH,
}
//|------+ Communicate and sum the results from each processor.
{
double *RPIP_out = new double[2 * NN];
double *RPIP = new double[2 * NN];
memcpy(RPIP_out, RP_out, NN * sizeof(double));
memcpy(RPIP_out + NN, IP_out, NN * sizeof(double));
MPI_Allreduce(RPIP_out, RPIP, 2 * NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
memcpy(RP, RPIP, NN * sizeof(double));
memcpy(IP, RPIP + NN, NN * sizeof(double));
delete[] RPIP_out;
delete[] RPIP;
}
MPI_Allreduce(RP_out, RP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(IP_out, IP, NN, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//|------= Free memory.

View File

@@ -18,11 +18,7 @@ using namespace std;
#include <math.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "macrodef.h"

View File

@@ -20,11 +20,7 @@ using namespace std;
#include <map.h>
#endif
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "misc.h"
#include "macrodef.h"

View File

@@ -9,11 +9,7 @@
using namespace std;
#include <time.h>
#ifdef MPI_STUB
#include "mpi_stub.h"
#else
#include <mpi.h>
#endif
#include "var.h"

View File

@@ -10,31 +10,18 @@
import AMSS_NCKU_Input as input_data
import subprocess
import time
import os
## OpenMP configuration for threaded Fortran kernels
## OMP_NUM_THREADS: set to number of physical cores (not hyperthreads)
## OMP_PROC_BIND: bind threads to cores to avoid migration overhead
## OMP_STACKSIZE: each thread needs stack space for fh arrays (~3.6MB)
if "OMP_NUM_THREADS" not in os.environ:
os.environ["OMP_NUM_THREADS"] = "96"
os.environ["OMP_STACKSIZE"] = "16M"
os.environ["OMP_PROC_BIND"] = "close"
os.environ["OMP_PLACES"] = "cores"
## CPU core binding configuration using taskset
## taskset ensures all child processes inherit the CPU affinity mask
## This forces make and all compiler processes to use only nohz_full cores (4-55, 60-111)
## Format: taskset -c 4-55,60-111 ensures processes only run on these cores
NUMACTL_CPU_BIND = "taskset -c 16-47,64-95"
#NUMACTL_CPU_BIND = "taskset -c 0-111"
#NUMACTL_CPU_BIND = "taskset -c 16-47,64-95"
#NUMACTL_CPU_BIND = "taskset -c 8-15"
NUMACTL_CPU_BIND = ""
## Build parallelism configuration
## Use nohz_full cores (4-55, 60-111) for compilation: 52 + 52 = 104 cores
## Set make -j to utilize available cores for faster builds
BUILD_JOBS = 16
BUILD_JOBS = 64
##################################################################
@@ -128,28 +115,28 @@ def run_ABE():
print( )
## Define the command to run; cast other values to strings as needed
if (input_data.GPU_Calculation == "no"):
run_command = NUMACTL_CPU_BIND + " ./ABE"
run_command_outfile = "ABE_out.log"
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
mpi_command_outfile = "ABE_out.log"
elif (input_data.GPU_Calculation == "yes"):
run_command = NUMACTL_CPU_BIND + " ./ABEGPU"
run_command_outfile = "ABEGPU_out.log"
## Execute the command and stream output
run_process = subprocess.Popen(run_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
mpi_command_outfile = "ABEGPU_out.log"
## Execute the MPI command and stream output
mpi_process = subprocess.Popen(mpi_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
## Write ABE run output to file while printing to stdout
with open(run_command_outfile, 'w') as file0:
with open(mpi_command_outfile, 'w') as file0:
## Read and print output lines; also write each line to file
for line in run_process.stdout:
for line in mpi_process.stdout:
print(line, end='') # stream output in real time
file0.write(line) # write the line to file
file0.flush() # flush to ensure each line is written immediately (optional)
file0.close()
## Wait for the process to finish
run_return_code = run_process.wait()
mpi_return_code = mpi_process.wait()
print( )
print( " The ABE/ABEGPU simulation is finished " )
@@ -166,14 +153,13 @@ def run_ABE():
## Run the AMSS-NCKU TwoPuncture program TwoPunctureABE
def run_TwoPunctureABE():
tp_time1=time.time()
print( )
print( " Running the AMSS-NCKU executable file TwoPunctureABE " )
print( )
## Define the command to run
#TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
TwoPuncture_command = " ./TwoPunctureABE"
TwoPuncture_command = NUMACTL_CPU_BIND + " ./TwoPunctureABE"
TwoPuncture_command_outfile = "TwoPunctureABE_out.log"
## Execute the command with subprocess.Popen and stream output
@@ -194,9 +180,7 @@ def run_TwoPunctureABE():
print( )
print( " The TwoPunctureABE simulation is finished " )
print( )
tp_time2=time.time()
et=tp_time2-tp_time1
print(f"Used time: {et}")
return
##################################################################

View File

@@ -1,97 +0,0 @@
# AMSS-NCKU PGO Profile Analysis Report
## 1. Profiling Environment
| Item | Value |
|------|-------|
| Compiler | Intel oneAPI DPC++/C++ 2025.3.0 (icpx/ifx) |
| Instrumentation Flag | `-fprofile-instr-generate` |
| Optimization Level (instrumented) | `-O2 -xHost -fma` |
| MPI Processes | 1 (single process to avoid MPI+instrumentation deadlock) |
| Profile File | `default_9725750769337483397_0.profraw` (327 KB) |
| Merged Profile | `default.profdata` (394 KB) |
| llvm-profdata | `/home/intel/oneapi/compiler/2025.3/bin/compiler/llvm-profdata` |
## 2. Reduced Simulation Parameters (for profiling run)
| Parameter | Production Value | Profiling Value |
|-----------|-----------------|-----------------|
| MPI_processes | 64 | 1 |
| grid_level | 9 | 4 |
| static_grid_level | 5 | 3 |
| static_grid_number | 96 | 24 |
| moving_grid_number | 48 | 16 |
| largest_box_xyz_max | 320^3 | 160^3 |
| Final_Evolution_Time | 1000.0 | 10.0 |
| Evolution_Step_Number | 10,000,000 | 1,000 |
| Detector_Number | 12 | 2 |
## 3. Profile Summary
| Metric | Value |
|--------|-------|
| Total instrumented functions | 1,392 |
| Functions with non-zero counts | 117 (8.4%) |
| Functions with zero counts | 1,275 (91.6%) |
| Maximum function entry count | 386,459,248 |
| Maximum internal block count | 370,477,680 |
| Total block count | 4,198,023,118 |
## 4. Top 20 Hotspot Functions
| Rank | Total Count | Max Block Count | Function | Category |
|------|------------|-----------------|----------|----------|
| 1 | 1,241,601,732 | 370,477,680 | `polint_` | Interpolation |
| 2 | 755,994,435 | 230,156,640 | `prolong3_` | Grid prolongation |
| 3 | 667,964,095 | 3,697,792 | `compute_rhs_bssn_` | BSSN RHS evolution |
| 4 | 539,736,051 | 386,459,248 | `symmetry_bd_` | Symmetry boundary |
| 5 | 277,310,808 | 53,170,728 | `lopsided_` | Lopsided FD stencil |
| 6 | 155,534,488 | 94,535,040 | `decide3d_` | 3D grid decision |
| 7 | 119,267,712 | 19,266,048 | `rungekutta4_rout_` | RK4 time integrator |
| 8 | 91,574,616 | 48,824,160 | `kodis_` | Kreiss-Oliger dissipation |
| 9 | 67,555,389 | 43,243,680 | `fderivs_` | Finite differences |
| 10 | 55,296,000 | 42,246,144 | `misc::fact(int)` | Factorial utility |
| 11 | 43,191,071 | 27,663,328 | `fdderivs_` | 2nd-order FD derivatives |
| 12 | 36,233,965 | 22,429,440 | `restrict3_` | Grid restriction |
| 13 | 24,698,512 | 17,231,520 | `polin3_` | Polynomial interpolation |
| 14 | 22,962,942 | 20,968,768 | `copy_` | Data copy |
| 15 | 20,135,696 | 17,259,168 | `Ansorg::barycentric(...)` | Spectral interpolation |
| 16 | 14,650,224 | 7,224,768 | `Ansorg::barycentric_omega(...)` | Spectral weights |
| 17 | 13,242,296 | 2,871,920 | `global_interp_` | Global interpolation |
| 18 | 12,672,000 | 7,734,528 | `sommerfeld_rout_` | Sommerfeld boundary |
| 19 | 6,872,832 | 1,880,064 | `sommerfeld_routbam_` | Sommerfeld boundary (BAM) |
| 20 | 5,709,900 | 2,809,632 | `l2normhelper_` | L2 norm computation |
## 5. Hotspot Category Breakdown
Top 20 functions account for ~98% of total execution counts:
| Category | Functions | Combined Count | Share |
|----------|-----------|---------------|-------|
| Interpolation / Prolongation / Restriction | polint_, prolong3_, restrict3_, polin3_, global_interp_, Ansorg::* | ~2,093M | ~50% |
| BSSN RHS + FD stencils | compute_rhs_bssn_, lopsided_, fderivs_, fdderivs_ | ~1,056M | ~25% |
| Boundary conditions | symmetry_bd_, sommerfeld_rout_, sommerfeld_routbam_ | ~559M | ~13% |
| Time integration | rungekutta4_rout_ | ~119M | ~3% |
| Dissipation | kodis_ | ~92M | ~2% |
| Utilities | misc::fact, decide3d_, copy_, l2normhelper_ | ~256M | ~6% |
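The Share column follows directly from each category's combined count divided by the total block count in Section 3 (4,198,023,118 ≈ 4,198 M); for example, for the BSSN RHS + FD stencil row:

$$
\frac{667{,}964{,}095 + 277{,}310{,}808 + 67{,}555{,}389 + 43{,}191{,}071}{4{,}198{,}023{,}118}
\approx \frac{1{,}056\,\mathrm{M}}{4{,}198\,\mathrm{M}} \approx 25\%
$$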
## 6. Conclusions
1. **Profile data is valid**: 1,392 functions instrumented, 117 exercised with ~4.2 billion total counts.
2. **Hotspot concentration is high**: Top 5 functions alone account for ~76% of all counts, which is ideal for PGO — the compiler has strong branch/layout optimization targets.
3. **Fortran numerical kernels dominate**: `polint_`, `prolong3_`, `compute_rhs_bssn_`, `symmetry_bd_`, `lopsided_` are all Fortran routines in the inner evolution loop. PGO will optimize their branch prediction and basic block layout.
4. **91.6% of functions have zero counts**: These are code paths for unused features (GPU, BSSN-EScalar, BSSN-EM, Z4C, etc.). PGO will deprioritize them, improving instruction cache utilization.
5. **Profile is representative**: Despite the reduced grid size, the code path coverage matches production — the same kernels (RHS, prolongation, restriction, boundary) are exercised. PGO branch probabilities from this profile will transfer well to full-scale runs.
## 7. PGO Phase 2 Usage
To apply the profile, use the following flags in `makefile.inc`:
```makefile
CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
-fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
-Dfortran3 -Dnewc -I${MKLROOT}/include
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
-fprofile-instr-use=/home/amss/AMSS-NCKU/pgo_profile/default.profdata \
-align array64byte -fpp -I${MKLROOT}/include
```

Binary file not shown.