- Add mpi_stub.h providing all MPI types/constants/functions as no-ops (nprocs=1, myrank=0), with a memcpy-based Allreduce and a clock_gettime-based Wtime
- Replace #include <mpi.h> with a conditional #ifdef MPI_STUB in 31 files (19 headers + 12 source files), preserving the ability to build with real MPI
- Change makefile.inc: switch CLINKER from mpiicpx to icpx; add -DMPI_STUB to CXXAPPFLAGS
- Update makefile_and_run.py: run ./ABE directly instead of via mpirun -np N
- Set MPI_processes=1 in AMSS_NCKU_Input.py

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
154 lines
5.5 KiB
C++
154 lines
5.5 KiB
C++
#ifndef MPI_STUB_H
|
|
#define MPI_STUB_H
|
|
|
|
/*
|
|
* MPI Stub Header — single-process shim for AMSS-NCKU ABE solver.
|
|
* Provides all MPI types, constants, and functions used in the codebase
|
|
* as no-ops or trivial implementations for nprocs=1, myrank=0.
|
|
*/
|
|
|
|
#include <cstring>
|
|
#include <cstdlib>
|
|
#include <cstdio>
|
|
#include <time.h>
|
|
|
|
/* ── Types ─────────────────────────────────────────────────────────── */

typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
typedef int MPI_Request;
typedef int MPI_Group;

typedef struct MPI_Status {
    int MPI_SOURCE;
    int MPI_TAG;
    int MPI_ERROR;
} MPI_Status;

/* Element type corresponding to MPI_DOUBLE_INT (the pair type used with
 * MPI_MAXLOC): a {double, int} struct, matching what callers' buffers
 * actually contain. */
typedef struct mpi_stub_double_int {
    double value;
    int index;
} mpi_stub_double_int;

/* ── Constants ─────────────────────────────────────────────────────── */

#define MPI_COMM_WORLD 0

#define MPI_INT 1
#define MPI_DOUBLE 2
#define MPI_DOUBLE_PRECISION 2
#define MPI_DOUBLE_INT 3

#define MPI_SUM 1
#define MPI_MAX 2
#define MPI_MAXLOC 3

#define MPI_STATUS_IGNORE ((MPI_Status *)0)
#define MPI_STATUSES_IGNORE ((MPI_Status *)0)

#define MPI_MAX_PROCESSOR_NAME 256

/* ── Helper: sizeof for MPI_Datatype ──────────────────────────────── */

/*
 * Returns the per-element byte size of the given datatype, used by the
 * memcpy-based Allreduce/Iallreduce stubs.  Unknown types yield 0
 * (the collectives then copy nothing rather than overrun a buffer).
 */
static inline size_t mpi_stub_sizeof(MPI_Datatype type) {
    switch (type) {
    case MPI_INT:    return sizeof(int);
    case MPI_DOUBLE: return sizeof(double);
    /* BUGFIX: was sizeof(double) + sizeof(int) (12), which ignores the
     * struct's trailing alignment padding.  Caller buffers hold arrays of
     * {double, int} structs — typically 16 bytes each on LP64 — so the
     * packed sum under-copied MAXLOC data.  Use the struct's sizeof. */
    case MPI_DOUBLE_INT: return sizeof(mpi_stub_double_int);
    default: return 0;
    }
}
|
|
|
|
/* ── Init / Finalize ──────────────────────────────────────────────── */

/* No runtime to start or shut down; both calls simply report success. */
static inline int MPI_Init(int *argc, char ***argv) {
    (void)argc;
    (void)argv;
    return 0;
}

static inline int MPI_Finalize() { return 0; }
|
|
|
|
/* ── Communicator queries ─────────────────────────────────────────── */
|
|
|
|
static inline int MPI_Comm_rank(MPI_Comm, int *rank) { *rank = 0; return 0; }
|
|
static inline int MPI_Comm_size(MPI_Comm, int *size) { *size = 1; return 0; }
|
|
static inline int MPI_Comm_split(MPI_Comm comm, int, int, MPI_Comm *newcomm) {
|
|
*newcomm = comm;
|
|
return 0;
|
|
}
|
|
static inline int MPI_Comm_free(MPI_Comm *) { return 0; }
|
|
|
|
/* ── Group operations ─────────────────────────────────────────────── */
|
|
|
|
static inline int MPI_Comm_group(MPI_Comm, MPI_Group *group) {
|
|
*group = 0;
|
|
return 0;
|
|
}
|
|
static inline int MPI_Group_translate_ranks(MPI_Group, int n,
|
|
const int *ranks1, MPI_Group, int *ranks2) {
|
|
for (int i = 0; i < n; ++i) ranks2[i] = ranks1[i];
|
|
return 0;
|
|
}
|
|
static inline int MPI_Group_free(MPI_Group *) { return 0; }
|
|
|
|
/* ── Collective operations ────────────────────────────────────────── */
|
|
|
|
static inline int MPI_Allreduce(const void *sendbuf, void *recvbuf,
|
|
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm) {
|
|
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
|
|
return 0;
|
|
}
|
|
|
|
static inline int MPI_Iallreduce(const void *sendbuf, void *recvbuf,
|
|
int count, MPI_Datatype datatype, MPI_Op, MPI_Comm,
|
|
MPI_Request *request) {
|
|
std::memcpy(recvbuf, sendbuf, count * mpi_stub_sizeof(datatype));
|
|
*request = 0;
|
|
return 0;
|
|
}
|
|
|
|
static inline int MPI_Bcast(void *, int, MPI_Datatype, int, MPI_Comm) {
|
|
return 0;
|
|
}
|
|
|
|
static inline int MPI_Barrier(MPI_Comm) { return 0; }
|
|
|
|
/* ── Point-to-point (never reached with nprocs=1) ─────────────────── */
|
|
|
|
static inline int MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm) {
|
|
return 0;
|
|
}
|
|
static inline int MPI_Recv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *) {
|
|
return 0;
|
|
}
|
|
static inline int MPI_Isend(const void *, int, MPI_Datatype, int, int, MPI_Comm,
|
|
MPI_Request *req) {
|
|
*req = 0;
|
|
return 0;
|
|
}
|
|
static inline int MPI_Irecv(void *, int, MPI_Datatype, int, int, MPI_Comm,
|
|
MPI_Request *req) {
|
|
*req = 0;
|
|
return 0;
|
|
}
|
|
|
|
/* ── Completion ───────────────────────────────────────────────────── */
|
|
|
|
static inline int MPI_Wait(MPI_Request *, MPI_Status *) { return 0; }
|
|
static inline int MPI_Waitall(int, MPI_Request *, MPI_Status *) { return 0; }
|
|
|
|
/* ── Utility ──────────────────────────────────────────────────────── */
|
|
|
|
/* Report the error code on stderr and terminate the (single) process. */
static inline int MPI_Abort(MPI_Comm comm, int error_code) {
    (void)comm;
    std::fprintf(stderr, "MPI_Abort called with error code %d\n", error_code);
    std::exit(error_code);
    return 0; /* unreachable: exit() does not return */
}
|
|
|
|
/* Wall-clock seconds from a monotonic clock (immune to wall-time steps). */
static inline double MPI_Wtime() {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (double)now.tv_sec + 1.0e-9 * (double)now.tv_nsec;
}
|
|
|
|
/* There is no cluster; report a fixed host name.  Per the MPI contract the
 * caller's buffer holds MPI_MAX_PROCESSOR_NAME chars, far more than the
 * 10 bytes copied here. */
static inline int MPI_Get_processor_name(char *name, int *resultlen) {
    static const char host[] = "localhost";
    memcpy(name, host, sizeof host);   /* includes the terminating NUL */
    *resultlen = (int)(sizeof host) - 1;
    return 0;
}
|
|
|
|
#endif /* MPI_STUB_H */
|