#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <omp.h>
#include "mpi.h"
#define OUT_FILE "out_hybrid.txt"
#define PIPE_MSG 0 //message tag: data for the next pipe node
#define END_MSG 1 //message tag: finish the pipeline
#define OPENMP 12 //number of OMP threads
//number of Monte-Carlo (MC) simulations in each chunk
#define MC 100
#define NP 10 //number of processes (client axis)
#define CMC 100 //number of MC chunks
#define N 1000 //number of clients
#define M 5 //number of phases
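/*
 * Layout of the hybrid computation: the N clients are split across NP MPI
 * ranks that form a pipeline (N/NP clients per rank). Rank 0 initializes
 * CMC chunks of MC Monte-Carlo trials and feeds them downstream; every
 * rank advances the sojourn-time recursion for its share of clients and
 * forwards the state to the next rank; the last rank returns the results
 * to rank 0. Inside each rank, the MC trials of a chunk are distributed
 * over OPENMP threads.
 */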
void print_results(double *results, double time, int *lambda) {
    FILE *fp;
    const char *DATA_FILE = OUT_FILE;
    fp = fopen(DATA_FILE, "w");
    fprintf(fp, "%d%s%d%s%d%s%d\n", N, ", ", M, ", ", lambda[0], ", ", lambda[M]);
    time = MPI_Wtime() - time;
    fprintf(fp, "%f\n", time);
    for (int i = 0; i < MC * CMC - 1; i++) fprintf(fp, "%f%s", results[i], ", ");
    fprintf(fp, "%f\n", results[MC * CMC - 1]);
    fclose(fp);
}

void node(int numprocs, int myid, gsl_rng *ran, int *lambda) {
    int nmcb = 0; //nmcb: number of MC batches completed so far
    int nmcb_id = 0; //number of batches to process in the current pass
    int i, j, k, t, u;
    double time = MPI_Wtime(); //program start time
    MPI_Status Status;
    double tau[MC] = {0}; //interarrival times
    double st[MC] = {0}; //sojourn times
    //sojourn time of the previous customer in each phase
    double st_prev[M][MC] = {0};
    double results[MC * CMC] = {0}; //overall results
    double temp; //aux variable
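    /*
     * Pipeline loop: rank 0 pushes all CMC chunks in a single pass and then
     * leaves the loop; every other rank blocks on MPI_Recv until the upstream
     * rank delivers a chunk (PIPE_MSG) or signals completion (END_MSG).
     */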
    while (1) {
        nmcb_id = CMC; //the root rank pushes all CMC chunks in one pass
        if (myid != 0) { //receive data from the previous node
            MPI_Recv(&tau, MC, MPI_DOUBLE, myid - 1, MPI_ANY_TAG, MPI_COMM_WORLD, &Status);
            if (Status.MPI_TAG == END_MSG) break;
            MPI_Recv(&st, MC, MPI_DOUBLE, myid - 1, MPI_ANY_TAG, MPI_COMM_WORLD, &Status);
            MPI_Recv(&st_prev, MC * M, MPI_DOUBLE, myid - 1, MPI_ANY_TAG, MPI_COMM_WORLD, &Status);
            //non-root ranks run the chunk loop below once per received batch
            nmcb_id = 1;
        }
        for (k = 0; k < nmcb_id; k++) {
            omp_set_num_threads(OPENMP);
            //the GSL generator is shared, hence the critical sections around its calls;
            //temp must be private, otherwise the threads would race on it
            #pragma omp parallel for default(shared) private(j, i, t, u, temp)
            for (j = 0; j < MC; j++) {
                if (myid == 0) { //init each MC trial (main process)
                    #pragma omp critical
                    {
                        tau[j] = gsl_ran_exponential(ran, 1.0 / lambda[0]);
                    }
                    st[j] = 0;
                    //reset the phase history of this trial only; resetting the
                    //whole array would race with the threads working on other trials
                    for (u = 0; u < M; u++) st_prev[u][j] = 0.;
                }
                for (i = 0; i < N/NP; i++) {
                    for (t = 0; t < M; t++) {
                        //recurrent equation for the sojourn time in phase t + 1
                        #pragma omp critical
                        {
                            temp = gsl_ran_exponential(ran, 1.0 / lambda[t + 1]);
                        }
                        st[j] += temp + fmax(0.0, st_prev[t][j] - st[j] - tau[j]);
                        st_prev[t][j] = st[j];
                    }
                }
                results[j + MC * nmcb] = st[j];
            }
            nmcb++;
            if (myid != numprocs - 1) {
                //if not the last process, send data to the next process
                MPI_Send(&tau, MC, MPI_DOUBLE, myid + 1, PIPE_MSG, MPI_COMM_WORLD);
                MPI_Send(&st, MC, MPI_DOUBLE, myid + 1, PIPE_MSG, MPI_COMM_WORLD);
                MPI_Send(&st_prev, MC * M, MPI_DOUBLE, myid + 1, PIPE_MSG, MPI_COMM_WORLD);
            }
        }
        //the main process leaves the while cycle after feeding all chunks
        if (myid == 0) break;
    }
    //when finished, send the end message to the next pipe node
    if (myid != numprocs - 1)
        MPI_Send(&tau, MC, MPI_DOUBLE, myid + 1, END_MSG, MPI_COMM_WORLD);
    //the last process sends the collected results back to the root
    if (myid == numprocs - 1)
        MPI_Send(&results, MC * CMC, MPI_DOUBLE, 0, PIPE_MSG, MPI_COMM_WORLD);
    //print results
    if (myid == 0) {
        MPI_Recv(&results, MC * CMC, MPI_DOUBLE, numprocs - 1, MPI_ANY_TAG, MPI_COMM_WORLD, &Status);
        print_results(&results[0], time, &lambda[0]);
    }
}

int main(int argc, char **argv) {
    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int numprocs;
    int myid;
    gsl_rng *ran; //random generator
    //parameters of the exponential distributions
    int lambda[M + 1] = {0};
    //init MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Get_processor_name(processor_name, &namelen);
    //init parameters of the exponential distributions
    lambda[0] = 30000;
    for (int i = 1; i < M; i++) lambda[i] = lambda[i - 1] - 25000 / M;
    lambda[M] = 5000;
    fprintf(stdout, "Process %d of %d is on %s\n", myid, numprocs, processor_name);
    fflush(stdout);
    //init random generator
    gsl_rng_env_setup();
    ran = gsl_rng_alloc(gsl_rng_ranlxs2);
    gsl_rng_set(ran, (long) (myid) * 22); //rank-dependent seed
    //process
    node(numprocs, myid, ran, lambda);
    //finish
    gsl_rng_free(ran);
    MPI_Finalize();
    return (0);
}
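/*
 * A minimal build-and-run sketch (assuming GCC with the usual MPI compiler
 * wrapper and an installed GSL; the wrapper name, flags, and the source
 * file name "hybrid.c" are assumptions that depend on the local setup):
 *
 *   mpicc -O2 -fopenmp hybrid.c -o hybrid -lgsl -lgslcblas -lm
 *   mpirun -np 10 ./hybrid   # 10 ranks to match NP defined above
 */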