From 153c4381a76144a9880fe03453d7d5fbdcad691c Mon Sep 17 00:00:00 2001
From: josh
Date: Mon, 8 Dec 2008 00:41:36 +0000
Subject: [PATCH] changed TASK_SIZE into a variable - task_size - and added a CLI option for it

git-svn-id: svn://anubis/gvsu@335 45c1a28c-8058-47b2-ae61-ca45b979098e
---
 cs677/final/mpi-fractals.cc | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/cs677/final/mpi-fractals.cc b/cs677/final/mpi-fractals.cc
index 7b220e2..3f1ef4f 100644
--- a/cs677/final/mpi-fractals.cc
+++ b/cs677/final/mpi-fractals.cc
@@ -14,8 +14,6 @@
 #include "FatouComputation.h"
 using namespace std;
 
-/* a "task" will be processing TASK_SIZE pixels */
-#define TASK_SIZE 100
 #define PROGNAME "Josh's CS677 Final : MPI Fractal Generator"
 #define getXVirt(x) (((x) - (width >> 1)) * zoom + x_center)
 #define getYVirt(y) ((-((y) - (height >> 1))) * zoom + y_center)
@@ -30,6 +28,8 @@ void sendWindowVals(double * winVals, int world_size);
 static double x_center = 0.0;
 static double y_center = 0.0;
 static double zoom = 1/300.0;
+/* a "task" will be processing task_size pixels */
+static int task_size = 100;
 
 int main(int argc, char * argv[])
 {
@@ -68,6 +68,10 @@ int main(int argc, char * argv[])
         {
             omp_set_num_threads(1);
         }
+        else if (!strncmp(argv[i], "-s", 2))
+        {
+            task_size = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
+        }
     }
     getSizes(&my_rank, &world_size, &nprocs);
     if (my_rank == 0)
@@ -94,7 +98,7 @@ int main(int argc, char * argv[])
            break;
     }
 
-    unsigned int * taskVals = new unsigned int[TASK_SIZE + 1];
+    unsigned int * taskVals = new unsigned int[task_size + 1];
     double window_vals[4];
     if (my_rank == 0)
     {
@@ -284,30 +288,30 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
             if (pixel < num_pixels)
             {
                 MPI_Send(&pixel, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
-                pixel += TASK_SIZE;
+                pixel += task_size;
             }
             else
             {
                 MPI_Send(&done_val, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
             }
         }
 
-        int num_tasks = (num_pixels + TASK_SIZE - 1) / TASK_SIZE;
+        int num_tasks = (num_pixels + task_size - 1) / task_size;
         for (int received = 0; received < num_tasks; received++)
         {
-            MPI_Recv(taskVals, TASK_SIZE + 1, MPI_INT,
+            MPI_Recv(taskVals, task_size + 1, MPI_INT,
                     MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &mpi_status);
             if (pixel < num_pixels)
            {
                 MPI_Send(&pixel, 1, MPI_INT, mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
-                pixel += TASK_SIZE;
+                pixel += task_size;
             }
             else
             {
                 MPI_Send(&done_val, 1, MPI_INT, mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
             }
 
-            memcpy(pixels + taskVals[0], taskVals + 1, TASK_SIZE * sizeof(int));
+            memcpy(pixels + taskVals[0], taskVals + 1, task_size * sizeof(int));
         }
     }
     else
@@ -321,7 +325,7 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
             if (pixel_num < 0) /* exit if we are done */
                 break;
 #pragma omp parallel for
-            for (int i = 0; i < TASK_SIZE; i++)
+            for (int i = 0; i < task_size; i++)
             {
                 int this_pixel_num = pixel_num + i;
                 int x = this_pixel_num % width;
@@ -332,7 +336,7 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
             }
             /* send the computed pixel data to the master node */
             taskVals[0] = pixel_num;
-            MPI_Send(taskVals, TASK_SIZE + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
+            MPI_Send(taskVals, task_size + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
         }
     }
 }
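
Note (not part of the patch): the sketch below is a minimal, stand-alone illustration of how the new -s option and the task chunking behave. The parse_task_size helper, the default of 100 pixels per task, and the 800x600 image size are assumptions made for this example only; the real handling lives in mpi-fractals.cc as patched above.

#include <cstdio>
#include <cstdlib>
#include <cstring>
using namespace std;

/* Hypothetical mirror of the patched "-s" handling: accepts "-s250" (fused)
 * or "-s 250" (separate argument); assumes a value follows the flag. */
static int parse_task_size(int argc, char * argv[], int def)
{
    int task_size = def;
    for (int i = 1; i < argc; i++)
    {
        if (!strncmp(argv[i], "-s", 2))
            task_size = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
    }
    return task_size;
}

int main(int argc, char * argv[])
{
    int task_size = parse_task_size(argc, argv, 100); /* default matches the patch */
    int num_pixels = 800 * 600;                       /* example image size only */
    /* same ceiling division the master uses to count tasks */
    int num_tasks = (num_pixels + task_size - 1) / task_size;
    printf("task_size = %d -> %d tasks\n", task_size, num_tasks);
    return 0;
}

For example, "./a.out -s 250" or "./a.out -s250" would report 1920 tasks for the 800x600 example, while the default of 100 gives 4800.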