changed TASK_SIZE into a variable, task_size, and added a CLI option (-s) for it

git-svn-id: svn://anubis/gvsu@335 45c1a28c-8058-47b2-ae61-ca45b979098e
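
The former compile-time constant TASK_SIZE (100 pixels per task) becomes a runtime variable, task_size, with the same default and a new -s option to override it. A hypothetical invocation (the binary and launcher names are assumed here, not shown in this diff):

    mpirun -np 4 ./fractal -s 200

The parsing below accepts the value attached or detached, so -s200 and -s 200 are equivalent.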
Author: josh
Date:   2008-12-08 00:41:36 +00:00
Parent: 4aa10bec95
Commit: 153c4381a7

@@ -14,8 +14,6 @@
 #include "FatouComputation.h"
 using namespace std;
-/* a "task" will be processing TASK_SIZE pixels */
-#define TASK_SIZE 100
 #define PROGNAME "Josh's CS677 Final : MPI Fractal Generator"
 #define getXVirt(x) (((x) - (width >> 1)) * zoom + x_center)
 #define getYVirt(y) ((-((y) - (height >> 1))) * zoom + y_center)
@@ -30,6 +28,8 @@ void sendWindowVals(double * winVals, int world_size);
 static double x_center = 0.0;
 static double y_center = 0.0;
 static double zoom = 1/300.0;
+/* a "task" will be processing task_size pixels */
+static int task_size = 100;
 int main(int argc, char * argv[])
 {
@@ -68,6 +68,10 @@ int main(int argc, char * argv[])
 {
 omp_set_num_threads(1);
 }
+else if (!strncmp(argv[i], "-s", 2))
+{
+task_size = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
+}
 }
 getSizes(&my_rank, &world_size, &nprocs);
 if (my_rank == 0)
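
The new -s branch reads the value from the same token when strlen(argv[i]) > 2 (the digits follow "-s" directly) and from the next token via argv[++i] otherwise. A minimal standalone sketch of the same idiom (hypothetical helper name, not part of this commit; like the code above, it does not guard against a missing value after the flag):

    #include <cstdlib>
    #include <cstring>

    /* Parse an integer option value that is either attached ("-s200")
     * or detached ("-s 200"); *i is advanced past the consumed token
     * when the value is detached. atoi() yields 0 for non-numeric input. */
    static int parseIntOption(char * argv[], int * i)
    {
        return atoi(strlen(argv[*i]) > 2 ? argv[*i] + 2 : argv[++*i]);
    }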
@@ -94,7 +98,7 @@ int main(int argc, char * argv[])
 break;
 }
-unsigned int * taskVals = new unsigned int[TASK_SIZE + 1];
+unsigned int * taskVals = new unsigned int[task_size + 1];
 double window_vals[4];
 if (my_rank == 0)
 {
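
The + 1 in the allocation above is a header slot: taskVals[0] carries a task's starting pixel index and taskVals[1..task_size] carry the computed pixel values, as the taskVals[0] assignment and the memcpy further down show.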
@@ -284,30 +288,30 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
 if (pixel < num_pixels)
 {
 MPI_Send(&pixel, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
-pixel += TASK_SIZE;
+pixel += task_size;
 }
 else
 {
 MPI_Send(&done_val, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
 }
 }
-int num_tasks = (num_pixels + TASK_SIZE - 1) / TASK_SIZE;
+int num_tasks = (num_pixels + task_size - 1) / task_size;
 for (int received = 0; received < num_tasks; received++)
 {
-MPI_Recv(taskVals, TASK_SIZE + 1, MPI_INT,
+MPI_Recv(taskVals, task_size + 1, MPI_INT,
 MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &mpi_status);
 if (pixel < num_pixels)
 {
 MPI_Send(&pixel, 1, MPI_INT,
 mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
-pixel += TASK_SIZE;
+pixel += task_size;
 }
 else
 {
 MPI_Send(&done_val, 1, MPI_INT,
 mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
 }
-memcpy(pixels + taskVals[0], taskVals + 1, TASK_SIZE * sizeof(int));
+memcpy(pixels + taskVals[0], taskVals + 1, task_size * sizeof(int));
 }
 }
 else
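
Two details in the master loop above: the seeding pass hands every worker a starting pixel index (or done_val when there is no work), and num_tasks uses the ceiling-division idiom (a + b - 1) / b so a trailing partial task is still dispatched. For example, with num_pixels = 1050 and task_size = 100, (1050 + 99) / 100 = 11 tasks, not 10. Note that the workers and the memcpy still process a full task_size pixels per task, so num_pixels is presumably a multiple of task_size here, or the pixel buffer has slack; with the size now user-supplied via -s, that assumption matters.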
@@ -321,7 +325,7 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
 if (pixel_num < 0) /* exit if we are done */
 break;
 #pragma omp parallel for
-for (int i = 0; i < TASK_SIZE; i++)
+for (int i = 0; i < task_size; i++)
 {
 int this_pixel_num = pixel_num + i;
 int x = this_pixel_num % width;
@@ -332,7 +336,7 @@ void draw(int rank, int world_size, int nprocs, int width, int height,
 }
 /* send the computed pixel data to the master node */
 taskVals[0] = pixel_num;
-MPI_Send(taskVals, TASK_SIZE + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
+MPI_Send(taskVals, task_size + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
 }
 }
 }
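
Worker side, for reference: each received pixel_num names a run of task_size consecutive pixels, the #pragma omp parallel for computes them across threads, and the results go back in a single message with taskVals[0] = pixel_num so the master knows where in the image to copy them. A negative pixel_num (done_val) breaks the loop.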