added presentation.odp, updated mpi-fractals-static.cc
git-svn-id: svn://anubis/gvsu@339 45c1a28c-8058-47b2-ae61-ca45b979098e
commit ea27af0726
parent a43f2ea5cd
@@ -29,6 +29,8 @@ void getSizes(int * rank, int * size, int * nprocs);
 void draw(int rank, int world_size, int nprocs, int width, int height,
           Uint32 * pixels, Uint32 * taskVals, Computation * computation);
 void sendWindowVals(double * winVals, int world_size);
+inline void taskAllocate(int total_tasks, int total_workers, int this_id,
+                         int * first_task_id, int * num);
 
 /**************************************************************************
  * Global variables                                                       *
@@ -109,7 +111,7 @@ int main(int argc, char * argv[])
       break;
   }
 
-  unsigned int * taskVals = new unsigned int[task_size + 1];
+  unsigned int * taskVals = new unsigned int[width * height];
   double window_vals[4];
   if (my_rank == 0)
   {
@@ -285,81 +287,38 @@ void getSizes(int * rank, int * size, int * nprocs)
 void draw(int rank, int world_size, int nprocs, int width, int height,
           Uint32 * pixels, Uint32 * taskVals, Computation * computation)
 {
   // DEBUG:
   // cout << "In draw() with rank " << rank << endl;
-  MPI_Status mpi_status;
-  if (world_size == 1)
-  {
-    for (int y = 0; y < height; y++)
-    {
-      for (int x = 0; x < width; x++)
-      {
-        double x_virt = getXVirt(x);
-        double y_virt = getYVirt(y);
-        *pixels++ = computation->compute(x_virt, y_virt);
-      }
-    }
-  }
-  else if (rank == 0)
-  {
-    int done_val = -1;
-    int num_pixels = width * height;
-    int pixel = 0;
-    for (int to_proc = 1; to_proc < world_size; to_proc++)
-    {
-      if (pixel < num_pixels)
-      {
-        MPI_Send(&pixel, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
-        pixel += task_size;
-      }
-      else
-      {
-        MPI_Send(&done_val, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
-      }
-    }
-    int num_tasks = (num_pixels + task_size - 1) / task_size;
-    for (int received = 0; received < num_tasks; received++)
-    {
-      MPI_Recv(taskVals, task_size + 1, MPI_INT,
-               MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &mpi_status);
-      if (pixel < num_pixels)
-      {
-        MPI_Send(&pixel, 1, MPI_INT,
-                 mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
-        pixel += task_size;
-      }
-      else
-      {
-        MPI_Send(&done_val, 1, MPI_INT,
-                 mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
-      }
-      memcpy(pixels + taskVals[0], taskVals + 1, task_size * sizeof(int));
-    }
-  }
-  else
-  {
-    for (;;)
-    {
-      int pixel_num;
-      /* wait to be told what to do */
-      MPI_Recv(&pixel_num, 1, MPI_INT,
-               MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, NULL);
-      if (pixel_num < 0) /* exit if we are done */
-        break;
-      #pragma omp parallel for
-      for (int i = 0; i < task_size; i++)
-      {
-        int this_pixel_num = pixel_num + i;
-        int x = this_pixel_num % width;
-        int y = this_pixel_num / width;
-        double x_virt = getXVirt(x);
-        double y_virt = getYVirt(y);
-        taskVals[i + 1] = computation->compute(x_virt, y_virt);
-      }
-      /* send the computed pixel data to the master node */
-      taskVals[0] = pixel_num;
-      MPI_Send(taskVals, task_size + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
-    }
-  }
+  int num_pixels = width * height;
+  int firstPixel;
+  int numPixels;
+  taskAllocate(num_pixels, world_size, rank, &firstPixel, &numPixels);
+
+  #pragma omp parallel for
+  for (int i = 0; i < numPixels; i++)
+  {
+    int this_pixel_num = firstPixel + i;
+    int x = this_pixel_num % width;
+    int y = this_pixel_num / width;
+    double x_virt = getXVirt(x);
+    double y_virt = getYVirt(y);
+    unsigned int color = computation->compute(x_virt, y_virt);
+    if (rank == 0)
+      pixels[this_pixel_num] = color;
+    else
+      taskVals[i] = color;
+  }
+
+  if (rank == 0)
+  {
+    for (int i = 1; i < world_size; i++)
+    {
+      taskAllocate(num_pixels, world_size, i, &firstPixel, &numPixels);
+      MPI_Recv(pixels + firstPixel, numPixels, MPI_INT,
+               i, MPI_ANY_TAG, MPI_COMM_WORLD, NULL);
+    }
+  }
+  else
+  {
+    MPI_Send(taskVals, numPixels, MPI_INT, 0, 0, MPI_COMM_WORLD);
+  }
 }
 
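The rewritten draw() is a fixed block decomposition: every rank computes the pixel range that taskAllocate() assigns to it, and rank 0 then receives each worker's block at the matching offset. For comparison only (this is not part of the commit), the same gather step could be written as a single collective. The sketch below assumes the taskAllocate() declared above, the pixels/taskVals buffers, and the rank/world_size values from draw() are in scope; gatherPixels is a hypothetical helper name.

#include <mpi.h>
#include <vector>

/* Sketch only: gather each rank's pixel block into rank 0's frame buffer
 * with one MPI_Gatherv call instead of the explicit receive loop above. */
void gatherPixels(unsigned int * pixels, unsigned int * taskVals,
                  int num_pixels, int numPixels, int rank, int world_size)
{
  /* Recompute every rank's (offset, count) with the same allocator,
   * so the receive layout matches what each rank computed.           */
  std::vector<int> counts(world_size), displs(world_size);
  for (int r = 0; r < world_size; r++)
    taskAllocate(num_pixels, world_size, r, &displs[r], &counts[r]);

  /* Rank 0 has already written its own block into pixels, so it
   * contributes MPI_IN_PLACE; every other rank sends its taskVals.   */
  const void * sendbuf = (rank == 0) ? MPI_IN_PLACE : (const void *) taskVals;
  MPI_Gatherv(sendbuf, numPixels, MPI_UNSIGNED,
              pixels, counts.data(), displs.data(), MPI_UNSIGNED,
              0, MPI_COMM_WORLD);
}

Whether the collective is faster than the point-to-point loop depends on the MPI implementation; the point is only that both sides derive counts and offsets from the same taskAllocate() arithmetic, which is what keeps sender and receiver sizes in agreement.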
@@ -377,3 +336,32 @@ void sendWindowVals(double * winVals, int world_size)
              0, MPI_COMM_WORLD);
   }
 }
+
+/*
+ * taskAllocate() will divide a set of total_tasks tasks into
+ * total_workers groups, as evenly as possible
+ * Parameters:
+ *  total_tasks   : IN  : the total number of tasks to divide up
+ *  total_workers : IN  : the total number of workers to allocate tasks to (>0)
+ *  this_id       : IN  : the id (base 0) of the task calling us for work
+ *  first_task_id : OUT : the id (base 0) of the first task for this worker
+ *  num           : OUT : the number of tasks assigned to this worker
+ */
+inline void taskAllocate(int total_tasks, int total_workers, int this_id,
+                         int * first_task_id, int * num)
+{
+  int l_num;
+  int leftovers = total_tasks % total_workers; /* num of "leftover" tasks */
+  if (this_id < leftovers)
+  {
+    l_num = total_tasks / total_workers + 1; /* do one of the leftovers */
+    *first_task_id = l_num * this_id;
+  }
+  else
+  {
+    l_num = total_tasks / total_workers;
+    *first_task_id = l_num * this_id + leftovers;
+  }
+  *num = l_num;
+}
+
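As a quick illustration of the split described in the comment block, here is a small self-contained example (not in the repository; it duplicates the allocation arithmetic rather than linking against mpi-fractals-static.cc) that prints how 10 tasks are divided among 3 workers: worker 0 gets tasks 0-3, workers 1 and 2 get 3 tasks each.

#include <cstdio>

/* Standalone copy of the allocation arithmetic above, for illustration. */
static void taskAllocate(int total_tasks, int total_workers, int this_id,
                         int * first_task_id, int * num)
{
  int leftovers = total_tasks % total_workers;
  if (this_id < leftovers)
  {
    *num = total_tasks / total_workers + 1;
    *first_task_id = *num * this_id;
  }
  else
  {
    *num = total_tasks / total_workers;
    *first_task_id = *num * this_id + leftovers;
  }
}

int main()
{
  /* Expected output:
   *   worker 0: first=0 num=4
   *   worker 1: first=4 num=3
   *   worker 2: first=7 num=3
   */
  for (int id = 0; id < 3; id++)
  {
    int first, num;
    taskAllocate(10, 3, id, &first, &num);
    std::printf("worker %d: first=%d num=%d\n", id, first, num);
  }
  return 0;
}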
BIN  cs677/final/presentation.odp  (new executable file, binary not shown)