added mpi-fractals-static.cc from mpi-fractals.cc

git-svn-id: svn://anubis/gvsu@338 45c1a28c-8058-47b2-ae61-ca45b979098e
josh 2008-12-08 18:08:16 +00:00
parent 60d9bb6178
commit a43f2ea5cd


@@ -0,0 +1,379 @@
/*
* Josh Holtrop
* 2008-12-11
* CS677 Final Project
* This program implements a fractal-image generator and viewer
* that uses Open MPI and OpenMP.
*/
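/*
* A typical invocation (the binary name and process count here are
* assumptions for illustration, not taken from this file):
* mpirun -np 4 ./mpi-fractals-static -w 800 -h 800 -t 1 -s 200 --times
* Options parsed in main() below: -w/-h set the window size, -t selects
* the fractal (0 = Newton, 1 = Fatou), -s sets the pixels per task,
* --times prints per-frame timings, and --no-threads limits each worker
* to a single OpenMP thread.
*/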
#include <SDL/SDL.h>
#include <mpi.h>
#include <sys/time.h> /* struct timeval, gettimeofday() */
#include <unistd.h> /* gethostname(), sysconf() */
#include <cstring> /* strcmp(), strncmp(), memcpy() */
#include <cstdlib> /* atoi(), atexit() */
#include <iostream>
#include <omp.h>
#include "Computation.h"
#include "NewtonComputation.h"
#include "FatouComputation.h"
using namespace std;
#define PROGNAME "Josh's CS677 Final : MPI Fractal Generator"
#define getXVirt(x) (((x) - (width >> 1)) * zoom + x_center)
#define getYVirt(y) ((-((y) - (height >> 1))) * zoom + y_center)
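/*
* Worked example of the screen-to-virtual mapping defined above: with the
* default width = height = 600 and zoom = 1/300, pixel (0, 0) maps to
* (x_center - 1.0, y_center + 1.0) and pixel (599, 599) maps to roughly
* (x_center + 1.0, y_center - 1.0), so the window covers a 2x2 region of
* the plane centered on (x_center, y_center).
*/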
/**************************************************************************
* Utility functions *
*************************************************************************/
bool createWindow(int width, int height,
SDL_Surface ** screen, Uint32 ** pixels);
void getSizes(int * rank, int * size, int * nprocs);
void draw(int rank, int world_size, int nprocs, int width, int height,
Uint32 * pixels, Uint32 * taskVals, Computation * computation);
void sendWindowVals(double * winVals, int world_size);
/**************************************************************************
* Global variables *
*************************************************************************/
static double x_center = 0.0;
static double y_center = 0.0;
static double zoom = 1/300.0;
/* a "task" will be processing task_size pixels */
static int task_size = 100;
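/* With the default 600x600 window and task_size = 100, each frame is
* divided into 600*600/100 = 3600 tasks that the master hands out to
* workers on demand. */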
/**************************************************************************
* This is the main entry point for our program *
*************************************************************************/
int main(int argc, char * argv[])
{
int width = 600;
int height = 600;
int my_rank = 0;
int world_size = 0;
int nprocs = 0;
Computation * computation = NULL;
int fractal_type = 0;
bool display_times = false;
SDL_Surface * screen;
Uint32 * pixels;
MPI_Init(&argc, &argv);
for (int i = 1; i < argc; i++)
{
if (!strncmp(argv[i], "-w", 2))
{
width = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
}
else if (!strncmp(argv[i], "-h", 2))
{
height = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
}
else if (!strncmp(argv[i], "-t", 2))
{
fractal_type = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
}
else if (!strcmp(argv[i], "--times"))
{
display_times = true;
}
else if (!strcmp(argv[i], "--no-threads"))
{
omp_set_num_threads(1);
}
else if (!strncmp(argv[i], "-s", 2))
{
task_size = atoi(strlen(argv[i]) > 2 ? argv[i] + 2 : argv[++i]);
}
}
getSizes(&my_rank, &world_size, &nprocs);
if (my_rank == 0)
{
char hostname[1000];
gethostname(&hostname[0], 1000);
cout << "Master hostname: " << hostname << endl;
}
switch (fractal_type)
{
case 0:
default:
computation = new NewtonComputation();
x_center = 0.0;
y_center = 0.0;
zoom = 2.0 / width;
break;
case 1:
computation = new FatouComputation();
x_center = 3.001;
y_center = 0.075975;
zoom = 2.0 / width;
break;
}
Uint32 * taskVals = new Uint32[task_size + 1];
double window_vals[4];
if (my_rank == 0)
{
SDL_Event event;
bool going = true;
bool window_success = createWindow(width, height, &screen, &pixels);
bool redraw = true;
if (!window_success)
going = false;
/* master loop */
while (going && SDL_WaitEvent(&event) != 0)
{
if (redraw)
{
struct timeval before, after;
window_vals[0] = 0.0;
window_vals[1] = x_center;
window_vals[2] = y_center;
window_vals[3] = zoom;
sendWindowVals(&window_vals[0], world_size);
gettimeofday(&before, NULL);
draw(my_rank, world_size, nprocs, width, height,
pixels, taskVals, computation);
gettimeofday(&after, NULL);
if (display_times)
{
double time_before = before.tv_sec + before.tv_usec / 1000000.0;
double time_after = after.tv_sec + after.tv_usec / 1000000.0;
double diff = time_after - time_before;
cout << "Elapsed time: " << diff << " seconds." << endl;
}
redraw = false;
}
SDL_UpdateRect(screen, 0, 0, 0, 0);
switch (event.type)
{
case SDL_QUIT:
going = false;
break;
case SDL_KEYDOWN:
if (event.key.keysym.sym == SDLK_q)
going = false;
break;
case SDL_MOUSEBUTTONDOWN:
switch (event.button.button)
{
case 1: /* left-click to re-center and zoom in */
x_center = getXVirt(event.button.x);
y_center = getYVirt(event.button.y);
zoom /= 2.0;
redraw = true;
break;
case 2: /* middle click just to re-center */
x_center = getXVirt(event.button.x);
y_center = getYVirt(event.button.y);
redraw = true;
break;
case 4: /* zoom in */
zoom /= 2.0;
redraw = true;
break;
case 5: /* zoom out */
zoom *= 2.0;
redraw = true;
break;
}
break;
}
}
window_vals[0] = 1.0;
sendWindowVals(&window_vals[0], world_size);
}
else
{
/* slave loop */
for (;;)
{
// DEBUG:
// cout << "MPI node " << my_rank << " waiting for command." << endl;
/* wait for a redraw or quit command */
MPI_Recv(&window_vals[0], 4, MPI_DOUBLE,
MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if (window_vals[0] != 0.0)
break;
x_center = window_vals[1];
y_center = window_vals[2];
zoom = window_vals[3];
// DEBUG:
// cout << "MPI node " << my_rank << " received ("
// << x_center << ", " << y_center << "), zoom " << zoom << endl;
draw(my_rank, world_size, nprocs, width, height,
NULL, taskVals, computation);
}
}
delete[] taskVals;
MPI_Finalize();
delete computation;
return 0;
}
/**************************************************************************
* This utility function is used by the master MPI node to create an SDL *
* window for displaying the fractal and receiving user input. *
*************************************************************************/
bool createWindow(int width, int height,
SDL_Surface ** screen, Uint32 ** pixels)
{
if (SDL_Init(SDL_INIT_VIDEO))
{
cerr << "Failed to initialize SDL!" << endl;
return false;
}
atexit(SDL_Quit);
if (!(*screen = SDL_SetVideoMode(width, height, 32, 0)))
{
cerr << "Failed to set video mode!" << endl;
return false;
}
SDL_WM_SetCaption(PROGNAME, PROGNAME);
*pixels = (Uint32 *) (*screen)->pixels;
return true;
}
/**************************************************************************
* This utility function returns the MPI node's rank, the total number *
* of MPI nodes, and the number of processing cores on the local node *
*************************************************************************/
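/* Note: every node contributes exactly one int to the gather below, so
* MPI_Gatherv with all counts set to 1 is equivalent to a plain MPI_Gather
* here; the counts and displacements arrays just make that explicit. */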
void getSizes(int * rank, int * size, int * nprocs)
{
MPI_Comm_rank(MPI_COMM_WORLD, rank);
MPI_Comm_size(MPI_COMM_WORLD, size);
*nprocs = sysconf(_SC_NPROCESSORS_CONF);
int displs[*size];
int counts[*size];
for (int i = 0; i < *size; i++)
{
displs[i] = i;
counts[i] = 1;
}
int all_nprocs[*size];
MPI_Gatherv(nprocs, 1, MPI_INT,
&all_nprocs[0], &counts[0], &displs[0], MPI_INT,
0, MPI_COMM_WORLD);
if (*rank == 0)
{
int total_nprocs = 0;
cout << "Number of cores on each MPI node:" << endl;
for (int i = 0; i < *size; i++)
{
cout << all_nprocs[i] << " ";
total_nprocs += all_nprocs[i];
}
cout << endl;
cout << "Total number of cores: " << total_nprocs << endl;
}
}
/**************************************************************************
* This function is executed by each MPI node every time a fractal *
* frame is to be drawn. *
*************************************************************************/
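/*
* Message protocol used below (as implemented by the sends and receives
* in this function):
* master -> worker: a single int holding the starting pixel index of a
* task, or -1 to signal that the current frame is finished
* worker -> master: task_size + 1 values, where element 0 echoes the
* starting pixel index and elements 1..task_size hold the computed
* pixel colors for that range
* Each worker parallelizes its task across local cores with OpenMP.
*/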
void draw(int rank, int world_size, int nprocs, int width, int height,
Uint32 * pixels, Uint32 * taskVals, Computation * computation)
{
// DEBUG:
// cout << "In draw() with rank " << rank << endl;
MPI_Status mpi_status;
if (world_size == 1)
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
double x_virt = getXVirt(x);
double y_virt = getYVirt(y);
*pixels++ = computation->compute(x_virt, y_virt);
}
}
}
else if (rank == 0)
{
int done_val = -1;
int num_pixels = width * height;
int pixel = 0;
for (int to_proc = 1; to_proc < world_size; to_proc++)
{
if (pixel < num_pixels)
{
MPI_Send(&pixel, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
pixel += task_size;
}
else
{
MPI_Send(&done_val, 1, MPI_INT, to_proc, 0, MPI_COMM_WORLD);
}
}
int num_tasks = (num_pixels + task_size - 1) / task_size;
for (int received = 0; received < num_tasks; received++)
{
MPI_Recv(taskVals, task_size + 1, MPI_INT,
MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &mpi_status);
if (pixel < num_pixels)
{
MPI_Send(&pixel, 1, MPI_INT,
mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
pixel += task_size;
}
else
{
MPI_Send(&done_val, 1, MPI_INT,
mpi_status.MPI_SOURCE, 0, MPI_COMM_WORLD);
}
/* clamp the copy so a partial final task cannot write past the buffer */
int count = num_pixels - (int) taskVals[0];
if (count > task_size)
count = task_size;
memcpy(pixels + taskVals[0], taskVals + 1, count * sizeof(Uint32));
}
}
else
{
for (;;)
{
int pixel_num;
/* wait to be told what to do */
MPI_Recv(&pixel_num, 1, MPI_INT,
MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if (pixel_num < 0) /* exit if we are done */
break;
#pragma omp parallel for
for (int i = 0; i < task_size; i++)
{
int this_pixel_num = pixel_num + i;
int x = this_pixel_num % width;
int y = this_pixel_num / width;
double x_virt = getXVirt(x);
double y_virt = getYVirt(y);
taskVals[i + 1] = computation->compute(x_virt, y_virt);
}
/* send the computed pixel data to the master node */
taskVals[0] = pixel_num;
MPI_Send(taskVals, task_size + 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
}
}
/**************************************************************************
* This utility function is used by the master process to send the *
* current view position and zoom level to all of the slave processes. *
*************************************************************************/
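/* The message is four doubles, matching the MPI_Recv in the slave loop of
* main(): winVals[0] is a command flag (non-zero means quit), winVals[1]
* is x_center, winVals[2] is y_center, and winVals[3] is zoom. */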
void sendWindowVals(double * winVals, int world_size)
{
// DEBUG:
// cout << "Master sending out new window values" << endl;
for (int to_proc = 1; to_proc < world_size; to_proc++)
{
MPI_Send(winVals, 4, MPI_DOUBLE, to_proc,
0, MPI_COMM_WORLD);
}
}