gvsu/cs677/hw4/src/transpose.cc
josh cdcad5abb5 added root parameter to MPI_Gatherv()
git-svn-id: svn://anubis/gvsu@246 45c1a28c-8058-47b2-ae61-ca45b979098e
2008-11-23 03:25:03 +00:00

127 lines
3.4 KiB
C++

#include <iostream>
#include <sys/time.h>
#include <unistd.h> /* gethostname() */
#include <stdlib.h> /* rand() */
#include <mpi.h>
using namespace std;
/*
* taskAllocate() will divide a set of total_tasks tasks into
* total_workers groups, as evenly as possible
* Parameters:
* total_tasks : IN : the total number of tasks to divide up
* total_workers : IN : the total number of workers to allocate tasks to (>0)
* this_id : IN : the id (0-based) of the worker calling us for work
* first_task_id : OUT : the id (0-based) of the first task for this worker
* num : OUT : the number of tasks assigned to this worker
*/
inline void taskAllocate(int total_tasks, int total_workers, int this_id,
int * first_task_id, int * num)
{
    /* Split total_tasks across total_workers as evenly as possible:
     * the first (total_tasks % total_workers) workers each take one
     * extra task, so every worker gets either base or base+1 tasks. */
    const int base = total_tasks / total_workers;
    const int extra = total_tasks % total_workers;
    if (this_id < extra)
    {
        /* workers [0, extra) each own base+1 consecutive tasks */
        *num = base + 1;
        *first_task_id = this_id * (base + 1);
    }
    else
    {
        /* the remaining workers own base tasks, offset past the extras */
        *num = base;
        *first_task_id = this_id * base + extra;
    }
}
int main(int argc, char * argv[])
{
    int my_rank;
    int p; /* the number of processes */
    int n = 10; /* the size of the (n x n) matrix */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
#if 0
    for (int i = 0; i < argc; i++)
    {
        if (!strncmp(argv[i], "-s", 2))
        {
            n = atoi(strlen(argv[i]) > 2
                     ? argv[i] + 2
                     : argv[++i]);
        }
    }
#endif
    /* Initialize the matrix.  Each process fills in ONLY its own rows
     * (the rest stay 0 on this rank); entry (i,j) = 100*(i+1) + (j+1)
     * so the row/column of each value is readable in the output. */
    int matrix[n][n];
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            matrix[i][j] = 0;
    int my_first_row;
    int my_num_rows;
    taskAllocate(n, p, my_rank, &my_first_row, &my_num_rows);
    for (int row = my_first_row; row < my_first_row + my_num_rows; row++)
    {
        for (int j = 0; j < n; j++)
        {
            matrix[row][j] = 100 * (row + 1) + (j + 1);
        }
    }
    /* counts[r]/displs[r]: how many rows process r owns, and where its
     * contribution starts inside a gathered full column (displs/counts
     * sum to n, so a gathered column has exactly n entries). */
    int displs[p];
    int counts[p];
    for (int i = 0, total = 0; i < p; i++)
    {
        int first;
        int count;
        taskAllocate(n, p, i, &first, &count);
        displs[i] = total;
        counts[i] = count; /* BUG FIX: was "count[i] = count", which subscripts an int */
        total += count;
    }
    /* Transpose the matrix with n gather operations: column `col` of the
     * original becomes row `col` of the transpose, gathered at whichever
     * process owns row `col`.  (BUG FIX: the old loop ran only p times,
     * so columns p..n-1 were never transposed when p < n.)
     * Received rows are staged in recv_rows rather than written straight
     * into `matrix`, because the original rows are still needed as send
     * data for later columns. */
    int recv_rows[my_num_rows][n]; /* my rows of the transposed matrix */
    int recvbuf[n]; /* BUG FIX: was recvbuf[p]; a full column holds n ints */
    for (int col = 0; col < n; col++)
    {
        /* root = the process whose row range contains `col` */
        int root = 0;
        while (col >= displs[root] + counts[root])
            root++;
        int sendbuf[my_num_rows];
        for (int row_offset = 0; row_offset < my_num_rows; row_offset++)
        {
            sendbuf[row_offset] = matrix[my_first_row + row_offset][col];
        }
        MPI_Gatherv(&sendbuf[0], my_num_rows, MPI_INT,
                    &recvbuf[0], &counts[0], &displs[0],
                    MPI_INT, root, MPI_COMM_WORLD);
        if (my_rank == root)
        {
            for (int j = 0; j < n; j++)
                recv_rows[col - my_first_row][j] = recvbuf[j];
        }
    }
    /* Copy my rows of the transpose back into the matrix.
     * (BUG FIX: was "matrix[i][p] = recvbuf[i]" for i < p, which writes
     * past column n-1 when p == n and never stores a whole row.) */
    for (int row_offset = 0; row_offset < my_num_rows; row_offset++)
        for (int j = 0; j < n; j++)
            matrix[my_first_row + row_offset][j] = recv_rows[row_offset][j];
    /* Print out the final matrix.  NOTE(review): rank 0 only holds its
     * own rows of the transpose; the other rows print as 0 (same
     * limitation as the original code). */
    if (my_rank == 0)
    {
        cout << "Final matrix:" << endl;
        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < n; j++)
            {
                cout << matrix[i][j] << " ";
            }
            cout << endl;
        }
    }
    MPI_Finalize();
    return 0;
}