This is my first question on Stack Overflow :-) so I'm sorry if I post it the wrong way...
This is my problem: I have to compare the Recursive Fibonacci algorithm with different parallel programming model: Cilk, openMP...and openMPI
Cilk and OpenMP were trivial, but OpenMPI is a bit more complicated for me...
I found an implementation of the recursive Fibonacci algorithm that uses MPI_Comm_spawn and it works, but the MPI_Comm_spawn primitive creates and executes new processes on the master node only. So the rest of the cluster goes unused.
So... my question is: is there a way to execute the spawned processes across the entire cluster? Otherwise, are there other solutions for implementing recursive Fibonacci with OpenMPI?
Thank you for helping me! :-)
This is the code that actually works on master node only:
[MASTER]
/*
 * Master: validates that it is the single manager process, then spawns one
 * "slave_fib" child to compute fib(n) recursively and waits for the answer
 * on the intercommunicator.  Expects the Fibonacci index as argv[1].
 */
int main (int argc, char **argv){
long n, fibn;
int world_size, flag;
int universe_size = 10;   /* fallback if MPI_UNIVERSE_SIZE is not set */
char command[] = "slave_fib";
MPI_Comm children_comm;
int errcodes[1];
MPI_Init (&argc, &argv);
MPI_Comm_size (MPI_COMM_WORLD, &world_size);
MPI_Info local_info;
MPI_Info_create (&local_info);
/* BUG FIX: perror() prints strerror(errno), but MPI calls do not set
 * errno, so it emitted a misleading message — and execution continued
 * anyway.  Report to stderr and abort instead. */
if (world_size != 1){
fprintf (stderr, "Top heavy with management\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
MPI_Comm_get_attr (MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &universe_size, &flag);
if (flag && universe_size == 1){
fprintf (stderr, "No room to start workers\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
/* Shift argv so the child receives the index as its own argv[1];
 * main's argv is already NULL-terminated as MPI_Comm_spawn requires. */
argv += 1;
if (argv[0] == NULL){   /* robustness: no index given on the command line */
fprintf (stderr, "usage: master <fib_index>\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
n = atol (argv[0]);
if (n < 2){
/* Base case: fib(0)=0, fib(1)=1 — no child needed.
 * BUG FIX: the original called exit(0) with MPI still initialized;
 * finalize cleanly instead. */
printf ("fib(%ld)=%ld\n", n, n);
MPI_Info_free (&local_info);
MPI_Finalize ();
return 0;
}else{
sprintf (argv[0], "%ld", n);
/* BUG FIX: the root argument is a rank in MPI_COMM_SELF, whose only
 * rank is 0 — not the MPI_COMM_WORLD rank (they merely coincided
 * when run with -np 1). */
MPI_Comm_spawn (command, argv, 1, local_info, 0, MPI_COMM_SELF,
&children_comm, errcodes);
}
/* Block until the child reports the final result on tag 1. */
MPI_Recv (&fibn, 1, MPI_LONG, MPI_ANY_SOURCE, 1, children_comm,
MPI_STATUS_IGNORE);
printf ("fib(%ld)=%ld\n", n, fibn);
fflush(stdout);
MPI_Info_free (&local_info);
MPI_Finalize ();
return 0;
}
##### SPAWNED BINARY #####
/*
 * Slave (the spawned binary): reads the index n from argv[1].  For n < 2 it
 * sends n straight back to the parent; otherwise it spawns two children for
 * n-1 and n-2, sums their results, and sends the sum to the parent on tag 1.
 */
int main (int argc, char **argv){
long n, fibn, x, y;
int size;
char command[] = "slave_fib";
MPI_Comm children_comm[2];
MPI_Comm parent;
MPI_Info local_info;
int world_size, flag;
int universe_size = 10;   /* fallback if MPI_UNIVERSE_SIZE is not set */
int errcodes[1];
MPI_Init (&argc, &argv);
MPI_Comm_get_parent (&parent);
MPI_Info_create (&local_info);
MPI_Comm_size (MPI_COMM_WORLD, &world_size);
/* BUG FIX: perror() reads errno, which MPI does not set; a slave with no
 * parent cannot do anything useful, so abort instead of continuing. */
if (parent == MPI_COMM_NULL){
fprintf (stderr, "No parent!\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
MPI_Comm_remote_size (parent, &size);
if (size != 1){
fprintf (stderr, "Something's wrong with the parent\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
MPI_Comm_get_attr (MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &universe_size, &flag);
argv += 1;
if (argv[0] == NULL){   /* robustness: spawned without the index argument */
fprintf (stderr, "slave_fib: missing fib index argument\n");
MPI_Abort (MPI_COMM_WORLD, 1);
}
n = atol (argv[0]);
if (n < 2){
/* Base case: fib(0)=0, fib(1)=1 — report n itself to the parent. */
MPI_Send (&n, 1, MPI_LONG, 0, 1, parent);
}else{
/* Reusing argv[0] is safe: "%ld" of n-1 and n-2 never needs more
 * characters than the original string for n. */
sprintf (argv[0], "%ld", (n - 1));
MPI_Comm_spawn (command, argv, 1, local_info, 0,
MPI_COMM_SELF, &children_comm[0], errcodes);
sprintf (argv[0], "%ld", (n - 2));
MPI_Comm_spawn (command, argv, 1, local_info, 0,
MPI_COMM_SELF, &children_comm[1], errcodes);
/* BUG FIX: the original never received x from the first child
 * (children_comm[0]), so "fibn = x + y" added an uninitialized
 * variable — undefined behavior and a wrong result. */
MPI_Recv (&x, 1, MPI_LONG, MPI_ANY_SOURCE, 1,
children_comm[0], MPI_STATUS_IGNORE);
MPI_Recv (&y, 1, MPI_LONG, MPI_ANY_SOURCE, 1,
children_comm[1], MPI_STATUS_IGNORE);
fibn = x + y; /* fib(n) = fib(n-1) + fib(n-2) */
MPI_Send (&fibn, 1, MPI_LONG, 0, 1, parent);
}
MPI_Info_free (&local_info);
MPI_Finalize ();
return 0;
}
How to execute it: mpirun -np 1 binary_name fib_num
The only way to execute it is with -np 1; if you set np > 1 the execution will return an error (from MPI_Comm_spawn).
mpirun --host host1:n1,host2:n2,... -np 1 ...to use more than one node - Gilles Gouaillardet