0
votes

In a series of attempts to learn MPI-2 one-sided communication, I am testing the following code, in which I store one fundamental-type value such as an int in the master process and expose it to all other processes. What I do with the integer is simple: each process iteratively increments it until the shared integer reaches a maximum value. Each process fences before printing out the shared integer, as follows (complete code at bottom):

  for (int i = 0; i < 10; i++) {
    mpi_val_t<int>::inc_val(val,1);
    if (mpi_val_t<int>::get_val(val) >= 25)
      break;
  }
  MPI_Win_fence(0,val->win);
  std::cout << "val = " << mpi_val_t<int>::get_val(val) << std::endl;

I expect that each process will print the same value (25) on exit. But I sometimes get output like this:

$ mpiexec.exe -n 4 a.exe
val = 17
val = 22
val = 25
val = 25

Can someone please explain what's going on here and how to correctly synchronize it?

Thanks,


Code:

#include <mpi.h>
#include <cstdlib>
#include <cstdio>
#include <iostream>

// Compile-time mapping from a C++ type to the corresponding MPI datatype.
// Only int and double are supported; using any other T fails at link time
// because the primary template is declared but never defined.
template <typename T>
inline MPI_Datatype mpi_type();
template <> inline MPI_Datatype mpi_type<int>() { return MPI_INT; }
template <> inline MPI_Datatype mpi_type<double>() { return MPI_DOUBLE; }

// A single value of type T hosted on one rank and exposed to every rank
// through an MPI RMA window.  Access uses passive-target synchronization
// (MPI_Win_lock / MPI_Win_unlock), so callers must not mix in active-target
// calls such as MPI_Win_fence on the same window.
template <typename T>
class mpi_val_t {
public:
  MPI_Win win;    // RMA window through which the value is exposed
  int  hostrank;  // rank of the process that hosts the exposed value
  int  rank;      // this process's rank in MPI_COMM_WORLD
  int  size;      // number of processes in MPI_COMM_WORLD
  T    val;       // the shared value (window-backed storage on hostrank only)

  // Collectively create the shared value.  hostrank exposes its `val`
  // member through the window; all other ranks contribute a zero-sized
  // window.  Every rank must call this (MPI_Win_create is collective).
  static struct mpi_val_t *create_val(int hostrank, T v) {
      struct mpi_val_t *val;

      val = (struct mpi_val_t *)malloc(sizeof(struct mpi_val_t));
      val->hostrank = hostrank;
      MPI_Comm_rank(MPI_COMM_WORLD, &(val->rank));
      MPI_Comm_size(MPI_COMM_WORLD, &(val->size));

      if (val->rank == hostrank) {
          // FIX: the original called MPI_Alloc_mem(sizeof(T), ..., &val->val),
          // which stored an allocated pointer into the T member (type misuse)
          // and then leaked it when `val->val = v` overwrote it.  The window
          // is backed directly by the struct member, so no separate
          // allocation is needed.
          val->val = v;
          MPI_Win_create(&val->val, sizeof(T), sizeof(T),
                         MPI_INFO_NULL, MPI_COMM_WORLD, &(val->win));
      }
      else {
          // Non-host ranks expose no memory: zero-sized window.
          MPI_Win_create(&val->val, 0, 1,
                         MPI_INFO_NULL, MPI_COMM_WORLD, &(val->win));
      }
      return val;
  }

  // Collectively destroy the window and free the wrapper; *val is nulled.
  static void delete_val(struct mpi_val_t **val) {
      MPI_Win_free(&((*val)->win));
      free((*val));
      *val = NULL;
      return;
  }

  // Read the current value from the host rank.  The shared lock allows
  // concurrent readers; the MPI_Get result is only valid after unlock
  // completes the epoch, which happens before we return.
  static T get_val(struct mpi_val_t *val) {
      T ret;
      MPI_Win_lock(MPI_LOCK_SHARED, val->hostrank, 0, val->win);
      MPI_Get(&ret, 1 , mpi_type<T>(), val->hostrank, 0, 1, mpi_type<T>(), val->win);
      // FIX: unlock the rank that was locked (hostrank), not rank 0.
      MPI_Win_unlock(val->hostrank, val->win);
      return ret;
  }

  // Atomically add `inc` to the shared value on the host rank.
  static void inc_val(struct mpi_val_t *val, T inc) {
      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, val->hostrank, 0, val->win);
      MPI_Accumulate(&inc, 1, mpi_type<T>(), val->hostrank, 0, 1, mpi_type<T>(), MPI_SUM,val->win);
      // FIX: unlock the rank that was locked (hostrank), not rank 0.
      MPI_Win_unlock(val->hostrank, val->win);
  }

}; //mpi_val_t

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  mpi_val_t<int>* val = mpi_val_t<int>::create_val(0,0);
  for (int i = 0; i < 10; i++) {
    mpi_val_t<int>::inc_val(val,1);
    if (mpi_val_t<int>::get_val(val) >= 25)
      break;
  }
  MPI_Win_fence(0,val->win);
  std::cout << "val = " << mpi_val_t<int>::get_val(val) << std::endl;
  mpi_val_t<int>::delete_val(&val);
  MPI_Finalize();
}
1

1 Answer

2
votes

The fence calls in MPI's RMA should come in pairs - the first one starts the access / exposure epoch and the second one completes it:

 MPI_Win_fence(0, win);
 ...
 MPI_Win_fence(0, win);

The standard explicitly warns against using fence calls instead of barriers:

However, a call to MPI_WIN_FENCE that is known not to end any epoch (in particular, a call with assert = MPI_MODE_NOPRECEDE) does not necessarily act as a barrier.

Moreover, fences are used for active target communication and should not be mixed with passive target communication operations such as MPI_Win_lock.

Solution: Replace the call to MPI_Win_fence with a barrier on MPI_COMM_WORLD.

Also, note that there is an error in your implementation - while you lock the window in rank val->hostrank, you always pass a rank of 0 to the unlock call.