// /=====================================================
/* Halo-exchange sketch: post one nonblocking receive per neighbor, then
 * send to each neighbor with a *blocking* MPI_Send.
 *
 * Why this performs poorly: the MPI_Send calls execute strictly one
 * after another.  For large messages MPI_Send typically follows a
 * rendezvous protocol and does not return until the matching receive is
 * being progressed, so the sends serialize across neighbors and no
 * send/send or send/receive overlap is possible.
 *
 * NOTE(review): MPI_Irecv's last argument is an MPI_Request*; if
 * `requests` is declared as an array of MPI_Request, this should be
 * &requests[i] — confirm the declaration.
 * NOTE(review): every receive targets the same buffer `edge`; presumably
 * schematic — a real exchange needs a distinct buffer (or offset) per
 * neighbor, otherwise the receives overwrite each other.
 */
for (i = 0; i < n_neighbors; i++) {
  MPI_Irecv(edge, len, MPI_DOUBLE, nbr[i], tag, comm, requests[i]);
}
/* Blocking sends: each iteration may stall until neighbor nbr[i] is
 * ready to receive, serializing the whole exchange. */
for (i = 0; i < n_neighbors; i++) {
  MPI_Send(edge, len, MPI_DOUBLE, nbr[i], tag, comm);
}
/* Wait only for the receives; the blocking sends have already completed
 * by the time we reach this call. */
MPI_Waitall(n_neighbors, requests, statuses);

// Does not perform well in practice. Why?



// /=====================================================
// Difference between the snippet above and the one below: the former uses
// blocking MPI_Send, the latter uses nonblocking MPI_Isend.
/* Same halo exchange, fully nonblocking: MPI_Isend replaces MPI_Send.
 * All 2*n_neighbors operations are posted before any completion wait, so
 * the MPI library can progress every send and receive concurrently
 * instead of serializing the sends one neighbor at a time.
 * Receive requests occupy requests[0 .. n_neighbors-1]; send requests
 * occupy requests[n_neighbors .. 2*n_neighbors-1].
 *
 * NOTE(review): the request arguments are likely meant to be
 * &requests[i] / &requests[n_neighbors + i] if `requests` is an array of
 * MPI_Request — confirm the declaration.
 * NOTE(review): as in the blocking version, all receives share the
 * single buffer `edge`; presumably schematic only.
 */
for (i = 0; i < n_neighbors; i++) {
  MPI_Irecv(edge, len, MPI_DOUBLE, nbr[i], tag, comm, requests[i]);
}
for (i = 0; i < n_neighbors; i++) {
  MPI_Isend(edge, len, MPI_DOUBLE, nbr[i], tag, comm,
            requests[n_neighbors + i]);
}
/* Complete all receives and sends together. */
MPI_Waitall(2 * n_neighbors, requests, statuses);


// MPE, TAU, and HPCToolkit are popular profiling tools for MPI programs.


/*
 * Minimal MPI one-sided (RMA) example: allocate local memory, expose it
 * to every rank in MPI_COMM_WORLD through a window, then tear down.
 */
int main(int argc, char **argv)
{
    MPI_Win win;
    int *a;

    MPI_Init(&argc, &argv);

    /* Allocate private memory; MPI_Alloc_mem may hand back specially
     * registered memory that speeds up RMA on some interconnects. */
    MPI_Alloc_mem(1000 * sizeof(int), MPI_INFO_NULL, &a);

    /* The buffer behaves like ordinary allocated memory. */
    a[0] = 1;
    a[1] = 2;

    /* Collective call: each rank exposes its buffer to the communicator.
     * The displacement unit is sizeof(int), so remote offsets count in
     * ints rather than bytes. */
    MPI_Win_create(a, 1000 * sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* Array 'a' is now remotely accessible by all processes in
     * MPI_COMM_WORLD. */

    /* Teardown: free the window before the memory it exposes. */
    MPI_Win_free(&win);
    MPI_Free_mem(a);
    MPI_Finalize();
    return 0;
}
