
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <assert.h>
   
    #define TEST 
    //#undef TEST
    #ifndef _EXTERN_C_
    #ifdef __cplusplus
    #define _EXTERN_C_ extern "C"
    #else /* __cplusplus */
    #define _EXTERN_C_
    #endif /* __cplusplus */
    #endif /* _EXTERN_C_ */
    
    #ifdef MPICH_HAS_C2F
    _EXTERN_C_ void *MPIR_ToPointer(int);
    #endif // MPICH_HAS_C2F
    
    #ifdef PIC
    /* For shared libraries, declare these weak and figure out which one was linked
    based on which init wrapper was called.  See mpi_init wrappers.  */
    #pragma weak pmpi_init
    #pragma weak PMPI_INIT
    #pragma weak pmpi_init_
    #pragma weak pmpi_init__
    #endif /* PIC */
    
    _EXTERN_C_ void pmpi_init(MPI_Fint *ierr);
    _EXTERN_C_ void PMPI_INIT(MPI_Fint *ierr);
    _EXTERN_C_ void pmpi_init_(MPI_Fint *ierr);
    _EXTERN_C_ void pmpi_init__(MPI_Fint *ierr);

    //global variables
    int *nodes_status; //the status of each node, 1-alive, 0-dead
    int real_rank = 0, group_rank = 0, real_num_nodes = 0, num_nodes = 0;
    int mirror_protocal = 0; //use mirror protocal or parallel protocal   
    int is_lead = 1; //current node is lead or replica 
    static int ANY_SOURCE_TAG = 123;
    int print_node = 1;
    int print_all = 1;
    
    //given a rank id, return the lead rank id
    int get_lead(int rank) {
        /* Leads occupy ranks [0, num_nodes); a replica at rank r maps
           back to its lead at r - num_nodes.  Lead ranks map to themselves. */
        return (rank >= num_nodes) ? (rank - num_nodes) : rank;
    }

    //given a rank id, return the replica rank id
    int get_replica(int rank) {
        /* Replicas occupy ranks [num_nodes, 2*num_nodes); a lead at rank r
           has its replica at r + num_nodes.  Replica ranks map to themselves. */
        return (rank < num_nodes) ? (rank + num_nodes) : rank;
    }

    //given a rank id, return the status of that rank
    int is_alive(int rank) {
        /* Liveness table lookup: 1 = alive, 0 = dead.
           Entries are cleared by the MPI_Pcontrol fault-injection hook. */
        const int alive = nodes_status[rank];
        return alive;
    }

    //give a rank (a lead or a replica), get the partner of that rank
    int get_partner(int rank) {
        /* Leads and replicas are paired exactly num_nodes apart in the
           real rank space, so the partner is one hop up or down. */
        if (rank < num_nodes)
            return rank + num_nodes;   /* lead -> its replica */
        return rank - num_nodes;       /* replica -> its lead */
    }
    
    //given a pair of partners (sender and receiver), return the real rank of the partner in that group
    //me should be the real_rank, not group_rank; partner should be the argument
    int get_partner_in_group(int me, int partner) {
        /* Translate a group-relative rank `partner` into the real rank of
           the copy living in MY group: leads keep it unchanged, replicas
           shift it into the replica half. `me` must be a real rank. */
        return (me < num_nodes) ? partner : (partner + num_nodes);
    }

    //given a pair of partners (sender and receiver), return the real rank of the partner in the counter group
    //me should be the real_rank, not group_rank; partner should be the argument 
    int get_partner_in_counter_group(int me, int partner) {
        /* Translate a group-relative rank `partner` into the real rank of
           the copy living in the OTHER group: leads look into the replica
           half, replicas look into the lead half. `me` must be a real rank. */
        return (me < num_nodes) ? (partner + num_nodes) : partner;
    }
  
    // Replication-specific handling is added inside each of the MPI wrappers below
/* ================== C Wrappers for MPI_Send ================== */
_EXTERN_C_ int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
/* MPI_Send wrapper (replication layer).
 *
 * `dest` is a group-relative rank (what the application sees); it is mapped
 * to the real ranks of the destination's copy in our own group (in_dest)
 * and in the counter group (cross_dest).
 *
 * Mirror protocol:   deliver the message to every living copy of the
 *                    destination (in-group and cross-group).
 * Parallel protocol: first synchronize with our own partner (lead sends a
 *                    token to its replica), then send only to the in-group
 *                    copy; the cross-group copy is contacted only when our
 *                    own partner is dead and must be covered for.
 *
 * NOTE(review): the `comm` argument is ignored -- every send goes through
 * MPI_COMM_WORLD, so communicators other than world are not supported.
 * Dead callers return 0 immediately without sending anything.
 */
_EXTERN_C_ int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) { 
    int _wrap_py_return_val = 0;
 {
     // A node marked dead takes no part in communication at all.
     if(!is_alive(real_rank))
     {
#ifdef TEST
         if(print_node == real_rank || print_all)
         {
             printf("TEST (%d): node %d is dead, return from Send\n", real_rank, real_rank);
         }
#endif
         return _wrap_py_return_val;
     }

     // get the destination inside the same group (either lead or replica)
     // e.g (0->1) and (2->3) for 4 nodes
     int in_dest = get_partner_in_group(real_rank, dest);

     // get the destination in the different group
     // e.g (0->3) and (2->1) for 4 nodes
     int cross_dest = get_partner_in_counter_group(real_rank, dest);

     if (mirror_protocal) { //mirror protocol: send to both living copies

        if (is_alive(in_dest)){
            _wrap_py_return_val = PMPI_Send(buf, count, datatype, in_dest, tag, MPI_COMM_WORLD);
#ifdef TEST
            if(real_rank == print_node || print_all)
            {
                printf("TEST (%d): Send %d --> %d (inside group)\n",
                        real_rank, real_rank, in_dest);
            }
#endif
        }
        if (is_alive(cross_dest)) {
            _wrap_py_return_val = PMPI_Send(buf, count, datatype, 
                cross_dest, tag, MPI_COMM_WORLD);
#ifdef TEST
            if(real_rank == print_node || print_all)
            {
                printf("TEST (%d): Send %d --> %d (cross group)\n", 
                        real_rank, real_rank, cross_dest);
            }
#endif
        }    
    } else { //parallel protocol

        //synchronization on senders: keeps lead and replica senders in
        //lock-step so their message streams stay ordered identically.
        if (is_alive(get_partner(real_rank))) {
            if (is_lead) {
                int syn = 1;
                //lead send to replica
                PMPI_Send(&syn, 1, MPI_INT, get_replica(real_rank), ANY_SOURCE_TAG, MPI_COMM_WORLD);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Parallel Sync Send (lead %d --> repilca %d)\n", 
                            real_rank, real_rank, get_replica(real_rank));
                }
#endif
            } else {
                //replica recv from lead
                MPI_Status syn_status;
                int syn = 0;
                PMPI_Recv(&syn, 1, MPI_INT, get_lead(real_rank), ANY_SOURCE_TAG, MPI_COMM_WORLD, &syn_status);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Parallel Sync Recv(replica %d <-- lead %d)\n", 
                            real_rank, real_rank, get_lead(real_rank));
                }
#endif
            }
        } else {
#ifdef TEST
            if(real_rank == print_node || print_all)
            {
                printf("TEST (%d): Parallel Sync: No need for sync since partner is dead \n", real_rank);
            }
#endif
        }

        // Start to send real information
       if (is_alive(in_dest)) {
           //if the in-group copy of the destination is alive, send to it
            _wrap_py_return_val = PMPI_Send(buf, count, datatype, in_dest, tag, MPI_COMM_WORLD);
#ifdef TEST
            if(real_rank == print_node || print_all)
            {
                printf("TEST (%d): Send %d --> %d (inside group) \n", 
                        real_rank, real_rank, in_dest);
            }
#endif
       }

       // If our own partner is dead, we must also cover its sends: deliver
       // the message to the destination's copy in the counter group.
       int partner_node = get_partner(real_rank);
       if(!is_alive(partner_node) && is_alive(cross_dest))
       {
            _wrap_py_return_val = PMPI_Send(buf, count, datatype, cross_dest, tag, MPI_COMM_WORLD);
#ifdef TEST
            if(real_rank == print_node || print_all)
            {
                printf("TEST (%d): Send %d --> %d (cross groups)\n", 
                        real_rank, real_rank, cross_dest);
            }
#endif
       }
    }
}
    return _wrap_py_return_val;
}

/* ================== C Wrappers for MPI_Recv ================== */
_EXTERN_C_ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status);
/* MPI_Recv wrapper (replication layer).
 *
 * `source` is a group-relative rank; it is mapped to the sender's copy in
 * our own group (in_source) and in the counter group (cross_source).
 *
 * MPI_ANY_SOURCE needs special handling so that lead and replica receivers
 * agree on WHICH sender matched: the lead receives first, learns the real
 * source from the status, and forwards that source id to its replica on
 * ANY_SOURCE_TAG; the replica then posts ordinary named receives.  When the
 * lead is dead, the replica resolves ANY_SOURCE on its own.
 *
 * NOTE(review): the `comm` argument is ignored -- every receive goes
 * through MPI_COMM_WORLD.  Dead callers return 0 immediately.
 */
_EXTERN_C_ int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status) { 
    int _wrap_py_return_val = 0;
 {
     // A node marked dead takes no part in communication at all.
     if(!is_alive(real_rank))
     {
#ifdef TEST
         if(print_node == real_rank || print_all)
         {
             printf("TEST (%d): node %d is dead, return from Recv\n", real_rank, real_rank);
         }
#endif
         return _wrap_py_return_val;
     }
     
     // get the source inside the same group (either lead or replica)
     // e.g (0->1) and (2->3) for 4 nodes
     int in_source = get_partner_in_group(real_rank, source);

     // get the source in the counter group
     // e.g (0->3) and (2->1) for 4 nodes
     int cross_source = get_partner_in_counter_group(real_rank, source);

     if (mirror_protocal) { //mirror protocol
        //if ANY_SOURCE, if lead, receive one, receive one from duplica sender
        //if replica receiver alive, send the source id to it
        //if replica, <-- lead the sender's id
        //then <-- both senders if alive

        if (source == MPI_ANY_SOURCE) {  
            if (is_lead) {
                int src;
                //lead proceed receiving (could be from lead, could be from replica)
                //status may be MPI_STATUS_IGNORE, but we still need MPI_SOURCE,
                //so substitute a local status object in that case.
                if (status == MPI_STATUS_IGNORE)
                {
                    MPI_Status temp_status;
                    PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, &temp_status); 
                    src = temp_status.MPI_SOURCE;
                }
                else
                {
                    _wrap_py_return_val = 
                        PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, status); 
                    src = status->MPI_SOURCE;
                }

#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- ANY_SOURCE (%d)\n", real_rank, real_rank, src);    
                }
#endif
                //when replica is alive
                if (is_alive(get_replica(real_rank))) { 
                    //send this source to the replica
                    PMPI_Send(&src, 1, MPI_INT, get_replica(real_rank), 
                            ANY_SOURCE_TAG, MPI_COMM_WORLD);
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Send ANY_SOURCE ID (%d) to replica %d\n", 
                                real_rank, src, get_replica(real_rank));
                    }
#endif
                }
             
                // Receive the redundant copy of the message: the matched
                // sender's partner in the other group.  The mirror protocol
                // sends each message twice, so this drains the duplicate.
                // NOTE(review): this overwrites buf with the (identical)
                // duplicate payload -- dedup-by-overwrite.
                int redt_src = src < num_nodes ? get_replica(src) : get_lead(src); 
                if (is_alive(redt_src)) 
                {
                    PMPI_Recv(buf, count, datatype, redt_src, tag, MPI_COMM_WORLD, status);
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Recv redundant msg of ANY_SOURCE <-- %d\n", 
                                real_rank, redt_src);
                    }
#endif
                }
            } 
            else // replica
            {
                //if lead is alive, get Any_source info from lead
                if (is_alive(get_lead(real_rank))) {
                    int src = -1;
                    MPI_Status status_temp;
                    PMPI_Recv(&src, 1, MPI_INT, get_lead(real_rank), ANY_SOURCE_TAG, 
                            MPI_COMM_WORLD, &status_temp);
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Recv ANY_SOURCE ID (%d) from lead %d\n",
                                real_rank, src, get_lead(real_rank));
                    }
#endif
                    //src received; post named receives for both copies of
                    //the message (lead-group sender first, then its replica)
                    if (src > -1) 
                    {
                        //recv msg in the lead group
                        src = get_lead(src);
                        if (is_alive(src)) 
                        {
                            _wrap_py_return_val = PMPI_Recv(buf, count, datatype, src, 
                                    tag, MPI_COMM_WORLD, status);
#ifdef TEST
                            if(real_rank == print_node || print_all)
                            {
                                printf("TEST (%d): Recv %d <-- %d (cross group)\n", 
                                        real_rank, real_rank, src);
                            }
#endif
                        }
                        src = get_replica(src);
                        //recv msg in the replica group
                        if (is_alive(src)) {
                            _wrap_py_return_val = PMPI_Recv(buf, count, datatype, src, 
                                    tag, MPI_COMM_WORLD, status);
#ifdef TEST
                            if(real_rank == print_node || print_all)
                            {
                                printf("TEST (%d): Recv <-- %d (inside group) \n", 
                                        real_rank, src);
                            }
#endif
                        }
                    }                
                    else
                    {
                        printf("WARNING: node %d failed to recevie ANY_SOURCE ID from lead %d", 
                                    real_rank, get_lead(real_rank));
                        
                    }
                } 
                else 
                {
                    //if lead is dead, do all the receiving by itself
                    //(same pattern as the lead path above)
                    int src;
                    if (status == MPI_STATUS_IGNORE)
                    {
                        MPI_Status temp_status;
                        PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, &temp_status); 
                        src = temp_status.MPI_SOURCE;
                    }
                    else
                    {
                        _wrap_py_return_val = 
                            PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, status); 
                        src = status->MPI_SOURCE;
                    }
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Recv %d <-- (ANY_SOURCE) %d\n", real_rank, real_rank, src);
                    }
#endif

                    // Receive the redundant copy of the message (mirror
                    // protocol sends each message twice).
                    int redt_src = src < num_nodes ? get_replica(src) : get_lead(src); 
                    if (is_alive(redt_src)) 
                    {
                        PMPI_Recv(buf, count, datatype, redt_src, tag, MPI_COMM_WORLD, status);
#ifdef TEST
                        if(real_rank == print_node || print_all)
                        {
                            printf("TEST (%d): Recv redudent msg of ANY_SOURCE <-- %d\n", 
                                    real_rank, redt_src);
                        }
#endif
                    }
                } // lead is dead
            }  // replica
        }  // ANY_SOURCE
        else 
        {
            //named source: drain both copies of the mirrored message
            //if src in group is alive
            if (is_alive(in_source)){
                _wrap_py_return_val = PMPI_Recv(buf, count, datatype, in_source, tag, 
                                                MPI_COMM_WORLD, status);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- %d (inside group)\n", 
                            real_rank, real_rank, in_source);
                }
#endif
            }
            //if src in counter group is alive
            if (is_alive(cross_source)) {
                _wrap_py_return_val = PMPI_Recv(buf, count, datatype, cross_source, tag, 
                                                MPI_COMM_WORLD, status);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- %d (cross group) \n", 
                            real_rank, real_rank, cross_source);
                }
#endif
            }            
        }
   
    } else {//parallel protocol
       
        if (source == MPI_ANY_SOURCE) {
            //lead send src id to replica, then replica recv from that src
            if (is_lead) 
            {
                //lead proceed receiving (could be from lead, could be from replica)
                int src;
                //status may be MPI_STATUS_IGNORE; substitute a local one
                //because we need MPI_SOURCE out of it.
                if (status == MPI_STATUS_IGNORE)
                {
                    MPI_Status temp_status;
                    PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, &temp_status); 
                    src = temp_status.MPI_SOURCE;
                }
                else
                {
                    _wrap_py_return_val = 
                        PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, status); 
                    src = status->MPI_SOURCE;
                }

#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- ANY_SOURCE (%d)\n", real_rank, real_rank, src);    
                }
#endif
                //when replica is alive
                if (is_alive(get_replica(real_rank))) {
                    //send this source to the replica
                    PMPI_Send(&src, 1, MPI_INT, get_replica(real_rank), 
                            ANY_SOURCE_TAG, MPI_COMM_WORLD);
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Send ANY_SOURCE ID (%d) to replica %d\n", 
                                real_rank, src, get_replica(real_rank));
                    }
#endif
                }
            } 
            else // replica
            {
                //if lead is alive, get Any_source info from lead
                if (is_alive(get_lead(real_rank))) {
                    int src = -1;
                    MPI_Status status_temp;
                    PMPI_Recv(&src, 1, MPI_INT, get_lead(real_rank), ANY_SOURCE_TAG, 
                            MPI_COMM_WORLD, &status_temp);

#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Recv ANY_SOURCE ID (%d) from lead %d\n",
                                real_rank, src, get_lead(real_rank));
                    }
#endif
                    //src received: in the parallel protocol each message is
                    //sent once, so receive from the in-group copy if alive,
                    //otherwise fall back to the lead-group copy.
                    if (src > -1) {
                        //recv msg the replica group
                        src = get_replica(src);
                        if (is_alive(src)) {
                            _wrap_py_return_val = PMPI_Recv(buf, count, datatype, src, 
                                    tag, MPI_COMM_WORLD, status);

#ifdef TEST
                            if(real_rank == print_node || print_all)
                            {
                                printf("TEST (%d): Recv %d <-- %d (inside group)\n", real_rank, real_rank, src);
                            }
#endif
                        } 
                        else // if src in replica is dead
                        {
                            src = get_lead(src);
                            _wrap_py_return_val = PMPI_Recv(buf, count, datatype, src, tag, 
                                    MPI_COMM_WORLD, status);
#ifdef TEST
                            if(real_rank == print_node || print_all)
                            {
                                printf("TEST (%d): Recv %d <-- %d (cross group)\n", real_rank, real_rank, src);
                            }
#endif
                        }
                    }                
                    else
                    {
                        printf("WARNING: node %d failed to recevie ANY_SOURCE ID from lead %d", 
                                    real_rank, get_lead(real_rank));
                        
                    }
                }  // lead is alive
                else 
                {
                    //when lead is dead, recv msg by itself
                    int any_src;
                    if (status == MPI_STATUS_IGNORE) 
                    {
                        MPI_Status temp_status;
                        _wrap_py_return_val = PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, &temp_status);
                        any_src = temp_status.MPI_SOURCE;
                    } 
                    else 
                    { 
                        _wrap_py_return_val = PMPI_Recv(buf, count, datatype, source, tag, MPI_COMM_WORLD, status);
                        any_src = status->MPI_SOURCE;
                    }
#ifdef TEST
                    if(real_rank == print_node || print_all)
                    {
                        printf("TEST (%d): Recv <-- ANY_SOURCE (lead is dead) %d\n", 
                                real_rank, any_src);
                    }
#endif
                }
            }  // replica
        } // ANY_SOURCE
        else 
        {
            //if src in group is alive, recv msg from it
            //else recv msg from duplica
            if (is_alive(in_source)) {
                _wrap_py_return_val = PMPI_Recv(buf, count, datatype, in_source, tag, 
                                    MPI_COMM_WORLD, status);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- %d (inside group)\n", 
                            real_rank, real_rank, in_source);
                }
#endif
            } 
            else if (is_alive(cross_source))
            {
                _wrap_py_return_val = PMPI_Recv(buf, count, datatype, cross_source, tag, 
                                    MPI_COMM_WORLD, status);
#ifdef TEST
                if(real_rank == print_node || print_all)
                {
                    printf("TEST (%d): Recv %d <-- %d (cross group)\n", 
                            real_rank, real_rank, cross_source);
                }
#endif
            } else {
                //both copies of the sender are gone: the message is lost
                printf("WARNING: Both node %d and partner node %d are dead \n", in_source, cross_source);
            }
        } 
    }
}
    return _wrap_py_return_val;
}

/* ================== C Wrappers for MPI_Init ================== */
_EXTERN_C_ int PMPI_Init(int *argc, char ***argv);
/* MPI_Init wrapper: initializes real MPI, then sets up the replication
 * layer.  The world of real_num_nodes ranks is split in half: ranks
 * [0, num_nodes) are leads, ranks [num_nodes, real_num_nodes) are their
 * replicas.  Every rank starts out alive in nodes_status.
 *
 * NOTE(review): an odd world size truncates num_nodes (real_num_nodes / 2),
 * leaving the last rank outside any lead/replica pair -- confirm launchers
 * always use an even number of processes.
 */
_EXTERN_C_ int MPI_Init(int *argc, char ***argv) { 
    int _wrap_py_return_val = 0;
    int i = 0;
 {
    _wrap_py_return_val = PMPI_Init(argc, argv);
    
    //initialize nodes_status (liveness table over REAL ranks)
    PMPI_Comm_size(MPI_COMM_WORLD, &real_num_nodes);
    num_nodes = real_num_nodes / 2;
    nodes_status = (int *) malloc(real_num_nodes * sizeof(int));
    if (nodes_status == NULL) {
        //without the liveness table no wrapper can operate; abort the job
        fprintf(stderr, "MPI_Init wrapper: out of memory allocating nodes_status (%d entries)\n",
                real_num_nodes);
        exit(1);
    }
    for (i = 0; i < real_num_nodes; i++) {
        nodes_status[i] = 1; //originally all nodes are alive
    }

    //compute lead/replica role and the group-relative rank the
    //application will see through MPI_Comm_rank
    PMPI_Comm_rank(MPI_COMM_WORLD, &real_rank);
    if (real_rank >= num_nodes) {
        group_rank = real_rank - num_nodes;
        is_lead = 0; //current node is a replica
    } else {
        group_rank = real_rank;
        is_lead = 1;
    }
#ifdef TEST
    if(real_rank == print_node && !print_all)
    {
        printf("TEST: logging on node %d\n\n", print_node);
    }
#endif
  
}
    return _wrap_py_return_val;
}

/* ================== C Wrappers for MPI_Finalize ================== */
_EXTERN_C_ int PMPI_Finalize();
/* MPI_Finalize wrapper: releases the replication layer's liveness table,
 * then finalizes real MPI.  The pointer is reset to NULL so a stray second
 * call (or a late is_alive lookup) cannot double-free or use freed memory.
 */
_EXTERN_C_ int MPI_Finalize() { 
    int _wrap_py_return_val = 0;
 {
    free(nodes_status);
    nodes_status = NULL; //guard against double-free / use-after-free

    _wrap_py_return_val = PMPI_Finalize();
 }
    return _wrap_py_return_val;
 }

/* ================== C Wrappers for MPI_Pcontrol ================== */
_EXTERN_C_ int PMPI_Pcontrol(const int level, ...);
/* MPI_Pcontrol wrapper, repurposed as the fault-injection hook: `level` is
 * interpreted as a REAL rank to kill.  Every caller marks that rank dead in
 * its local liveness table; the targeted process itself additionally shuts
 * down MPI and exits.  Out-of-range levels only pass through to PMPI.
 */
_EXTERN_C_ int MPI_Pcontrol(const int level, ...) { 
    int rc = PMPI_Pcontrol(level);
    if (level >= 0 && level < real_num_nodes) {
        nodes_status[level] = 0; //mark the node dead for this process
        if (real_rank == level) {
            //we are the victim: leave the computation entirely
            MPI_Finalize();
            exit(0);
        }
    }
    return rc;
}

/* ================== C Wrappers for MPI_Barrier ================== */
_EXTERN_C_ int PMPI_Barrier(MPI_Comm comm);
/* MPI_Barrier wrapper: re-expressed as point-to-point traffic through the
 * wrapped MPI_Send/MPI_Recv so the replication layer applies.
 *
 * Bug fix: the previous version only had root send a release message, so
 * root never waited for anyone and two non-root ranks were never
 * synchronized with each other -- that is not a barrier.  A barrier needs
 * two phases: (1) gather -- every non-root rank checks in at root, which
 * waits for all of them; (2) release -- root then notifies every rank that
 * all have arrived.  Only after the release may any rank leave.
 */
_EXTERN_C_ int MPI_Barrier(MPI_Comm comm) { 
    int _wrap_py_return_val = 0;
    int i;
    int send_buf = 1, recv_buf = 0;
    MPI_Status status;
 {
    if (group_rank == 0) {
        //phase 1 (gather): wait until every other rank has checked in
        for (i = 1; i < num_nodes; i++) {
            _wrap_py_return_val = MPI_Recv(&recv_buf, 1, MPI_INT, i, 11, MPI_COMM_WORLD, &status);
        }
        //phase 2 (release): let everyone go
        for (i = 1; i < num_nodes; i++) {
            _wrap_py_return_val = MPI_Send(&send_buf, 1, MPI_INT, i, 11, MPI_COMM_WORLD);
        }
    } else {
        //check in at root, then block until root's release message
        _wrap_py_return_val = MPI_Send(&send_buf, 1, MPI_INT, 0, 11, MPI_COMM_WORLD);
        _wrap_py_return_val = MPI_Recv(&recv_buf, 1, MPI_INT, 0, 11, MPI_COMM_WORLD, &status);
    }
}
    return _wrap_py_return_val;
}

/* ================== C Wrappers for MPI_Comm_rank ================== */
_EXTERN_C_ int PMPI_Comm_rank(MPI_Comm comm, int *rank);
/* MPI_Comm_rank wrapper: reports the group-relative rank computed in
 * MPI_Init, so the application sees only num_nodes logical ranks and never
 * learns whether it runs on a lead or a replica.
 */
_EXTERN_C_ int MPI_Comm_rank(MPI_Comm comm, int *rank) { 
    *rank = group_rank;
    return 0;
}

/* ================== C Wrappers for MPI_Comm_size ================== */
_EXTERN_C_ int PMPI_Comm_size(MPI_Comm comm, int *size);
/* MPI_Comm_size wrapper: reports the logical group size (half the real
 * world size), hiding the replica ranks from the application.
 */
_EXTERN_C_ int MPI_Comm_size(MPI_Comm comm, int *size) { 
    *size = num_nodes;
    return 0;
}


