#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <sys/time.h>
#include "mpi_profiler.h"

/* Allocate and initialize an empty graph (no vertices).
 * Returns NULL on allocation failure; the caller owns the returned graph. */
Graph* create_graph()
{
    Graph *graph = malloc(sizeof *graph);
    if (graph == NULL) {//BUGFIX: malloc result was not checked
        printf("Error allocating graph.\n");
        return NULL;
    }
    graph->vertex_head = NULL;
    graph->num_vertices = 0;

    return graph;
}

//chain consecutive vertices of each per-rank subgraph with computation edges
//(the gap between one MPI call returning and the next starting is local work)
void add_computation_edges(Graph *subgraphs[], int num_nodes) {
    int g;
    for (g = 0; g < num_nodes; g++) {
        //walk the vertex list pairwise: (cur, cur->next)
        Vertex *cur;
        for (cur = subgraphs[g]->vertex_head;
             cur != NULL && cur->next != NULL;
             cur = cur->next) {
            //computation edge cur -> cur->next, inserted into cur's edge list
            insert_computation_edge(cur, cur->next);
        }
    }
}

//add latency edges: connect each MPI_Send/MPI_Isend vertex to its matching
//receive on the destination rank (for MPI_Irecv, to the MPI_Wait/MPI_Waitall
//vertex that completes it, since that is where blocking actually occurs)
void add_latency_edges(Graph *subgraphs[], int num_nodes) {
    int i = 0;    
    //traverse every graph, for every send, try to match the Recv or Irecv, if it is a Irecv, connect to the wait or waitall vertex
    for (i = 0; i < num_nodes; i++) {
        Vertex *v = subgraphs[i]->vertex_head;
        while (v != NULL) {
            if (v->mpi_id == MPI_Send_counter || v->mpi_id == MPI_Isend_counter) {
                //find the dest, then try to match the correct recv/wait
                int dest_rank = v->dest_rank;
                int found_match = 0;
                Vertex *w = subgraphs[dest_rank]->vertex_head;
                while (w != NULL) {
                    if (!w->is_matched && (w->mpi_id == MPI_Recv_counter 
                                || w->mpi_id == MPI_Irecv_counter)) {                                       
                         //a receive matches when datatype, message size and source
                         //rank agree (MPI_ANY_SOURCE matches any sender)
                         if (w->msg_type == v->msg_type && (w->src_rank == i || w->src_rank == MPI_ANY_SOURCE) 
                                && w->msg_size == v->msg_size) {
                             w->is_matched = 1;
                             found_match = 1; 
                             if (w->mpi_id == MPI_Recv_counter) {
                                 insert_latency_edge(v, w);//directly add latency edge
                             } else {//is Irecv, try to find the matching wait or waitall
                                int wait_vertex_id = w->wait_vertex_id;
                                if (wait_vertex_id != -1) {                                               
                                    //scan forward from the Irecv for the wait vertex
                                    //(it must come later in program order)
                                    Vertex *v_wait = w->next;
                                    while (v_wait != NULL) {
                                        if (v_wait->vertex_id == wait_vertex_id) {
                                            insert_latency_edge(v, v_wait);                                                                                  
                                            break; 
                                        }
                                        v_wait = v_wait->next;
                                    }
                                } else {
                                    //if no wait or waitall for Irecv, no latency edge will be added since there is no blocking
                                    printf("No wait matching for the %lld'th MPI_Irecv in rank %d \n", w->invoke_id, dest_rank);
                                }
                            }
                            break;
                         }
                    }
                    w = w->next;
                }
                //if cannot find one, print out error
                //BUGFIX: message said "matchting"
                if (!found_match) {
                    printf("Cannot find a matching recv for the %lld'th MPI_Send in rank %d \n", v->invoke_id, i);
                }
            }
            v = v->next;
        }
    }
}

//merge matching collective vertices from every rank's subgraph into the single
//vertex kept in subgraphs[0] (its rank is set to -1 to mark it as shared),
//rewiring incoming and outgoing edges and freeing the duplicate vertices
void merge_collective_vertices(Graph *subgraphs[], int num_nodes) {
    int j = 0;
    //traverse all vertices in root, for each collective, find the matching ones from all other nodes, and replace them with the one from root
    Vertex *v = subgraphs[0]->vertex_head;
    Vertex *last_collective = NULL;//to update dest_vertex of edges from last_collective
    while (v != NULL) {
        if (v->is_collective) {
            for (j = 1; j < num_nodes; j++) {
                Vertex *w = subgraphs[j]->vertex_head;
                Vertex *last_w = NULL;//maintain last_w to update the edges destined to a vertex that might be removed
                while (w != NULL) {
                    Vertex *new_w = w;
                    Vertex *next = w->next;
                    if (w->is_collective && w->mpi_id == v->mpi_id && !w->is_matched) {
                        //matching seqid, type, msgsize and root rank; if all match, replace w with v
                        //BUGFIX: last condition compared w->src_rank with itself (always true);
                        //it should compare the two vertices' src_rank fields
                        if (v->invoke_id == w->invoke_id && v->msg_type == w->msg_type 
                                && v->msg_size == w->msg_size && v->src_rank == w->src_rank) {
                            new_w = v;
                            //compute weight of vertex from the benchmark model
                            double weight = get_collective_weight(v, num_nodes);                        
                            v->weight = weight;
                            v->rank = -1;//rank -1 marks a merged (shared) collective vertex
                            //update edges of last_w if they are destined to w, change their dest_vertex to v
                            if (last_w != NULL) {
                                Edge *e = last_w->edge_ptr;
                                while (e != NULL) {
                                    if (e->dest_vertex == w) {
                                        e->dest_vertex = v;
                                        v->in_degree++;
                                    }
                                    e = e->edge_ptr;
                                }

                                //remove w from current subgraph
                                last_w->next = w->next;
                            } else {
                                subgraphs[j]->vertex_head = w->next;
                            }

                            //it is possible that the edges from last_collective are destined to w, update them
                            if (last_collective != NULL) {
                                Edge *e = last_collective->edge_ptr;
                                while (e != NULL) {
                                    if (e->dest_vertex == w) {
                                        e->dest_vertex = v;
                                        v->in_degree++;
                                    }
                                    e = e->edge_ptr;
                                }
                            }
                            
                            //move edge list of w to v (append, or adopt if v has none)
                            if (w->edge_ptr != NULL) {
                                Edge *edge_tail_v = v->edge_ptr;
                                if (edge_tail_v != NULL) {
                                    while (edge_tail_v->edge_ptr != NULL) edge_tail_v = edge_tail_v->edge_ptr;
                                    edge_tail_v->edge_ptr = w->edge_ptr;
                                } else {
                                    //BUGFIX: previously w's edges were leaked when v had no edges yet
                                    v->edge_ptr = w->edge_ptr;
                                }
                                //maintain out_degree of v (in_degree updates happen above)
                                v->out_degree = v->out_degree + w->out_degree;
                            }
                            free(w);         
                            subgraphs[j]->num_vertices--; 
                            last_w = NULL;
                            break;
                        }
                    }
                    last_w = new_w; 
                    w = next;
                }
            }
            last_collective = v;
        }
        v = v->next;
    }
}

/*  merge subgraphs into one big graph
     1. input is subgraphs with no edge information
     2. adds computation edges first
     3. then adds latency edges
     4. finally merges collective operations, resulting in one big graph
    Returns subgraphs[0], which now owns all vertices.
*/

Graph* merge_graph(Graph *subgraphs[], int num_nodes) {
    int i = 0;
    //add computation edges
    add_computation_edges(subgraphs, num_nodes);
    //add latency edges
    add_latency_edges(subgraphs, num_nodes);
    
    //merge collective vertices one by one
    merge_collective_vertices(subgraphs, num_nodes);
 
    //splice every subgraph's vertex list onto the end of subgraphs[0]'s list
    Vertex *last_vertex = get_last_vertex(subgraphs[0]);
    for (i = 1; i < num_nodes; i++) {
        //BUGFIX: skip empty subgraphs -- previously an empty list left
        //last_vertex NULL (or stale) and the next iteration dereferenced it
        if (subgraphs[i]->vertex_head == NULL) {
            continue;
        }
        if (last_vertex == NULL) {
            subgraphs[0]->vertex_head = subgraphs[i]->vertex_head;
        } else {
            last_vertex->next = subgraphs[i]->vertex_head;
        }
        subgraphs[0]->num_vertices += subgraphs[i]->num_vertices;
        //the merged tail is the tail of the subgraph just appended;
        //O(1) per subgraph instead of rescanning the whole merged list
        last_vertex = get_last_vertex(subgraphs[i]);
    }

    return subgraphs[0];
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//compute the vertex weight of a collective operation from benchmark-derived
//linear models: weight = intercept + (per-node cost) * num_nodes
//                        + (per-byte cost) * message size in bytes
double get_collective_weight(Vertex *v, int num_nodes) {
    long long int actual_msg_size = get_actual_msg_size(v->msg_type, v->msg_size);
    double weight = 0;

    switch (v->mpi_id) {
        case MPI_Init_counter:
        case MPI_Finalize_counter:
            //no model for init/finalize: keep the measured execution time
            weight = v->weight;
            break;
        case MPI_Barrier_counter:
            //barrier cost depends only on the node count
            weight = 5.0e-7 + 3.55e-05 * num_nodes;
            break;
        case MPI_Allreduce_counter:
            weight = (-4.2281e-05) + 5.9036e-05 * num_nodes + 1.3913e-08 * actual_msg_size; 
            break;
        case MPI_Alltoall_counter:
            weight = (-2.1883e-06) + 2.9e-05 * num_nodes + 6.7596e-09 * actual_msg_size;
            break;
        case MPI_Scatter_counter:
            weight = (-2.1146e-05) + 1.5571e-05 * num_nodes + 3.1288e-09 * actual_msg_size;
            break;
        case MPI_Gather_counter:
            weight = (-4.6627e-05) + 2.1214e-05 * num_nodes + 3.6591e-09 * actual_msg_size;
            break;
        case MPI_Reduce_counter:
            weight = (-4.6774e-05) + 2.775e-05 * num_nodes + 1.3897e-08 * actual_msg_size;
            break;
        default:
            //not a modeled collective: weight stays 0
            break;
    }

    //the linear models can go slightly negative for small inputs; return magnitude
    return fabs(weight);
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//initialize a freshly allocated vertex: identity/timing from the arguments,
//all graph bookkeeping reset, message fields set to "unknown" defaults
void init_vertex
(
    Vertex *v, 
    long long unsigned vid, 
    int rank,
    double in_time,
    double out_time,
    double exec_time,
    MPI_ID id,
    long long unsigned invoke_id,
    int is_collective
)
{
    //identity and timing
    v->vertex_id = vid;
    v->rank = rank;
    v->mpi_id = id;
    v->invoke_id = invoke_id;
    v->in_time = in_time;
    v->out_time = out_time;
    v->weight = exec_time;//vertex weight = execution time of the MPI call
    v->is_collective = is_collective;
    //graph bookkeeping: no edges yet, not visited, not on any critical path
    v->in_degree = 0;
    v->out_degree = 0;
    v->edge_ptr = NULL;
    v->next = NULL;
    v->visited = 0;
    v->crit_duration = 0.0;
    v->prev_crit_vertex = NULL;
    v->on_crit_path = 0;
    //message info: unknown peers, no associated wait, empty message
    v->src_rank = -1;
    v->dest_rank = -1;
    v->wait_vertex_id = -1;
    v->msg_size = 0;
    // default msg_type to MPI_INT
    v->msg_type = MPI_INT;
    v->is_matched = 0;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//append a vertex at the tail of the graph's singly linked vertex list
//and bump the vertex count; always returns SUCCESS
int insert_vertex(Graph *graph, Vertex *vertex)
{
    if (graph->vertex_head == NULL)
    {
        //empty list: the new vertex becomes the head
        graph->vertex_head = vertex;
    }
    else
    {
        //walk to the tail and link the new vertex there
        Vertex *tail = graph->vertex_head;
        while (tail->next != NULL)
        {
            tail = tail->next;
        }
        tail->next = vertex;
    }

    graph->num_vertices++;
    return SUCCESS;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

Vertex* get_last_vertex(const Graph* graph)
{
    Vertex *v = graph->vertex_head;
    while(v && v->next)
    {
        v = v->next;
    }

    return v;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//add a computation edge v1 -> v2; its weight is the gap between v1's MPI call
//returning and v2's starting (i.e. the local computation time in between)
int insert_computation_edge(Vertex *v1, Vertex *v2) {
    if (v1 == NULL || v2 == NULL) {
        return 0;//nothing to connect
    }
    insert_edge(v1, v2, v2->in_time - v1->out_time, COMPUTATION_EDGE);
    return 0;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */
//message size in bytes = MPI element size (queried via PMPI_Type_size) * count
long long int get_actual_msg_size(MPI_Datatype type, long long int count) {
    int type_size = 0;
    PMPI_Type_size(type, &type_size);
    return (long long int)type_size * count;
}


/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//add a latency edge v1 -> v2 using the regression model from benchmark
//analysis: latency = slope * message_bytes + intercept. Only sends (blocking
//or non-blocking) produce latency edges. Always returns 0.
int insert_latency_edge(Vertex *v1, Vertex *v2) {
    const double intercept = 5.9776e-05;
    const double slope = 3.2524e-09;
    //BUGFIX: v1 was dereferenced (msg_type/msg_size) before the NULL check,
    //the weight was computed in float instead of double, and the function
    //fell off the end without returning a value (UB for a non-void function)
    if (v1 != NULL && v2 != NULL) {
        if (v1->mpi_id == MPI_Send_counter || v1->mpi_id == MPI_Isend_counter) {
            long long int actual_size = get_actual_msg_size(v1->msg_type, v1->msg_size);
            double weight = actual_size * slope + intercept;
            insert_edge(v1, v2, weight, LATENCY_EDGE);
        }
    }
    return 0;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//allocate a new edge vertex1 -> vertex2 with the given weight and type,
//append it to vertex1's edge list, and update both vertices' degrees;
//returns 0 on success, -1 on allocation failure
int insert_edge
(
    Vertex *vertex1, 
    Vertex *vertex2, 
    double weight,
    EDGE_TYPE type 
)
{
    // Create a new edge (BUGFIX: malloc result was not checked)
    Edge *e1 = malloc(sizeof *e1);
    if (e1 == NULL)
    {
        printf("Error allocating edge.\n");
        return -1;
    }
    e1->dest_vertex = vertex2;
    e1->weight = weight;
    e1->edge_ptr = NULL;
    e1->on_crit_path = 0;
    e1->type = type;

    // Append to the tail of vertex1's edge list
    if (vertex1->edge_ptr == NULL)
    {
        vertex1->edge_ptr = e1;
    }
    else
    {
        Edge *e = vertex1->edge_ptr;
        while (e->edge_ptr != NULL)
        {
            e = e->edge_ptr;
        }
        e->edge_ptr = e1;
    }

    // Maintain the degrees on both endpoints
    vertex2->in_degree++;
    vertex1->out_degree++;
    return 0; 
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//debug dump: print every vertex (op, rank, invocation, weight, timestamps)
//followed by its outgoing edges and their weights
void print_graph(Graph *graph)
{
    printf("_________________________start printing graph________________________\n");
    printf("number of vertices: %d\n", graph->num_vertices);

    Vertex *v;
    for (v = graph->vertex_head; v; v = v->next)
    {
        printf("%s_%d_%lld [w: %f] [in_time: %f, out_time: %f]\n", counters[v->mpi_id].name, v->rank, v->invoke_id, v->weight, v->in_time, v->out_time);
        Edge *e;
        for (e = v->edge_ptr; e; e = e->edge_ptr)
        {
            printf("\t->%s_%d_%lld [%f] ", counters[e->dest_vertex->mpi_id].name, e->dest_vertex->rank, e->dest_vertex->invoke_id, e->weight);
        }
        printf("\n");
    }
    printf("____________________________________________________________________\n");
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

int generate_graph
(
    const Graph *graph, 
    const char* dotName, 
    const char* pdfName
)
{
    FILE *fp;
    char commandStr[512];
    int result;
    Vertex *v, *nv;
    Edge *e; 

    fp = fopen(dotName, "w");
    if(fp == NULL)
    {
        printf("Can not open %s file to save MPI task graph.\n", dotName);
        return -1;
    }

    fprintf(fp, "digraph MPI_TASK_GRAPH {\n");

    //for each vertex, if is_collective, insert, else subgraph ran_x, insert the edge
    v = graph->vertex_head;
    while(v)
    {
        int rank = v->rank;
        //print vertex name and label
        if (rank == -1) 
        {
            if (v->mpi_id == MPI_Finalize_counter) 
            {
                fprintf(fp, "\t%s_%lld [label=\"%s_%d_%lld\"", counters[v->mpi_id].name, v->invoke_id,
                        counters[v->mpi_id].name, v->rank, v->invoke_id); 
            } 
            else 
            {
                fprintf(fp, "\t%s_%lld [label=\"%s_%d_%lld\\n(%f)\"", 
                        counters[v->mpi_id].name, v->invoke_id, 
                        counters[v->mpi_id].name, v->rank, v->invoke_id, v->weight);
            }
        } 
        else 
        {
            fprintf(fp, "\t%s_%d_%lld [label=\"%s_%d_%lld\\n(%f)\"", 
                    counters[v->mpi_id].name, v->rank, v->invoke_id, 
                    counters[v->mpi_id].name, v->rank, v->invoke_id, v->weight);
        } 

        /*if(v->on_crit_path)
        {
            fprintf(fp, " color=red");
        }
        */

        fprintf(fp, "];\n");

        e = v->edge_ptr;
        while(e)
        {
            //if vertex is not -1, cluster it
            if (rank != -1) 
            {
                fprintf(fp, "\tsubgraph cluster_%d {\n", rank);
                fprintf(fp, "\t\tcolor=white;\n");        
                fprintf(fp, "\t\tlabel=\"Rank %d\";\n", rank);
                fprintf(fp, "\t\tfontcolor=blue\n");
                fprintf(fp, "\t\t%s_%d_%lld;\n", counters[v->mpi_id].name, v->rank, v->invoke_id);
                fprintf(fp, "\t}\n");
            }

            // add the labels for vertices and edges
            if (v->rank == -1) 
            {
                if (e->dest_vertex->rank == -1) 
                {   
                    //both v and dest is collective
                    fprintf(fp, "\t%s_%lld -> %s_%lld [label=\"%f",
                        counters[v->mpi_id].name, v->invoke_id,
                        counters[e->dest_vertex->mpi_id].name, e->dest_vertex->invoke_id,
                        e->weight);
                } 
                else 
                {   
                    //v is -1 but dest is not                    
                    fprintf(fp, "\t%s_%lld -> %s_%d_%lld [label=\"%f",
                        counters[v->mpi_id].name, v->invoke_id,
                        counters[e->dest_vertex->mpi_id].name, e->dest_vertex->rank, e->dest_vertex->invoke_id,
                        e->weight);
                }
            } 
            else 
            {
                if (e->dest_vertex->rank == -1) 
                {
                    //dest is collective
                    fprintf(fp, "\t%s_%d_%lld -> %s_%lld [label=\"%f",
                        counters[v->mpi_id].name, v->rank, v->invoke_id,
                        counters[e->dest_vertex->mpi_id].name, e->dest_vertex->invoke_id,
                        e->weight);
                } 
                else 
                {
                    fprintf(fp, "\t%s_%d_%lld -> %s_%d_%lld [label=\"%f",
                        counters[v->mpi_id].name, v->rank, v->invoke_id,
                        counters[e->dest_vertex->mpi_id].name, e->dest_vertex->rank, e->dest_vertex->invoke_id,
                        e->weight);
                }
            }   

            if(e->type == LATENCY_EDGE)
            {
                fprintf(fp, " (%llu)", get_actual_msg_size(v->msg_type, v->msg_size));
            }

            // finish this label
            fprintf(fp, "\"");

            // color attribute
            if(e->on_crit_path)
            {
                fprintf(fp, " color=red");
            }

            fprintf(fp, "];\n");

            
            e = e->edge_ptr;
        }
        v = v->next;
    }

	fprintf(fp, "}");
	fclose(fp);
    sprintf(commandStr, "dot -Tpdf %s -o %s", dotName, pdfName);
    result = system(commandStr); 
    sprintf(commandStr, "evince %s &", pdfName);
    result = system(commandStr);
    return result;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//write the critical path to fname: one line per vertex ("<MPI op> <rank>"),
//followed by the message size in bytes (latency edge) or the weight rounded
//to the nearest integer (computation edge) of the critical edge leaving it.
//Returns SUCCESS, or -1 if the file cannot be opened.
int output_crit_path
(
    const Path* crit_path,
    const Graph* graph,
    const char* fname
)
{
    (void)graph;//unused; parameter kept for API compatibility

    FILE *fp = fopen(fname, "w");
    if(fp == NULL)
    {
        printf("Can not open %s to write critical path\n", fname);
        return -1;
    }

    Node *n = crit_path->head;
    while(n)
    {
        Node *next_n = n->next;
        Vertex *v = n->vertex;
        Edge *e = v->edge_ptr;
        fprintf(fp, "%s %d\n", counters[v->mpi_id].name, v->rank);
        while(e)
        {
            //only report the edge that leads to the next vertex on the path
            if(next_n)
            {
                Vertex *nv = next_n->vertex;
                if(e->dest_vertex == nv && e->on_crit_path)
                {
                    if(e->type == LATENCY_EDGE)
                    {
                        //BUGFIX: get_actual_msg_size returns (signed) long
                        //long int, so the matching conversion is %lld
                        fprintf(fp, "%lld\n", get_actual_msg_size(v->msg_type, v->msg_size));
                    }
                    else
                    {
                        // Otherwise output the computation time,
                        // rounded to the nearest integer
                        fprintf(fp, "%d\n", (int)round(e->weight));
                    }
                }
            }
            e = e->edge_ptr;
        }
        n = next_n;
    }

    fclose(fp);

    return SUCCESS;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//release every edge and vertex owned by the graph and reset it to empty;
//the Graph struct itself is NOT freed (unchanged contract -- the caller
//still owns it)
void free_graph(Graph *graph)
{
    if (graph == NULL)
    {
        return;
    }

    Vertex *v = graph->vertex_head;
    while(v)
    {
        Vertex *next_v = v->next;
        Edge *e = v->edge_ptr;
        while(e)
        {
            //BUGFIX: read the successor before freeing -- the old code did
            //free(e); e = e->edge_ptr; which is a use-after-free
            Edge *next_e = e->edge_ptr;
            free(e);
            e = next_e;
        }
        //BUGFIX: the old code guarded with if(!v) free(v), which never
        //freed any vertex (v is non-NULL inside the loop), leaking them all
        free(v);
        v = next_v;
    }

    graph->vertex_head = NULL;
    graph->num_vertices = 0;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//compute the critical (longest-duration) path of the DAG.
//topo_path must list the vertices in topological order; the resulting path is
//stored in crit_path (head = first vertex) and its total duration in *length.
//Returns SUCCESS, or ERROR if no end vertex could be determined.
int compute_critical_path
(
    const Path* topo_path, 
    Path* crit_path, 
    double *length
)
{
    // Pass 1: longest-path relaxation in topological order -- extend each
    // vertex's crit_duration through every outgoing edge, remembering the
    // predecessor that produced the maximum.
    Node *n = topo_path->head;
    while(n)
    {
        Vertex *vertex = n->vertex;
        Edge *edge = vertex->edge_ptr;
        while(edge)
        {
            // computation edge + weight on vertex
            double temp = vertex->crit_duration + edge->weight 
                          + vertex->weight;
            Vertex *nvertex = edge->dest_vertex;
            if(nvertex->crit_duration <= temp)
            {
                nvertex->crit_duration = temp;
                nvertex->prev_crit_vertex = vertex;
            }
            edge = edge->edge_ptr;
        }
        n = n->next;
    }

    // Find the max duration -- the endpoint of the critical path
    double maxVal = 0.0;
    n = topo_path->head; 
    Vertex *max_vertex = NULL;
    while(n)
    {
        if(maxVal <= n->vertex->crit_duration)
        {
            maxVal = n->vertex->crit_duration;
            max_vertex = n->vertex;
        }
        n = n->next;
    }
    *length = maxVal;

    if(max_vertex==NULL)
    {
        printf("Error in finding the critical path\n");
        return ERROR;
    }

    // insert() prepends, so walking predecessors below leaves crit_path
    // ordered from the start of the path to max_vertex
    insert(crit_path, max_vertex);

    // traverse the predecessors to get the critical path
    max_vertex->on_crit_path = 1;
    Vertex *prev_v = max_vertex->prev_crit_vertex;
    while(prev_v)
    {
        insert(crit_path, prev_v);
        prev_v->on_crit_path = 1;
        prev_v = prev_v->prev_crit_vertex;
    }

    // Pass 2: traverse the graph again to mark the edges on the critical
    // path. For each on-path vertex, pick among edges to on-path successors
    // whose prev_crit_vertex is this vertex, keeping the heaviest one.
    n = topo_path->head;
    double crit_weight; 
    Vertex * v;
    Edge * e; 
    Edge * crit_e;
    while(n)
    {
        v = n->vertex;
        e = v->edge_ptr;
        crit_e = e;
        crit_weight = -1.0;
        while(e)
        {
            if(v->on_crit_path && e->dest_vertex->on_crit_path)
            {
                if(e->weight > crit_weight && e->dest_vertex->prev_crit_vertex == v)
                {
                    crit_weight = e->weight; 
                    // unmark the previously chosen edge; only one outgoing
                    // edge per vertex may stay marked
                    crit_e->on_crit_path = 0;
                    e->on_crit_path = 1;
                    crit_e = e;
                }
            }
            e = e->edge_ptr;
        }
        n = n->next;
    }

    return SUCCESS;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//build a topologically ordered Path of the graph's vertices by running a
//post-order DFS (visit) from every vertex with no incoming edges;
//visit() prepends, so the final list is in topological order
void topological_sort
(
    const Graph *graph, 
    Path *path
)
{
    path->head = NULL;

    Vertex *v;
    for (v = graph->vertex_head; v; v = v->next)
    {
        if (v->in_degree == 0)
        {
            visit(v, path);
        }
    }
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//post-order DFS helper for topological_sort: recurse into every unvisited
//successor first, then prepend this vertex to the path
void visit(Vertex *vertex, Path *path)
{
    if (vertex->visited)
    {
        return;//already placed in the path
    }
    vertex->visited = 1;

    Edge *e;
    for (e = vertex->edge_ptr; e; e = e->edge_ptr)
    {
        visit(e->dest_vertex, path);
    }

    insert(path, vertex);
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//prepend a vertex at the head of the path
void insert(Path *path, Vertex *vertex)
{
    Node *newNode = malloc(sizeof *newNode);
    if (newNode == NULL)//BUGFIX: malloc result was not checked
    {
        printf("Error allocating path node.\n");
        return;
    }
    newNode->vertex = vertex;
    newNode->next = path->head;
    path->head = newNode;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//append a vertex at the tail of the path
void append(Path *path, Vertex* vertex)
{
    Node *newNode = malloc(sizeof *newNode);
    if (newNode == NULL)//BUGFIX: malloc result was not checked
    {
        printf("Error allocating path node.\n");
        return;
    }
    newNode->vertex = vertex;
    newNode->next = NULL;//BUGFIX: next was left uninitialized (UB on traversal)
    if(path->head == NULL)
    {
        path->head = newNode;
    }
    else
    {
        //BUGFIX: walk to the tail -- the old code overwrote head->next,
        //discarding (and leaking) every node past the head instead of appending
        Node *tail = path->head;
        while (tail->next != NULL)
        {
            tail = tail->next;
        }
        tail->next = newNode;
    }
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//debug dump: print every vertex on the path with its weight and timestamps
void print_path(const Path *path)
{
    printf("------------------------------------------------------------\n");
    printf("                           Path                             \n");
    Node *n;
    for (n = path->head; n; n = n->next)
    {
        Vertex *v = n->vertex;
        printf("%s_%d_%lld [w: %f] [in_time: %f, out_time: %f]\n", 
                counters[v->mpi_id].name, 
                v->rank, v->invoke_id, 
                v->weight, 
                v->in_time, v->out_time);
    }
    printf("\n");
    printf("------------------------------------------------------------\n");
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//record the wall-clock timestamp just before a wrapped MPI call starts;
//Post_MPI() pairs this with an end timestamp (stored in the global in_time)
void Pre_MPI(void)
{
    in_time = PMPI_Wtime();
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

//finish profiling a wrapped MPI call: take the end timestamp, build a vertex
//for the call (weight = execution time), insert it into the per-rank graph,
//and bump the per-operation invocation counter.
//Returns SUCCESS, or -1 if the vertex cannot be allocated.
int Post_MPI(MPI_ID id)
{
    int rank;
    int is_collective; 

    // Get execution time (Pre_MPI recorded in_time just before the call)
    out_time = PMPI_Wtime();
    assert(out_time >= in_time);

    // get current process id
    PMPI_Comm_rank(MPI_COMM_WORLD, &rank);	

    double exec_time = out_time - in_time;

    // Insert the vertex 
    Vertex *v = malloc(sizeof *v);
    if(!v)
    {
        printf("Error for creating new vertex. \n");
        return -1;
    }
    //collectives are merged across ranks later by merge_collective_vertices()
    switch(id)
    {
        case MPI_Barrier_counter:
        case MPI_Alltoall_counter:
        case MPI_Scatter_counter:
        case MPI_Gather_counter:
        case MPI_Reduce_counter:
        case MPI_Allreduce_counter:
        case MPI_Init_counter:
        case MPI_Finalize_counter:
            is_collective = 1;
            break;
        default:
            is_collective = 0;
            break;
    }

    init_vertex(v, vid++, rank, in_time, out_time, exec_time, 
                id, counters[id].value, is_collective);

    insert_vertex(per_rank_graph, v);

    // Update the operation counter info
    counters[id].value++;

    //BUGFIX: the function previously fell off the end without returning a
    //value (UB for a non-void function whose result is used)
    return SUCCESS;
}


/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */
//write per-operation timing statistics (invocations, mean, min, median, max)
//for every MPI function observed in the graph to a tab-separated file
void output_mpi_statistics
(
    const char* fname, 
    Graph* graph, 
    int num_graphs
)
{
    (void)num_graphs;//unused; parameter kept for API compatibility
    int i;
    double median, mean, min, max;
    FILE *fp;
    Vertex *v;

    // One array of observed execution times per MPI operation
    Array** time_array = malloc(sizeof(Array*) * MPI_Total_counter);
    if (time_array == NULL)//BUGFIX: malloc result was not checked
    {
        printf("Can not allocate statistics arrays\n");
        return;
    }
    for(i = 0; i < MPI_Total_counter; i++)
    {
        time_array[i] = create_array();
    }

    // Collect the weight (execution time) of every vertex, bucketed by op
    v = graph->vertex_head;
    while(v)
    {
        push_back(time_array[v->mpi_id], v->weight);
        v = v->next;
    }

    fp = fopen(fname, "w");
    if (fp != NULL)//BUGFIX: fopen result was not checked before writing
    {
        // Generate headers 
        fprintf(fp, "Function\tInvocations\tMean\tMin\tMedian\tMax\n");

        // Write the data
        // NOTE(review): the loop stops at MPI_Total_counter - 1, skipping the
        // last counter slot -- presumably a sentinel entry; confirm
        for(i = 0; i < MPI_Total_counter - 1; i++)
        {
            compute_statistics(time_array[i], &median,
                               &mean, &min, &max);
            if(time_array[i]->num_elements > 0)
            {
                fprintf(fp, "%s\t%d\t%f\t%f\t%f\t%f\n", counters[i].name, 
                        time_array[i]->num_elements, mean, min, median, max);
            }
        }
        fclose(fp);//BUGFIX: the file handle was previously leaked
    }
    else
    {
        printf("Can not open %s to write MPI statistics\n", fname);
    }

    // free array
    for(i = 0; i < MPI_Total_counter; i++)
    {
        free_array(time_array[i]);
    }
    free(time_array);
}


/******************************************************************************/
//                              MPI Operations                                //
/******************************************************************************/

_EXTERN_C_ int MPI_Init(int *argc, char ***argv) { 
    /*
     * Wrapper for MPI_Init: times the call, creates the per-rank graph,
     * and records the call as the first (collective) vertex.
     *
     * BUG FIX: the original timed the start with gettimeofday() but the
     * end with PMPI_Wtime(). Those clocks have unrelated epochs on many
     * MPI implementations, making exec_time meaningless. MPI_Wtime may
     * not be used before MPI_Init anyway, so both endpoints now use
     * gettimeofday().
     */
    int _wrap_py_return_val = 0;
    int rank;
    Vertex *v;
    int is_collective = 1;
    double exec_time;
    struct timeval timer;

    gettimeofday(&timer, NULL);
    in_time = timer.tv_sec + (timer.tv_usec/1000000.0); 

    _wrap_py_return_val = PMPI_Init(argc, argv);

    gettimeofday(&timer, NULL);
    out_time = timer.tv_sec + (timer.tv_usec/1000000.0);

    PMPI_Comm_rank(MPI_COMM_WORLD, &rank);

    initialized = 1; 
    exec_time = out_time - in_time;

    /* Create the per-rank graph and its MPI_Init vertex. */
    per_rank_graph = create_graph();
    v = (Vertex *)malloc(sizeof(Vertex));
    if(!v)
    {
        printf("Error for creating new vertex. \n");
        return ERROR;
    }

    init_vertex(v, vid++, rank, in_time, out_time, exec_time, 
                MPI_Init_counter, counters[MPI_Init_counter].value, 
                is_collective);

    insert_vertex(per_rank_graph, v);

    /* Update the invocation counter for MPI_Init. */
    counters[MPI_Init_counter].value++;
    
    return _wrap_py_return_val;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/*
 * Wrapper for MPI_Finalize.
 *
 * Records a zero-duration MPI_Finalize vertex on every rank, then gathers
 * all per-rank graphs onto ROOT, where the full analysis pipeline runs:
 * merge, statistics output, topological sort, critical-path computation,
 * DOT/PDF generation. Non-root ranks just ship their graph and free it.
 *
 * Protocol (order matters — do not reorder the send/recv phases):
 *   1. every non-root rank sends its vertex count (VERTICES_TAG);
 *   2. every non-root rank streams its vertices one by one as raw bytes
 *      (GRAPH_TAG); ROOT reconstructs them, nulling the pointer fields
 *      that are meaningless across address spaces.
 */
_EXTERN_C_ int MPI_Finalize() { 

    int _wrap_py_return_val = 0;
    int num_procs;
    int rank;
    Vertex *v; 
    double exec_time;
    int is_collective = 1;
    int graph_tag = 44;   /* NOTE(review): unused; GRAPH_TAG macro is used instead */
    int i, j;
    MPI_Status  status;
    int vertex_size = sizeof(Vertex);

    // Special case, assume MPI_Finalize has executation time 0.0 
    in_time = PMPI_Wtime();
    out_time = in_time;
    exec_time = 0.0;

    // Get number of processors
    PMPI_Comm_size(MPI_COMM_WORLD, &num_procs);	
    // Get the rank 
    PMPI_Comm_rank(MPI_COMM_WORLD, &rank);	

    /* Record the MPI_Finalize call itself as a (zero-cost) vertex. */
    v = (Vertex *)malloc(sizeof(Vertex));
    if(!v)
    {
        printf("Error for creating new vertex. \n");
        return ERROR;
    }

    init_vertex(v, vid++, rank, in_time, out_time, exec_time, 
                MPI_Finalize_counter, counters[MPI_Finalize_counter].value, 
                is_collective);

    insert_vertex(per_rank_graph, v);

    // Update counter info
    counters[MPI_Finalize_counter].value++;

    /////////////////////////////////////////////////////////////////
    ////          Send per_rank_graph to root                    ////
    /////////////////////////////////////////////////////////////////

    /* Phase 1: ROOT learns how many vertices each rank will send.
     * NOTE(review): num_vertices is never freed on ROOT — leak, but
     * harmless here since PMPI_Finalize follows immediately. */
    if(rank == ROOT)
    {
        // Allocate memory for receiving graph information from other nodes
        num_vertices = (int *)malloc(sizeof(int)*num_procs);
        for(i = 1; i < num_procs; i++)
        {
            num_vertices[i] = 0;
        }
        num_vertices[0] = per_rank_graph->num_vertices;
    }

    if(rank != ROOT)
    {
        PMPI_Send(&per_rank_graph->num_vertices, 1, MPI_INT, ROOT, 
                VERTICES_TAG, MPI_COMM_WORLD);
    }

    else 
    {
        for(i = 1; i < num_procs; i++)
        {
            PMPI_Recv(&num_vertices[i], 1, MPI_INT, i, 
                    VERTICES_TAG, MPI_COMM_WORLD, &status);

        }
    }

    // Allocate memory on Root
    /* ROOT keeps one Graph per rank; slot 0 aliases its own graph. */
    if(rank == ROOT)
    {
        root_graph = (Graph **)malloc(sizeof(Graph *) * num_procs);
        for(i = 1; i < num_procs; i++)
        {
            root_graph[i] = create_graph();
        }
        root_graph[0] = per_rank_graph;
    }

    /* Phase 2: stream every vertex as raw bytes. The pointer members of
     * a received Vertex are garbage from the sender's address space, so
     * they are reset to NULL before insertion. */
    if (rank == 0) 
    {
        for (j = 1; j< num_procs; j++) 
        {
            for (i = 0; i < num_vertices[j]; i++) 
            {
                Vertex *new_vertex = (Vertex *)malloc(sizeof(Vertex));
                PMPI_Recv(new_vertex, sizeof(Vertex), MPI_BYTE, j, 
                            GRAPH_TAG, MPI_COMM_WORLD, &status);
                new_vertex->next = NULL;
                new_vertex->edge_ptr = NULL;
                new_vertex->prev_crit_vertex = NULL;
                insert_vertex(root_graph[j], new_vertex);
            }
        }
    } else {
        v = per_rank_graph->vertex_head;
        int count = 0;
        while(v)
        {
            PMPI_Send(&(*v), vertex_size, MPI_BYTE, ROOT, GRAPH_TAG,
                                                  MPI_COMM_WORLD);
            count++;
            v = v->next;
        }
    }

    /* Analysis pipeline runs only on ROOT once all graphs are local. */
    if(rank == ROOT)
    {
        merge_graph(root_graph, num_procs);

        printf("After merging graph\n");
        printf("_______________________________________________________________\n");
        print_graph(root_graph[0]);

        // Output mpi operation statistics into stat_fname 
        output_mpi_statistics(stat_fname, root_graph[0], num_procs);

        // Sort the graph in topological order
        Path * topo_path = (Path *)malloc(sizeof(Path));
        topo_path->head = NULL;
        topological_sort(root_graph[0], topo_path);

        /* Critical path over the topologically sorted vertices. */
        double pathLength = 0.0;
        Path * crit_path = (Path *) malloc(sizeof(Path));
        crit_path->head = NULL;
        compute_critical_path(topo_path, crit_path, &pathLength);

        generate_graph(root_graph[0], dot_fname, pdf_fname);
        // Output critical path to ctrical_fname
        output_crit_path(crit_path, root_graph[0], critical_fname);

        // Free stuff
        /* NOTE(review): only the Path headers are freed; their internal
         * nodes are presumably owned elsewhere — confirm. */
        free(topo_path);
        free(crit_path);

        /* root_graph[0] is per_rank_graph, freed here via free_graph;
         * ranks 1..n-1 were merged, so only the Graph shells remain. */
        free_graph(root_graph[0]);
        for(i = 1; i < num_procs; i++)
        {
            free(root_graph[i]);
        }
        
        free(root_graph);
    }
    else
    {
        // Free graph
        free_graph(per_rank_graph);
    }

    _wrap_py_return_val = PMPI_Finalize();

    return _wrap_py_return_val;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Barrier: time the call and record it as a vertex. */
_EXTERN_C_ int MPI_Barrier(MPI_Comm comm)
{
    int rc;

    Pre_MPI();
    rc = PMPI_Barrier(comm);
    Post_MPI(MPI_Barrier_counter);

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Alltoall: time the collective, record a vertex, and
 * annotate it with the per-process send count and datatype. */
_EXTERN_C_ int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) 
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
    Post_MPI(MPI_Alltoall_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = sendtype;
    last->msg_size = sendcount;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Scatter: time the collective, record a vertex, and
 * annotate it with message size/type and the root rank as both
 * source and destination. */
_EXTERN_C_ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
    Post_MPI(MPI_Scatter_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = sendtype;
    last->msg_size = sendcount;
    last->dest_rank = root;
    last->src_rank = root;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Gather: time the collective, record a vertex, and
 * annotate it with message size/type and the root rank as both
 * source and destination. */
_EXTERN_C_ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) 
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
    Post_MPI(MPI_Gather_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = sendtype;
    last->msg_size = sendcount;
    last->dest_rank = root;
    last->src_rank = root;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Reduce: time the collective, record a vertex, and
 * annotate it with element count/type and the root rank as both
 * source and destination. */
_EXTERN_C_ int MPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
    Post_MPI(MPI_Reduce_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = datatype;
    last->msg_size = count;
    last->dest_rank = root;
    last->src_rank = root;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Allreduce: time the collective, record a vertex, and
 * annotate it with the element count and datatype. */
_EXTERN_C_ int MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);
    Post_MPI(MPI_Allreduce_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = datatype;
    last->msg_size = count;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Send: time the call, record a vertex, and annotate it
 * with message size/type and destination (used later by the latency
 * edge matching against the destination's Recv/Irecv). */
_EXTERN_C_ int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Send(buf, count, datatype, dest, tag, comm);
    Post_MPI(MPI_Send_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = datatype;
    last->msg_size = count;
    last->dest_rank = dest;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/*
 * Wrapper for MPI_Isend: time the call, record a vertex annotated with
 * message size/type/destination, and append a request-tracking node so
 * a later MPI_Wait can link back to this vertex.
 *
 * BUG FIX: the original left `is_matched` uninitialized when the new
 * node became the list head (only the non-head branch set it to 0);
 * MPI_Wait reads that field, so the head node's match status was
 * undefined behavior. The insertion is now unified and always
 * initializes every field.
 */
_EXTERN_C_ int MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request)
{ 
    int _wrap_py_return_val = 0;
    MPI_Request_node * request_node;
 
    Pre_MPI();
    _wrap_py_return_val = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);
    Post_MPI(MPI_Isend_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *v = get_last_vertex(per_rank_graph); 
    v->msg_size = count;
    v->dest_rank = dest;
    v->msg_type = datatype;

    /* Build a fully-initialized tracking node. */
    request_node = (MPI_Request_node *)malloc(sizeof(MPI_Request_node));
    if(!request_node)
    {
        printf("Error for creating request node. \n");
        return _wrap_py_return_val;
    }
    request_node->vertex = v;
    request_node->request = request;
    request_node->next = NULL;
    request_node->is_matched = 0;

    /* Append to the outstanding-request list (order preserved). */
    if(request_head == NULL)
    {
        request_head = request_node;
    }
    else
    {
        MPI_Request_node *tail = request_head;
        while (tail->next) tail = tail->next;
        tail->next = request_node;
    }
    
    return _wrap_py_return_val;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/* Wrapper for MPI_Recv: time the call, record a vertex, and annotate it
 * with message size/type and source (used later by the latency edge
 * matching against the sender's Send/Isend). */
_EXTERN_C_ int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status)
{ 
    int rc;

    Pre_MPI();
    rc = PMPI_Recv(buf, count, datatype, source, tag, comm, status);
    Post_MPI(MPI_Recv_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    Vertex *last = get_last_vertex(per_rank_graph);
    last->msg_type = datatype;
    last->msg_size = count;
    last->src_rank = source;

    return rc;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/*
 * Wrapper for MPI_Irecv: time the call, record a vertex annotated with
 * message size/type/source, and append a request-tracking node so a
 * later MPI_Wait can link back to this vertex.
 *
 * BUG FIX: same defect as MPI_Isend — the original left `is_matched`
 * uninitialized when the new node became the list head, which MPI_Wait
 * later reads (undefined behavior). The insertion is unified and every
 * field is initialized.
 */
_EXTERN_C_ int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request) 
{ 
    int _wrap_py_return_val = 0;
    Vertex *v; 
    MPI_Request_node * request_node;
 
    Pre_MPI();
    _wrap_py_return_val = PMPI_Irecv(buf, count, datatype, source, tag, comm, request);
    Post_MPI(MPI_Irecv_counter);

    /* Attach message metadata to the vertex Post_MPI just inserted. */
    v = get_last_vertex(per_rank_graph);
    v->msg_size = count;
    v->src_rank = source;
    v->msg_type = datatype;
    
    /* Build a fully-initialized tracking node. */
    request_node = (MPI_Request_node *)malloc(sizeof(MPI_Request_node));
    if(!request_node)
    {
        printf("Error for creating request node. \n");
        return _wrap_py_return_val;
    }
    request_node->vertex = v;
    request_node->request = request;
    request_node->next = NULL;
    request_node->is_matched = 0;

    /* Append to the outstanding-request list (order preserved). */
    if(request_head == NULL)
    {
        request_head = request_node;
    }
    else
    {
        MPI_Request_node *tail = request_head;
        while (tail->next) tail = tail->next;
        tail->next = request_node;
    }

    return _wrap_py_return_val;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/*
 * Wrapper for MPI_Wait: time the wait, record a vertex, then link the
 * wait vertex to the Isend/Irecv vertex whose MPI_Request* it completes.
 *
 * BUG FIX: the original kept scanning after a match. Because an
 * MPI_Request object (often stack-allocated) can be reused across
 * calls, several outstanding nodes may share the same address; one
 * wait must complete exactly one of them, in issue order. The loop now
 * breaks after the first unmatched node with this request — matching
 * the behavior of the (commented-out) Waitall implementation.
 */
_EXTERN_C_ int MPI_Wait(MPI_Request *request, MPI_Status *status) 
{ 
    int _wrap_py_return_val = 0;
 
    Pre_MPI();
    _wrap_py_return_val = PMPI_Wait(request, status);
    Post_MPI(MPI_Wait_counter);

    Vertex *request_v; 
    Vertex *wait_vertex = get_last_vertex(per_rank_graph);
    MPI_Request_node *request_node = request_head;

    /* Find the oldest unmatched request with this handle address. */
    while(request_node)
    {
        if(!request_node->is_matched && request_node->request == request)
        {
            request_v = request_node->vertex;
            request_v->wait_vertex_id = wait_vertex->vertex_id;
            request_node->is_matched = 1;
            printf("----wait is matched to operation %s_%d_%lld\n", counters[request_v->mpi_id].name,
                    request_v->rank, request_v->invoke_id);
            break;   /* one wait completes exactly one request */
        }
        request_node = request_node->next;
    }
    
    return _wrap_py_return_val;
}

/* \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ */

/*
 * Wrapper for MPI_Waitall, implemented as one profiled MPI_Wait per
 * request so that each completion gets its own wait vertex and is
 * individually linked back to its Isend/Irecv vertex.
 *
 * Improvements vs. original: the large block of dead commented-out code
 * was removed, and the loop now stops at the first failing wait instead
 * of letting a later success overwrite an earlier error code.
 */
_EXTERN_C_ int MPI_Waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_statuses) 
{ 
    int _wrap_py_return_val = 0;
    int i; 

    for (i = 0; i < count; i++) {
        _wrap_py_return_val = MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
        if (_wrap_py_return_val != MPI_SUCCESS) {
            break;   /* report the first failure to the caller */
        }
    }

    return _wrap_py_return_val;
}

