#include <assert.h>
#include <math.h>
#include <stdio.h>

#include <mpi.h>

#include "matrix.h"



/*
 * Process-grid bookkeeping for Fox's algorithm.
 * The p processes are arranged as a q x q Cartesian grid (q = sqrt(p)),
 * with one communicator spanning the whole grid plus one per row and
 * one per column for the broadcast / circular-shift phases.
 */
typedef struct {
    int processor_count; //total number of processors (p) in MPI_COMM_WORLD
    MPI_Comm comm; //Communicator for entire world (2-D Cartesian topology)
    MPI_Comm row_comm; //Communicator for my row (used to broadcast A blocks)
    MPI_Comm col_comm; //Communicator for my col (used to shift B blocks)
    int length; //order of grid q^2 == p
    int my_row; //my row coordinate in the grid, 0 <= my_row < length
    int my_col; //my column coordinate in the grid, 0 <= my_col < length
    int my_rank; //my rank within comm (may differ from world rank: reorder=1)
} GRID_INFO_T;

/*
 * Populate *grid: build a periodic q x q Cartesian communicator over
 * MPI_COMM_WORLD (q = sqrt(p), asserted to be exact), record this
 * process's rank and (row, col) coordinates, and carve out the row and
 * column sub-communicators used by Fox's algorithm.
 */
void setup_grid(GRID_INFO_T* grid)
{
    int world_rank;
    int dims[2];
    int periods[2];
    int coords[2];
    int remain_dims[2];

    MPI_Comm_size(MPI_COMM_WORLD, &grid->processor_count);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    /* The algorithm requires a perfect-square process count. */
    grid->length = (int)sqrt((double)grid->processor_count);
    assert(grid->length * grid->length == grid->processor_count);

    dims[0] = grid->length;
    dims[1] = grid->length;
    periods[0] = 1; /* wrap around in both dimensions (torus), */
    periods[1] = 1; /* needed for the circular shift of B      */
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &grid->comm);

    /* Rank may have been reordered by Cart_create; query it from comm. */
    MPI_Comm_rank(grid->comm, &grid->my_rank);
    MPI_Cart_coords(grid->comm, grid->my_rank, 2, coords);
    grid->my_row = coords[0];
    grid->my_col = coords[1];

    /* Row communicator: fix the row dimension, keep columns free. */
    remain_dims[0] = 0;
    remain_dims[1] = 1;
    MPI_Cart_sub(grid->comm, remain_dims, &grid->row_comm);

    /* Column communicator: fix the column dimension, keep rows free. */
    remain_dims[0] = 1;
    remain_dims[1] = 0;
    MPI_Cart_sub(grid->comm, remain_dims, &grid->col_comm);
}


/*
 * Fox's algorithm: compute local_C = (block of) A * B on a q x q process
 * grid, then gather the full matrices to rank 0 and print "A * B = C".
 *
 * n        - order of the global matrices (n x n).
 * grid     - process grid built by setup_grid().
 * local_A/B - this process's blocks of A and B (inputs).
 * local_C  - this process's block of the product (output, zeroed here).
 * global_* - allocated HERE on every rank and filled by MPI_Gather;
 *            only rank 0's copies are rearranged and printed.
 *
 * NOTE(review): temp_A is allocated but never freed — leaks once per call
 * (presumably matrix.h provides a matching free routine; confirm).
 * NOTE(review): local_A and local_B are shifted/overwritten by the
 * broadcasts and Sendrecv_replace calls, so the gathered "A" and "B" are
 * only correct because q full shifts return B to its start — verify.
 */
void Fox(int n, GRID_INFO_T* grid, Matrix_T* local_A, Matrix_T* local_B, Matrix_T* local_C, Matrix_T* global_A,Matrix_T* global_B,Matrix_T* global_C)
{
    Matrix_T temp_A; //receive buffer for the broadcast A block each stage
    int stage;
    int bcast_root; //for broadcast
    int n_bar;
    int source; //for sendrevc_replace
    int dest;   //for sendrevc_replace
    MPI_Status status;
    int root = 0;
    int flag;

    /* Sanity check: MPI must already be initialized by the caller.
       NOTE(review): if it is NOT, grid->comm is likely invalid too, so
       MPI_Abort on it may itself fail — MPI_COMM_WORLD would be safer. */
    MPI_Initialized(&flag);
    if(!flag){
        MPI_Abort(grid->comm,1);
    }

    matrix_allocate(&temp_A,local_A->row,local_A->col);
    n_bar = n/grid->length; //block order n/q (computed but unused below)
    matrix_set_to_zero(local_C); //C accumulates across stages; start at 0

    //calculate source and dest for circular shift of B
    //ranks in col_comm are row coordinates: B moves "up" one row per stage
    source = (grid->my_row+1) % grid->length;
    dest = (grid->my_row + grid->length -1) % grid->length;

    for(stage=0; stage < grid->length; stage++){
        /* At stage s, the process in column (my_row + s) mod q owns the
           A block to broadcast along the row. */
        bcast_root = (grid->my_row+stage) % grid->length;
        if(bcast_root == grid->my_col){
            /* I am the root: broadcast my own A block, then multiply. */
            MPI_Bcast(&(local_A->row),1,MPI_INT,bcast_root,grid->row_comm);
            MPI_Bcast(&(local_A->col),1,MPI_INT,bcast_root,grid->row_comm);
            MPI_Bcast(local_A->matrix,local_A->col*local_A->row,MPI_DOUBLE,bcast_root,grid->row_comm);
            matrix_multiply(local_A,local_B,local_C); //C += A_block * B_block (presumably accumulates — confirm in matrix.h)
        }
        else{
            /* Receive the root's block dimensions, then its data.
               NOTE(review): temp_A.matrix was sized from local_A; if the
               received row/col were ever larger this Bcast would overrun
               the buffer. Safe here only because all blocks are equal. */
            MPI_Bcast(&(temp_A.row),1,MPI_INT,bcast_root,grid->row_comm);
            MPI_Bcast(&(temp_A.col),1,MPI_INT,bcast_root,grid->row_comm);
            MPI_Bcast(temp_A.matrix,temp_A.col*temp_A.row,MPI_DOUBLE,bcast_root,grid->row_comm);
            matrix_multiply(&temp_A,local_B,local_C);
        }
        /* Circular shift of B one row upward within the column (tags 0-2
           keep the dims and data messages from being mismatched). */
        MPI_Sendrecv_replace(&(local_B->row),1,MPI_INT,dest,0,source,0,grid->col_comm,&status);
        MPI_Sendrecv_replace(&(local_B->col),1,MPI_INT,dest,1,source,1,grid->col_comm,&status);
        MPI_Sendrecv_replace(local_B->matrix,local_B->col*local_B->row,MPI_DOUBLE,dest,2,source,2,grid->col_comm,&status);
    }
    /* Gather all blocks to root; every rank allocates the globals even
       though only rank 0 uses them. Gathered data is in block order, so
       matrix_rearrange converts it to row-major before printing. */
    matrix_allocate(global_A,local_A->row*grid->length,local_A->col*grid->length);
    matrix_allocate(global_B,local_B->row*grid->length,local_B->col*grid->length);
    matrix_allocate(global_C,local_C->row*grid->length,local_C->col*grid->length);
    MPI_Gather(local_A->matrix,local_A->row*local_A->col,MPI_DOUBLE,global_A->matrix,local_A->row*local_A->col,MPI_DOUBLE,root,grid->comm);
    MPI_Gather(local_B->matrix,local_B->row*local_B->col,MPI_DOUBLE,global_B->matrix,local_B->row*local_B->col,MPI_DOUBLE,root,grid->comm);
    MPI_Gather(local_C->matrix,local_C->row*local_C->col,MPI_DOUBLE,global_C->matrix,local_C->row*local_C->col,MPI_DOUBLE,root,grid->comm);

    if(grid->my_rank==0){
        matrix_rearrange(global_A, local_A->row, local_A->col);
        matrix_print(global_A);
        printf("*\n");
        matrix_rearrange(global_B, local_B->row, local_B->col);
        matrix_print(global_B);
        printf("=\n");
        matrix_rearrange(global_C, local_C->row, local_C->col);
        matrix_print(global_C);
    }
}

int main()
{
    GRID_INFO_T grid;
    Matrix_T local_A ,local_B,local_C;
    Matrix_T global_A,global_B,global_C;
    int version,subversion;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int processor_name_length;
    double start,finish,elapsed,seconds_per_tick;
    MPI_Init(0,0);

    setup_grid(&grid);
    version = MPI_VERSION;
    subversion = MPI_SUBVERSION;
    if(grid.my_rank == 0){
        //print version
        printf("MPI : %d.%d\n",version, subversion);
        MPI_Get_version(&version,&subversion);
        printf("MPI : %d.%d\n",version, subversion);
        //print Processor name
        MPI_Get_processor_name(processor_name,&processor_name_length);
        printf("Processor name: %s\n",processor_name);
        //get time
        start = MPI_Wtime();
        //get tick
    }

    matrix_allocate(&local_A,2,2);
    matrix_allocate(&local_B,2,2);
    matrix_allocate(&local_C,2,2);
    
    matrix_set(&local_A,grid.my_row*2*grid.length+grid.my_col);
    matrix_set_to_one(&local_B);

    Fox(2*grid.length,&grid,&local_A,&local_B,&local_C,&global_A,&global_B,&global_C);

    if(grid.my_rank == 0){
    //    matrix_print(&local_A);
    //    matrix_print(&local_B);
    //    matrix_print(&local_C);
        finish = MPI_Wtime();
        elapsed = finish - start;
        printf("run in %.10f seconds\n",elapsed);
        seconds_per_tick = MPI_Wtick();
        printf("resolution is %.20f seconds per tick\t",seconds_per_tick);
        printf("resolution is %.20f ticks per seconds",1.0/seconds_per_tick);
    }


    MPI_Finalize();
    return 0;
}