#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mpi.h"

#define STOP                100
#define SLAVEX              200
#define NEWX                300
#define TRUE	            1
#define FALSE	            0

/*
 * Parallel Jacobi-Richardson solver for the linear system A*x = b.
 *
 * Rank 0 (master) controls convergence: each iteration it gathers the
 * updated solution from the slaves, evaluates the relative error, and
 * sends every slave either a stop signal or the new solution vector.
 * Ranks 1..nproc-1 (slaves) each update one contiguous band of rows;
 * the last slave also takes the remainder rows.
 *
 * Input file layout: order, test-row index, tolerance, max iterations,
 * then the A matrix (row major) and the b vector.
 *
 * Fixes over the previous revision:
 *  - MX was allocated J_ORDER times too large (calloc element-size bug);
 *  - MRT was malloc'd twice, leaking the first allocation;
 *  - the row-normalization step used nested "omp parallel" regions in
 *    which every inner thread divided MB[i] by the diagonal again,
 *    corrupting b whenever more than one thread ran;
 *  - the error-maximum and row-test loops raced on shared accumulators;
 *    they now use OpenMP reductions (reduction(max:) needs OpenMP 3.1+);
 *  - MPI_Isend requests were never completed or freed; blocking MPI_Send
 *    is used for the small control messages instead;
 *  - running with a single process divided by zero (nproc - 1 == 0);
 *  - max_diff / max_x produced NaN when max_x == 0;
 *  - fscanf results are checked; memory and the input file are now
 *    released on every rank, not only the master.
 */
int main (int argc, char ** argv)
{
    int i, j, iterations;
    int J_ORDER, J_ROW_TEST, J_ITE_MAX;
    int mpiinit, nproc, myrank;
    int row, last_row, loop_control, index;
    float J_ERROR, J_AUX, MBR;
    float ** MA;        /* coefficient matrix, J_ORDER x J_ORDER */
    float * MB;         /* right-hand side (normalized in place) */
    float * MX;         /* current solution estimate */
    float * MRT;        /* untouched copy of row J_ROW_TEST of A */
    float * MX_Master;  /* master: solution gathered from the slaves */
    float * MX_Slave;   /* slave: locally updated solution entries */
    float max_diff, max_x;
    MPI_Status status;
    FILE * f;

    if (argc < 2)
    {
        printf("Uso: %s nomedoarquivo.txt\n", argv[0]);
        return -1;
    }

    f = fopen(argv[1], "r");
    if (!f) return -1;

    /* Problem parameters; validate them before sizing any allocation. */
    if (fscanf(f, "%d", &J_ORDER)    != 1 ||
        fscanf(f, "%d", &J_ROW_TEST) != 1 ||
        fscanf(f, "%f", &J_ERROR)    != 1 ||
        fscanf(f, "%d", &J_ITE_MAX)  != 1 ||
        J_ORDER <= 0 || J_ROW_TEST < 0 || J_ROW_TEST >= J_ORDER)
    {
        fclose(f);
        return -1;
    }

    /* FIX: MX used to be calloc(J_ORDER, J_ORDER * sizeof(float)) —
     * J_ORDER times too much memory for a vector — and MRT was
     * allocated twice, leaking the first block. */
    MA        = (float **) malloc(J_ORDER * sizeof(float *));
    MX        = (float *)  calloc(J_ORDER, sizeof(float));
    MB        = (float *)  malloc(J_ORDER * sizeof(float));
    MRT       = (float *)  malloc(J_ORDER * sizeof(float));
    MX_Master = (float *)  malloc(J_ORDER * sizeof(float));
    MX_Slave  = (float *)  malloc(J_ORDER * sizeof(float));
    if (!MA || !MX || !MB || !MRT || !MX_Master || !MX_Slave)
    {
        fclose(f);
        return -1;
    }

    for (i = 0; i < J_ORDER; i++)
    {
        MA[i] = (float *) malloc(J_ORDER * sizeof(float));
        if (!MA[i])
        {
            fclose(f);
            return -1;
        }
    }

    /* Read A (row major) and b, checking every conversion. */
    for (i = 0; i < J_ORDER; i++)
        for (j = 0; j < J_ORDER; j++)
            if (fscanf(f, "%f", &MA[i][j]) != 1)
            {
                fclose(f);
                return -1;
            }

    for (i = 0; i < J_ORDER; i++)
        if (fscanf(f, "%f", &MB[i]) != 1)
        {
            fclose(f);
            return -1;
        }

    /* Save the ORIGINAL test row and right-hand side entry: the final
     * row test must use the values read from the file, not the
     * diagonally-normalized system built below. */
    for (i = 0; i < J_ORDER; i++)
        MRT[i] = MA[J_ROW_TEST][i];
    MBR = MB[J_ROW_TEST];

    /* MPI startup. */
    mpiinit = MPI_Init(&argc, &argv);
    if (mpiinit != MPI_SUCCESS)
    {
        printf("\n Failed to initialize MPI \n");
        MPI_Abort(MPI_COMM_WORLD, mpiinit);
    }
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

    /* FIX: the master/slave split needs at least two processes;
     * nproc == 1 previously divided by zero in the row distribution. */
    if (nproc < 2)
    {
        if (myrank == 0)
            printf("\n This program needs at least 2 MPI processes \n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Row distribution: every slave gets `row` rows; the last slave
     * additionally absorbs the remainder. */
    row = J_ORDER / (nproc - 1);
    last_row = row + (J_ORDER - row * (nproc - 1));

    iterations = 0;
    loop_control = FALSE;

    /* Jacobi-Richardson normal form: divide each row (and b) by its
     * diagonal entry, zero the diagonal, and take x0 = D^-1 * b.
     * FIX: the old nested "omp parallel" regions made every inner
     * thread divide MB[i] by the diagonal once more — wrong results
     * with more than one thread.  One flat parallel-for is enough. */
    #pragma omp parallel for private(j)
    for (i = 0; i < J_ORDER; i++)
    {
        float diagonal = MA[i][i];
        for (j = 0; j < J_ORDER; j++)
            MA[i][j] = MA[i][j] / diagonal;
        MA[i][i] = 0.0f;
        MB[i] = MB[i] / diagonal;
        MX[i] = MB[i];      /* initial guess */
    }

    if (myrank == 0)
    {
        /* Master: gather the new solution, test the stop conditions,
         * broadcast the stop flag and (when continuing) the new x. */
        while (!loop_control)
        {
            index = 0;
            for (i = 1; i < nproc; i++)
            {
                if (i < nproc - 1)
                {
                    MPI_Recv(&MX_Master[index], row, MPI_FLOAT, i, SLAVEX, MPI_COMM_WORLD, &status);
                    index = index + row;
                }
                else
                {
                    /* Last slave owns the tail band of last_row rows. */
                    MPI_Recv(&MX_Master[J_ORDER - last_row], last_row, MPI_FLOAT, i, SLAVEX, MPI_COMM_WORLD, &status);
                }
            }
            iterations++;

            /* Relative error: max|x_new - x_old| / max|x_new|.
             * FIX: previously updated from a malformed nested parallel
             * region with no reduction — a data race and each thread
             * redundantly scanned the whole vector. */
            max_diff = 0.0f;
            max_x = 0.0f;
            #pragma omp parallel for reduction(max:max_diff) reduction(max:max_x)
            for (i = 0; i < J_ORDER; i++)
            {
                float diff = fabsf(MX_Master[i] - MX[i]);
                float absx = fabsf(MX_Master[i]);
                if (diff > max_diff) max_diff = diff;
                if (absx > max_x) max_x = absx;
            }

            /* FIX: guard max_x == 0 — the old 0/0 produced NaN and the
             * loop silently ran until J_ITE_MAX. */
            if (iterations == J_ITE_MAX ||
                (max_x != 0.0f && max_diff / max_x <= J_ERROR))
            {
                loop_control = TRUE;
                /* FIX: blocking MPI_Send replaces fire-and-forget
                 * MPI_Isend whose requests were never completed. */
                for (i = 1; i < nproc; i++)
                    MPI_Send(&loop_control, 1, MPI_INT, i, STOP, MPI_COMM_WORLD);
            }
            else
            {
                for (i = 1; i < nproc; i++)
                    MPI_Send(&loop_control, 1, MPI_INT, i, STOP, MPI_COMM_WORLD);
                /* Distribute the full updated solution vector. */
                for (i = 1; i < nproc; i++)
                    MPI_Send(&MX_Master[0], J_ORDER, MPI_FLOAT, i, NEWX, MPI_COMM_WORLD);
                for (i = 0; i < J_ORDER; i++)
                    MX[i] = MX_Master[i];
            }
        }
    }
    else
    {
        /* Slave: update this rank's band of rows, send it to the master,
         * then wait for the stop flag / the refreshed solution. */
        int first = row * (myrank - 1);
        int count = (myrank < nproc - 1) ? row : last_row;

        while (!loop_control)
        {
            /* x_new[i] = b[i] - sum_j A[i][j] * x[j]  (A has zero diagonal) */
            for (i = first; i < first + count; i++)
            {
                float acc = MB[i];
                for (j = 0; j < J_ORDER; j++)
                    acc -= MA[i][j] * MX[j];
                MX_Slave[i] = acc;
            }
            MPI_Send(&MX_Slave[first], count, MPI_FLOAT, 0, SLAVEX, MPI_COMM_WORLD);

            MPI_Recv(&loop_control, 1, MPI_INT, 0, STOP, MPI_COMM_WORLD, &status);
            if (!loop_control)
                MPI_Recv(&MX[0], J_ORDER, MPI_FLOAT, 0, NEWX, MPI_COMM_WORLD, &status);
        }
    }

    if (myrank == 0)
    {
        /* Row test: the computed solution must satisfy the ORIGINAL row
         * J_ROW_TEST, i.e. MRT . x ~= MBR.
         * FIX: accumulate with a reduction instead of racing on J_AUX. */
        J_AUX = 0.0f;
        #pragma omp parallel for reduction(+:J_AUX)
        for (i = 0; i < J_ORDER; i++)
            J_AUX += MRT[i] * MX_Master[i];

        printf ("\n---------------------------------------------------------\n");
        printf ("Iterations: %d\n", iterations);
        printf ("Row test: %d => [%f] =? %f", J_ROW_TEST, J_AUX, MBR);
        printf ("\n---------------------------------------------------------\n");
    }

    /* FIX: release resources on every rank (the old code freed only on
     * rank 0 and never freed MX_Master / MX_Slave at all). */
    fclose(f);
    for (i = 0; i < J_ORDER; i++)
        free(MA[i]);
    free(MA);
    free(MX);
    free(MB);
    free(MRT);
    free(MX_Master);
    free(MX_Slave);

    MPI_Finalize();
    return 0;
}
