#include <mpi.h>
#include <iostream>
#include <cstdlib>
#include <sys/time.h>
#include <cmath>
#include <omp.h>
#include <pmmintrin.h>
#include <vector>
using namespace std;

extern int n, thread_count;
extern float **A;

/*
 * Pipelined Gaussian elimination over a ring of MPI processes.
 *
 * Rows are distributed cyclically: rank r owns rows r, r+num_proc, ...
 * For each step k, the owner of row k normalizes it (pivot -> 1) and sends
 * it to the next rank in the ring; every other rank receives it from its
 * predecessor and forwards it onward, unless the next hop is the owner
 * itself (the row has then completed the ring). Afterwards every rank
 * eliminates column k from its own rows below the pivot.
 *
 * No pivoting is performed: a zero at A[k][k] divides by zero.
 */
void recycle_pipeline_gauss(int my_rank, int num_proc) {
    const int prev = (my_rank - 1 + num_proc) % num_proc;  // upstream neighbor
    const int next = (my_rank + 1) % num_proc;             // downstream neighbor

    for (int k = 0; k < n; k++) {
        const int owner = k % num_proc;
        if (owner == my_rank) {
            // Normalize the pivot row, then start it down the pipeline.
            const float pivot = A[k][k];
            for (int col = k + 1; col < n; col++)
                A[k][col] /= pivot;
            A[k][k] = 1.0;
            if (next != my_rank)  // running alone: nothing to send
                MPI_Send(A[k], n, MPI_FLOAT, next, 2, MPI_COMM_WORLD);
        } else {
            // Receive the normalized row from upstream and forward it,
            // unless the next hop is the row's owner (ring completed).
            MPI_Recv(A[k], n, MPI_FLOAT, prev, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            if (owner != next)
                MPI_Send(A[k], n, MPI_FLOAT, next, 2, MPI_COMM_WORLD);
        }
        // Eliminate column k from every locally-owned row below the pivot.
        for (int row = my_rank; row < n; row += num_proc) {
            if (row <= k)
                continue;
            for (int col = k + 1; col < n; col++)
                A[row][col] -= A[row][k] * A[k][col];
            A[row][k] = 0.0;
        }
    }
}

/*
 * SSE + OpenMP variant of recycle_pipeline_gauss: the same ring pipeline
 * for the pivot row, but the row normalization and the elimination updates
 * are vectorized 4-wide with SSE intrinsics, and the locally-owned
 * elimination rows are split across OpenMP threads.
 *
 * NOTE(review): the MPI calls happen inside an `omp single` region, so an
 * arbitrary thread may issue them — this requires MPI initialized with at
 * least MPI_THREAD_SERIALIZED; confirm at the MPI_Init_thread call site.
 * Like the scalar version, no pivoting: A[k][k] == 0 divides by zero.
 */
void recycle_pipeline_gauss_opt(int my_rank, int num_proc) {
    int pre_rank = (my_rank - 1 + num_proc) % num_proc;  // upstream neighbor in the ring
    int nex_rank = (my_rank + 1) % num_proc;             // downstream neighbor in the ring
    __m128 v0, v1, v2;
    int k, j, i;
    // One parallel region spans the whole k loop: every thread runs the
    // loop, synchronizing through the implicit barriers at the end of the
    // `single` and `for` constructs below.
    #pragma omp parallel num_threads(thread_count), private(k, j, i, v0, v1, v2)
    for (k = 0; k < n; k++) {
        // Exactly one thread handles the pivot row and all MPI traffic.
        #pragma omp single
        {
            if (k % num_proc == my_rank) {
                // Replicate the pivot value into all four SSE lanes.
                v1 = _mm_set_ps(A[k][k], A[k][k], A[k][k], A[k][k]);
                // Normalize A[k][k+1..] four floats at a time (unaligned loads/stores).
                for (j = k + 1; j <= n - 4; j += 4) {
                    v0 = _mm_loadu_ps(A[k] + j);
                    v0 = _mm_div_ps(v0, v1);
                    _mm_storeu_ps(A[k] + j, v0);
                }
                // Scalar tail: the vector loop never touches A[k][k], so it
                // still holds the original pivot value at this point.
                float ele = A[k][k];
                for (j; j < n; j++)
                    A[k][j] = A[k][j] / ele;
                A[k][k] = 1.0;
                if (nex_rank != my_rank)  // running alone: nothing to send
                    MPI_Send(A[k], n, MPI_FLOAT, nex_rank, 2, MPI_COMM_WORLD);
            }
            else {
                // Not the owner: receive row k from upstream and forward it,
                // unless the next hop is the owner (the ring is completed).
                MPI_Recv(A[k], n, MPI_FLOAT, pre_rank, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                if (nex_rank != k % num_proc)
                    MPI_Send(A[k], n, MPI_FLOAT, nex_rank, 2, MPI_COMM_WORLD);
            }
        }  // implicit barrier: row k is fully ready before elimination starts
        // Threads divide the locally-owned rows (cyclic distribution by rank).
        #pragma omp for
        for (i = my_rank; i < n; i += num_proc) {
            if (i <= k) 
                continue;
            // Multiplier for this row, replicated across the SSE lanes.
            v1 = _mm_set_ps(A[i][k], A[i][k], A[i][k], A[i][k]);
            // A[i][j] -= A[i][k] * A[k][j], four columns per iteration.
            for (j = k + 1; j <= n - 4; j += 4){
                v2 = _mm_loadu_ps(A[k] + j);
                v0 = _mm_loadu_ps(A[i] + j);
                v2 = _mm_mul_ps(v1, v2);
                v0 = _mm_sub_ps(v0, v2);
                _mm_storeu_ps(A[i] + j, v0);
            }
            // Scalar tail for the remaining (n - k - 1) % 4 columns.
            for (j; j < n; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j]; 
            A[i][k] = 0.0;
        }  // implicit barrier: all updates finished before the next k step
    }
}
