#include<iostream>
#include <pthread.h>
#include<iomanip>
#include <iostream>
#include<vector>
#include<sys/time.h>
#include<mpi.h>
#include<omp.h>
#include <arm_neon.h>

using namespace std;
int taskNum = 1;    // NOTE(review): never read in this file — confirm external use before removing
int pThreadNum = 1; // OpenMP thread count; swept 2,4,...,10 by main()
int n;              // matrix dimension; the kernels use rows/columns 1..n
float** m;          // (n+1) x (n+1) coefficient matrix, allocated by init()
// Fill m with a solvable test system: first build a random
// upper-triangular matrix with a unit diagonal, then add each row k
// into all rows below it so the matrix becomes dense but stays
// non-singular (row operations preserve invertibility).
void generate()
{
    for (int row = 1; row <= n; row++)
    {
        m[row][row] = 1.0;
        for (int col = row + 1; col <= n; col++)
            m[row][col] = rand() % 100 + 1;   // entries in [1, 100]
    }
    // Densify: accumulate row k into every later row.
    for (int k = 1; k <= n; k++)
        for (int row = k + 1; row <= n; row++)
            for (int col = 1; col <= n; col++)
                m[row][col] += m[k][col];
}
void show()
{
    for (int i = 1; i <= n; i++)
    {
        for (int p = 1; p <= n; p++)
        {
            cout << setw(5) << m[i][p] << " ";
        }
        cout << endl;
    }
}
void init()
{
    m = new float* [n + 1];
    for (int p = 0; p <= n; p++)
    {
        m[p] = new float[n + 1];
        for (int q = 0; q <= n; q++)
        {
            m[p][q] = 0.0;
        }
    }
    generate();
}


// Rank of the neighbour that precedes `rank` in the ring pipeline;
// rank 0 wraps around to the last process.
int preProcess(int rank, int numProcess)
{
    return (rank > 0) ? rank - 1 : numProcess - 1;
}
// Rank of the neighbour that follows `rank` in the ring pipeline;
// the last process wraps around to rank 0.
int nextProcess(int rank, int numProcesses)
{
    return (rank + 1 < numProcesses) ? rank + 1 : 0;
}


// Pipelined MPI Gaussian elimination, 1-based indices, no OpenMP.
// Row k belongs to rank (k-1) % numProcesses. The owner normalizes the
// pivot row and injects it into a ring (send to successor); every other
// rank receives it from its predecessor, forwards it once (unless the
// ring is complete), then eliminates column k from the rows it owns.
// Prints the elapsed time averaged over all ranks (rank 0 only).
void noOpenmp(int argc, char* argv[], int rank, int numProcesses)
{
    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;


    gettimeofday(&t1, NULL);
    for (int k = 1; k <= n; k++)
    {
        {
            if ((k - 1) % numProcesses == rank)
            {
                // Owner: scale pivot row so the diagonal becomes 1.
                for (int j = k + 1; j <= n; j++)
                    m[k][j] = m[k][j] / m[k][k];
                m[k][k] = 1;
                // Column 0 is unused by the math; stash the originating
                // rank there so receivers know when to stop forwarding.
                m[k][0] = rank;

                MPI_Send(&m[k][0], n + 1, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);

            }
            else
            {
                MPI_Recv(&m[k][0], n + 1, MPI_FLOAT, preProcess(rank, numProcesses), 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                // Forward unless our successor is the originator (row has
                // come full circle). Comparing int against the float in
                // m[k][0] is exact for small rank values.
                if (nextProcess(rank, numProcesses) != m[k][0])
                {
                    MPI_Send(&m[k][0], n + 1, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);
                }
            }
        }
        // Every rank eliminates column k from the rows it owns.
        for (int i = k + 1; i <= n; i++)
        {
            if ((i - 1) % numProcesses == rank)
            {
                for (int j = k + 1; j <= n; j++)
                {
                    m[i][j] = m[i][j] - m[i][k] * m[k][j];
                }
                m[i][k] = 0;
            }
        }
    }


    gettimeofday(&t2, NULL);

    // Per-rank wall time in seconds; summed on rank 0 then averaged.
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }

}
// Pipelined MPI elimination with NEON-vectorized row updates.
// NOTE(review): unlike the other kernels this uses 0-based indices
// (k = 0..n-1) and transfers only n floats per row, while init()/generate()
// populate indices 1..n — confirm the indexing convention before use.
// NOTE(review): only the pivot owner eliminates rows here, and only the
// rows it owns itself (i = k + numProcesses, k + 2*numProcesses, ...);
// ranks that merely receive/forward the pivot row never update their own
// rows, which looks inconsistent with noOpenmp() — confirm intent.
// Not invoked by the visible main().
void NEONtryPipline(int argc, char* argv[], int rank, int numProcesses)
{

    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;


    gettimeofday(&t1, NULL);


    for (int k = 0; k < n; k++)
    {

        if (k % numProcesses == rank)
        {

            // Owner: normalize pivot row, then pass it into the ring.
            for (int j = k + 1; j < n; j++)
                m[k][j] = m[k][j] / m[k][k];
            m[k][k] = 1;
            MPI_Send(&m[k][0], n, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);



            int j = 0;
            // NOTE(review): these NEON registers shadow the outer
            // `struct timeval t1, t2` — legal but confusing; rename if
            // this function is ever revived.
            float32x4_t t1, t2, vx, vaik;

            // Eliminate the rows this rank owns, 4 floats at a time.
            for (int i = numProcesses + k; i < n; i += numProcesses)
            {
                vaik = vdupq_n_f32(m[i][k]);   // broadcast multiplier m[i][k]
                // Vector loop: `j + 4 < n` stops one lane-group early when
                // exactly 4 elements remain; the scalar tail handles them.
                for (j = k + 1; j + 4 < n; j += 4)
                {
                    t1 = vld1q_f32(&m[i][j]);
                    t2 = vld1q_f32(&m[k][j]);
                    vx = vmulq_f32(vaik, t2);
                    t1 = vsubq_f32(t1, vx);
                    vst1q_f32(&m[i][j], t1);
                }
                // Scalar tail for the remaining (< 5) elements.
                for (; j < n; j++)
                {
                    m[i][j] = m[i][j] - m[i][k] * m[k][j];
                }
                m[i][k] = 0;
            }
        }
        else
        {
            // Non-owner: receive pivot row and forward it unless our
            // successor is the owner (ring complete).
            MPI_Recv(&m[k][0], n, MPI_FLOAT, preProcess(rank, numProcesses), 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            if ((k + 1) % numProcesses != rank)
            {
                MPI_Send(&m[k][0], n, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);
            }
        }
    }




    gettimeofday(&t2, NULL);

    // Per-rank wall time, averaged across ranks and printed on rank 0.
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }



}
// Hybrid MPI + OpenMP pipeline: same ring-pipeline scheme as noOpenmp(),
// with the per-k row elimination split across pThreadNum OpenMP threads.
// NOTE(review): despite the name, no NEON intrinsics are used here.
// NOTE(review): MPI calls happen only inside `omp single` (one thread),
// which needs at least MPI_THREAD_FUNNELED; main() calls plain MPI_Init,
// whose provided thread level is implementation-defined — confirm.
void tryPiplineOpenmpNEON(int argc, char* argv[], int rank, int numProcesses)
{
    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;


    gettimeofday(&t1, NULL);
    // One parallel region for the whole factorization: every thread runs
    // the k-loop redundantly; the implicit barriers after `single` and
    // `for` keep the iterations in lock-step.
#pragma omp parallel num_threads(pThreadNum)
    for (int k = 1; k <= n; k++)
    {
        // Exactly one thread does the MPI exchange + pivot normalization.
#pragma omp single
        {
            if ((k - 1) % numProcesses == rank)
            {
                // Owner of row k: normalize, tag with our rank (column 0
                // is unused by the math), and inject into the ring.
                for (int j = k + 1; j <= n; j++)
                    m[k][j] = m[k][j] / m[k][k];
                m[k][k] = 1;
                m[k][0] = rank;

                MPI_Send(&m[k][0], n + 1, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);

            }
            else
            {
                MPI_Recv(&m[k][0], n + 1, MPI_FLOAT, preProcess(rank, numProcesses), 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                // Forward unless our successor is the originator stored
                // in m[k][0] (the row has come full circle).
                if (nextProcess(rank, numProcesses) != m[k][0])
                {
                    MPI_Send(&m[k][0], n + 1, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);
                }
            }
        }
        // Threads share the elimination of the rows this rank owns.
#pragma omp for
        for (int i = k + 1; i <= n; i++)
        {
            if ((i - 1) % numProcesses == rank)
            {
                for (int j = k + 1; j <= n; j++)
                {
                    m[i][j] = m[i][j] - m[i][k] * m[k][j];
                }
                m[i][k] = 0;
            }
        }

    }


    gettimeofday(&t2, NULL);

    // Per-rank wall time, averaged across ranks and printed on rank 0.
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }



}
// Shared-memory Gaussian elimination using only OpenMP; MPI is used
// solely to average the timing across ranks (each rank factorizes the
// full matrix independently). This is the kernel main() benchmarks.
void common(int argc, char* argv[], int rank, int numProcesses)
{
    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;


    gettimeofday(&t1, NULL);
    // One parallel region around the whole k-loop: every thread executes
    // the loop redundantly; the implicit barriers after `single` and
    // `for` synchronize the threads between pivot and elimination phases.
#pragma omp parallel num_threads(pThreadNum)
    for (int k = 1; k <= n; k++)
    {
        // Pivot-row normalization done by one thread only.
#pragma omp single
        {
            for (int j = k + 1; j <= n; j++)
                m[k][j] = m[k][j] / m[k][k];
            m[k][k] = 1;
        }
        // Row elimination split across the team.
#pragma omp for
        for (int i = k + 1; i <= n; i++)
        {
            for (int j = k + 1; j <= n; j++)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            m[i][k] = 0;
        }

    }


    gettimeofday(&t2, NULL);

    // Per-rank wall time, averaged across ranks and printed on rank 0.
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }
}
int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);


    for (int i = 1; i <= 5; i++)
    {
        n = 2000;
        init();

        pThreadNum = i * 2;
        common(argc, argv, rank, numProcesses);

    }
    return 0;
}

