#include <xmmintrin.h>
#include <emmintrin.h>
#include <pmmintrin.h>
#include <tmmintrin.h>
#include <smmintrin.h>
#include <nmmintrin.h>
#include <immintrin.h>
#include <mpi.h>
#include <omp.h>
#include <iostream>
#include <cmath>
#include <stdio.h>
#include <windows.h>
#include <algorithm>
#define N 1024
#define TASK 10
#define INTERVAL 10
using namespace std;
float m[N][N];   // the matrix being eliminated; shared by every variant below
typedef long long ll;   // shorthand alias; unused in the visible code
typedef struct {
    int k;      // elimination step (pivot row index)
    int t_id;   // worker thread id
}threadParam_t; // NOTE(review): presumably leftover from a pthread variant; unused here


int NUM_THREADS = 8;   // OpenMP thread count; swept 2..8 by main()
int remain = N;        // unused in the visible code


// Fill m with a random upper-triangular matrix, then apply 2000 random row
// combinations so elimination has non-trivial work while the matrix stays
// non-singular in exact arithmetic.  (rand() call order matters: it must
// match the original sequence exactly for reproducibility.)
void init()
{
    for (int row = 0;row < N;row++)
    {
        int col = 0;
        // Strictly-below-diagonal entries start at zero.
        while (col < row)
        {
            m[row][col++] = 0;
        }
        // Diagonal and above get values in [1, 10000].
        while (col < N)
        {
            m[row][col++] = rand() % 10000 + 1;
        }
    }
    for (int step = 0;step < 2000;step++)
    {
        int dst = rand() % N;
        int src = rand() % N;
        int judge = rand() % 2;
        if (judge == 1)
        {
            for (int col = 0;col < N;col++)
            {
                m[dst][col] += m[src][col] * (rand() % 100);
            }
        }
        else
        {
            for (int col = 0;col < N;col++)
            {
                m[dst][col] -= m[src][col] * (rand() % 100);
            }
        }
    }
}

// Sequential Gaussian elimination baseline: reduces m to upper-triangular
// form with a unit diagonal.  No pivoting — assumes m[pivot][pivot] != 0.
void serial()
{
    for (int pivot = 0; pivot < N - 1; pivot++)
    {
        // Normalise the pivot row so its diagonal entry becomes 1.
        for (int col = pivot + 1; col < N; col++)
        {
            m[pivot][col] = m[pivot][col] / m[pivot][pivot];
        }
        m[pivot][pivot] = 1;
        // Eliminate the pivot column from every row below.
        for (int row = pivot + 1; row < N; row++)
        {
            for (int col = pivot + 1; col < N; col++)
            {
                m[row][col] = m[row][col] - m[pivot][col] * m[row][pivot];
            }
            m[row][pivot] = 0;
        }
    }
}

void mpi()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    MPI_Status status;
    MPI_Init(NULL, NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
    MPI_Comrank(MPI_COMM_WORLD, &rank);
    int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if (rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    for (k = 0; k < N; ++k)
    {
        if (k >= r1 && k <= r2)
        {
            for (j = k + 1;j < N;++j)
            {
                m[k][j] = m[k][j] / m[k][k];
            }
            m[k][k] = 1.0;
            for (j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD, &status);
        }
        for (i = max(r1, k + 1); i < r2; ++i)
        {
            for (j = k + 1;j < N;++j)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            m[i][k] = 0.0;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
    {
        end_time = MPI_Wtime();
        cout << "mpi:" << (end_time - start_time) * 1000 << "ms" << endl;
    }
    MPI_Finalize();
}


void mpi_SIMD()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    __m128 diver, divee, mult1, mult2, sub1;
    MPI_Status status;
    MPI_Init(NULL, NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
    MPI_Comrank(MPI_COMM_WORLD, &rank);
    int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if (rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    for (k = 0; k < N; ++k)
    {
        if (k >= r1 && k <= r2)
        {
            diver = _mm_load_ps1(&m[k][k]);
            for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[k][j] = m[k][j] / m[k][k];
            }
            for (;j < N;j += 4)
            {
                divee = _mm_loadu_ps(&m[k][j]);
                divee = _mm_div_ps(divee, diver);
                _mm_storeu_ps(&m[k][j], divee);
            }
            m[k][k] = 1.0;
            for (j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD, &status);
        }
        for (i = max(r1, k + 1); i < r2; ++i)
        {
            mult1 = _mm_load_ps1(&m[i][k]);
            for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            for (;j < N;j += 4)
            {
                sub1 = _mm_loadu_ps(&m[i][j]);
                mult2 = _mm_loadu_ps(&m[k][j]);
                mult2 = _mm_mul_ps(mult1, mult2);
                sub1 = _mm_sub_ps(sub1, mult2);
                _mm_storeu_ps(&m[i][j], sub1);
            }
            m[i][k] = 0.0;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
    {
        end_time = MPI_Wtime();
        cout << "mpi_SIMD:" << (end_time - start_time) * 1000 << "ms" << endl;
    }
    MPI_Finalize();
}

void mpi_omp()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    float tmp1, tmp2;
    MPI_Status status;
    MPI_Init(NULL, NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
    MPI_Comrank(MPI_COMM_WORLD, &rank);
    int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if (rank == 0)
    {
        init();
    }
#pragma omp parallel num_threads(NUM_THREADS), shared(matrix), private(i,j,k,tmp1,tmp2,thread_num,rank)
    start_time = MPI_Wtime();
    for (k = 0; k < N; ++k)
    {
        if (k >= r1 && k <= r2)
        {
            tmp1 = m[k][k];
#pragma omp single
            for (j = k + 1;j < N;++j)
            {
                m[k][j] = m[k][j] / tmp1;
            }
            m[k][k] = 1.0;
            for (j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD, &status);
        }
#pragma omp for schedule(dynamic)
        for (i = max(r1, k + 1); i < r2; ++i)
        {
            tmp2 = m[i][k];
            for (j = k + 1;j < N;++j)
            {
                m[i][j] = m[i][j] - tmp2 * m[k][j];
            }
            m[i][k] = 0.0;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
    {
        end_time = MPI_Wtime();
        cout << "mpi_omp:" << (end_time - start_time) * 1000 << "ms" << endl;
    }
    MPI_Finalize();
}


void mpi_omp_simd()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    __m128 diver, divee, mult1, mult2, sub1;
    MPI_Status status;
    MPI_Init(NULL, NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
    MPI_Comrank(MPI_COMM_WORLD, &rank);
    int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if (rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
#pragma omp parallel num_threads(NUM_THREADS), shared(matrix), private(i,j,k,diver,divee,mult1,mult2,sub1,thread_num,rank)
    for (k = 0; k < N; ++k)
    {
        if (k >= r1 && k <= r2)
        {
            diver = _mm_load_ps1(&m[k][k]);
#pragma omp single
            for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[k][j] = m[k][j] / m[k][k];
            }
            for (;j < N;j += 4)
            {
                divee = _mm_loadu_ps(&m[k][j]);
                divee = _mm_div_ps(divee, diver);
                _mm_storeu_ps(&m[k][j], divee);
            }
#pragma omp barrier
            m[k][k] = 1.0;
            for (j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0], N, MPI_FLOAT, j, 1, MPI_COMM_WORLD, &status);
        }
#pragma omp for schedule(dynamic)
        for (i = max(r1, k + 1); i < r2; ++i)
        {
            mult1 = _mm_load_ps1(&m[i][k]);
            for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            for (;j < N;j += 4)
            {
                sub1 = _mm_loadu_ps(&m[i][j]);
                mult2 = _mm_loadu_ps(&m[k][j]);
                mult2 = _mm_mul_ps(mult1, mult2);
                sub1 = _mm_sub_ps(sub1, mult2);
                _mm_storeu_ps(&m[i][j], sub1);
            }
            m[i][k] = 0.0;
        }
#pragma omp barrier
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
    {
        end_time = MPI_Wtime();
        cout << "mpi_omp_simd:" << (end_time - start_time) * 1000 << "ms" << endl;
    }
    MPI_Finalize();
}

int main()
{
    serial();
    mpi();
    mpi_SIMD();
    cout << endl;
    for (NUM_THREADS = 2;NUM_THREADS <= 8;NUM_THREADS++)
    {
        cout << "threads_num: " << NUM_THREADS << endl;
        mpi_omp();
        mpi_omp_simd();
        cout << endl;
    }
    return 0;
}
