#include <pthread.h>
#include <omp.h>
#include <iostream>
#include <cmath>
#include <arm_neon.h>
#include <semaphore.h>
#include <stdio.h>
#include <sys/time.h>
#include <algorithm>
#include <mpi.h>
#define ROW 1024
#define TASK 8
#define INTERVAL 10000
using namespace std;
// System to factorize; every algorithm in this file eliminates it in place.
float matrix[ROW][ROW];
// Second ROW x ROW buffer; unused in the code visible here — presumably a
// backup copy used by other variants. TODO confirm.
float revmat[ROW][ROW];
typedef long long ll;
// Argument bundle for a pthread worker: current pivot index and thread id.
// (No pthread variant is visible in this file.)
typedef struct {
	int k;
	int t_id;
}threadParam_t;

// Synchronization objects for pthread-based variants; none of them are
// referenced by the MPI/OpenMP code below — presumably used elsewhere.
sem_t sem_leader;
sem_t sem_Divsion[32];
sem_t sem_Elimination[32];
pthread_barrier_t division;
pthread_barrier_t elemation;
// Thread count used by the OpenMP variants; main() sweeps it from 2 to 8.
int NUM_THREADS = 8;
int remain = ROW;
pthread_mutex_t remainLock;

// Fill `matrix` with a random upper-triangular system (zeros below the
// diagonal, values in [1, 10000] on and above it), then apply 2000 random
// row additions/subtractions so the elimination routines have real work.
// Uses rand() with whatever seed is current, so repeated calls in one run
// continue the same deterministic sequence.
void init()
{
	for (int r = 0; r < ROW; ++r)
	{
		int c = 0;
		for (; c < r; ++c)
		{
			matrix[r][c] = 0;
		}
		for (; c < ROW; ++c)
		{
			matrix[r][c] = rand() % 10000 + 1;
		}
	}
	for (int mix = 0; mix < 2000; ++mix)
	{
		const int dst = rand() % ROW;
		const int src = rand() % ROW;
		const int judge = rand() % 2;
		if (judge == 1)
		{
			for (int c = 0; c < ROW; ++c)
			{
				matrix[dst][c] += matrix[src][c] * (rand() % 100);
			}
		}
		else
		{
			for (int c = 0; c < ROW; ++c)
			{
				matrix[dst][c] -= matrix[src][c] * (rand() % 100);
			}
		}
	}
}

// Serial baseline: in-place Gauss elimination of the global `matrix`,
// producing a unit upper-triangular factor (diagonal set to 1, entries
// below the diagonal set to 0). No pivoting is performed.
void plain()
{
	for (int piv = 0; piv + 1 < ROW; ++piv)
	{
		// matrix[piv][piv] is not touched by the division loop (columns > piv),
		// so hoisting it is exact.
		const float diag = matrix[piv][piv];
		for (int c = piv + 1; c < ROW; ++c)
		{
			matrix[piv][c] /= diag;
		}
		matrix[piv][piv] = 1;
		for (int r = piv + 1; r < ROW; ++r)
		{
			// The factor is only zeroed after the inner loop, so hoisting it
			// preserves the original arithmetic exactly.
			const float factor = matrix[r][piv];
			for (int c = piv + 1; c < ROW; ++c)
			{
				matrix[r][c] -= matrix[piv][c] * factor;
			}
			matrix[r][piv] = 0;
		}
	}
}

void mpi()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int r1 = (ROW - ROW % m_size) / m_size * m_rank;
    int r2 = (ROW - ROW % m_size) / m_size * (m_rank + 1);
    if (ROW - r2 < (ROW - ROW % m_size) / m_size)
    {
        r2 = ROW;
    }
    if(m_rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    for(k = 0; k < ROW; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            for (j = k + 1;j < ROW;++j)
            {
                matrix[k][j] = matrix[k][j] / matrix[k][k];
            }
            matrix[k][k] = 1.0;
            for(j = 0;j < m_size;++j)
            {
                MPI_Send(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
		for (i = max(r1,k + 1); i < r2; ++i)
		{
			for (j = k + 1;j < ROW;++j)
            {
                matrix[i][j] = matrix[i][j] - matrix[i][k] * matrix[k][j];
            }
			matrix[i][k] = 0.0;
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(m_rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}

void mpi_omp()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    float tmp1,tmp2;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int r1 = (ROW - ROW % m_size) / m_size * m_rank;
    int r2 = (ROW - ROW % m_size) / m_size * (m_rank + 1);
    if (ROW - r2 < (ROW - ROW % m_size) / m_size)
    {
        r2 = ROW;
    }
    if(m_rank == 0)
    {
        init();
    }
    #pragma omp parallel num_threads(NUM_THREADS), shared(matrix), private(i,j,k,tmp1,tmp2,m_size,m_rank)
    start_time = MPI_Wtime();
    for(k = 0; k < ROW; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            tmp1 = matrix[k][k];
            #pragma omp single
            for (j = k + 1;j < ROW;++j)
            {
                matrix[k][j] = matrix[k][j] / tmp1;
            }
            matrix[k][k] = 1.0;
            for(j = 0;j < m_size;++j)
            {
                MPI_Send(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
        #pragma omp for schedule(dynamic)
		for (i = max(r1,k + 1); i < r2; ++i)
		{
		    tmp2 = matrix[i][k];
			for (j = k + 1;j < ROW;++j)
            {
                matrix[i][j] = matrix[i][j] - tmp2 * matrix[k][j];
            }
			matrix[i][k] = 0.0;
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(m_rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi_omp:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}

void mpi_SIMD()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    float32x4_t diver,divee,mult1,mult2,sub1;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int r1 = (ROW - ROW % m_size) / m_size * m_rank;
    int r2 = (ROW - ROW % m_size) / m_size * (m_rank + 1);
    if (ROW - r2 < (ROW - ROW % m_size) / m_size)
    {
        r2 = ROW;
    }
    if(m_rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    for(k = 0; k < ROW; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            diver = vld1q_dup_f32(&matrix[k][k]);
            for (j = k + 1;j < ROW && ((ROW - j) & 3);++j)
            {
                matrix[k][j] = matrix[k][j] / matrix[k][k];
            }
            for (;j < ROW;j += 4)
            {
                divee = vld1q_f32(&matrix[k][j]);
                divee = vdivq_f32(divee, diver);
                vst1q_f32(&matrix[k][j], divee);
            }
            matrix[k][k] = 1.0;
            for(j = 0;j < m_size;++j)
            {
                MPI_Send(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
		for (i = max(r1,k + 1); i < r2; ++i)
		{
			mult1 = vld1q_dup_f32(&matrix[i][k]);
			for (j = k + 1;j < ROW && ((ROW - j) & 3);++j)
            {
                matrix[i][j] = matrix[i][j] - matrix[i][k] * matrix[k][j];
            }
			for (;j < ROW;j += 4)
			{
				sub1 = vld1q_f32(&matrix[i][j]);
				mult2 = vld1q_f32(&matrix[k][j]);
				mult2 = vmulq_f32(mult1, mult2);
				sub1 = vsubq_f32(sub1, mult2);
				vst1q_f32(&matrix[i][j], sub1);
			}
			matrix[i][k] = 0.0;
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(m_rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi_SIMD:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}

void mpi_omp_SIMD()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    float32x4_t diver,divee,mult1,mult2,sub1;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int r1 = (ROW - ROW % m_size) / m_size * m_rank;
    int r2 = (ROW - ROW % m_size) / m_size * (m_rank + 1);
    if (ROW - r2 < (ROW - ROW % m_size) / m_size)
    {
        r2 = ROW;
    }
    if(m_rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    #pragma omp parallel num_threads(NUM_THREADS), shared(matrix), private(i,j,k,diver,divee,mult1,mult2,sub1,m_size,m_rank)
    for(k = 0; k < ROW; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            diver = vld1q_dup_f32(&matrix[k][k]);
            #pragma omp single
            for (j = k + 1;j < ROW && ((ROW - j) & 3);++j)
            {
                matrix[k][j] = matrix[k][j] / matrix[k][k];
            }
            for (;j < ROW;j += 4)
            {
                divee = vld1q_f32(&matrix[k][j]);
                divee = vdivq_f32(divee, diver);
                vst1q_f32(&matrix[k][j], divee);
            }
            #pragma omp barrier
            matrix[k][k] = 1.0;
            for(j = 0;j < m_size;++j)
            {
                MPI_Send(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&matrix[k][0],ROW,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
		#pragma omp for schedule(dynamic)
		for (i = max(r1,k + 1); i < r2; ++i)
		{
			mult1 = vld1q_dup_f32(&matrix[i][k]);
			for (j = k + 1;j < ROW && ((ROW - j) & 3);++j)
            {
                matrix[i][j] = matrix[i][j] - matrix[i][k] * matrix[k][j];
            }
			for (;j < ROW;j += 4)
			{
				sub1 = vld1q_f32(&matrix[i][j]);
				mult2 = vld1q_f32(&matrix[k][j]);
				mult2 = vmulq_f32(mult1, mult2);
				sub1 = vsubq_f32(sub1, mult2);
				vst1q_f32(&matrix[i][j], sub1);
			}
			matrix[i][k] = 0.0;
		}
		#pragma omp barrier
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(m_rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi_omp_SIMD:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}

void mpi_block()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int addr = 0;
    float(*space)[ROW] = NULL;
    float local[ROW];
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int* thread_count = new int[m_size];
	int* s_count = new int[m_size + 1];
	fill(thread_count,thread_count + ROW % m_size,(int)ceil((float)ROW / m_size) * ROW);
	fill(thread_count + ROW % m_size,thread_count + m_size,ROW / m_size * ROW);
	for (i = 0;i < m_size;i++)
	{
		s_count[i] = addr;
		addr += thread_count[i];
	}
	s_count[m_size] = addr;
	space = new float[thread_count[m_rank]/ROW][ROW];
	if (m_rank == 0)
	{
		init();
		start_time = MPI_Wtime();
	}
	MPI_Scatterv(matrix, thread_count, s_count, MPI_FLOAT, space, thread_count[m_rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
	for (k = 0;k < ROW;k++)
	{
		float* t_Div;
		int tmp = upper_bound(s_count, s_count + m_size + 1, k * ROW) - s_count - 1;
		if (tmp == m_rank)
		{
			t_Div = space[k - s_count[m_rank] / ROW];
			for (int j = k + 1;j < ROW;j++)
            {
                t_Div[j] /= t_Div[k];
            }
			t_Div[k] = 1.0;
		}
		else
		{
		    t_Div = local;
		}
		MPI_Bcast(t_Div, ROW, MPI_FLOAT, tmp, MPI_COMM_WORLD);
		for (i = max(s_count[m_rank] / ROW, k + 1) - s_count[m_rank] / ROW;i < s_count[m_rank + 1] / ROW - s_count[m_rank] / ROW;i++)
		{
			for (j = k + 1;j < ROW;j++)
            {
                space[i][j] -= space[i][k] * t_Div[j];
            }
			space[i][k] = 0;
		}
	}
	MPI_Gatherv(space, thread_count[m_rank], MPI_FLOAT, matrix, thread_count, s_count, MPI_FLOAT, 0, MPI_COMM_WORLD);
	if (m_rank == 0)
	{
		end_time = MPI_Wtime();
        cout <<"mpi_block:"<<(end_time - start_time) * 1000<<"ms"<<endl;
	}
	MPI_Finalize();
}

void mpi_pipeline()
{
    double start_time = 0;
    double end_time;
    int m_size = 0;
    int m_rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int r = 0;
    int addr = 0;
    float(*space)[ROW] = NULL;
    float local[ROW];
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	int* thread_count = new int[m_size];
	int* s_count = new int[m_size + 1];
	fill(thread_count,thread_count + ROW % m_size,(int)ceil((float)ROW / m_size) * ROW);
	fill(thread_count + ROW % m_size,thread_count + m_size,ROW / m_size * ROW);
	for (i = 0;i < m_size;i++)
	{
		s_count[i] = addr;
		addr += thread_count[i];
	}
	s_count[m_size] = addr;
	space = new float[thread_count[m_rank]/ROW][ROW];
	if (m_rank == 0)
	{
		init();
		start_time = MPI_Wtime();
	}
	MPI_Scatterv(matrix, thread_count, s_count, MPI_FLOAT, space, thread_count[m_rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
	for (k = 0;k < s_count[m_rank] / ROW;k++)
	{
		MPI_Recv(local, ROW, MPI_FLOAT, m_rank - 1, k, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		if(m_rank != m_size-1)
        {
            MPI_Send(local, ROW, MPI_FLOAT, m_rank + 1, k, MPI_COMM_WORLD);
        }
		for (i = 0; i < thread_count[m_rank] / ROW;i++)
		{
			for (j = k + 1;j < ROW;j++)
            {
                space[i][j] -= space[i][k] * local[j];
            }
			space[i][k] = 0;
		}
	}
	for (k = s_count[m_rank] / ROW; k < s_count[m_rank + 1] / ROW;k++)
	{
		int myRow = k - s_count[m_rank] / ROW;
		for (j = k + 1;j < ROW;j++)
        {
            space[myRow][j] /= space[myRow][k];
        }
		space[myRow][k] = 1.0;
		if (m_rank != m_size - 1)
        {
            MPI_Send(space[myRow], ROW, MPI_FLOAT, m_rank + 1, k, MPI_COMM_WORLD);
        }
		for (r = myRow + 1;r < thread_count[m_rank] / ROW;r++)
		{
			for (j = k + 1;j < ROW;j++)
            {
                space[r][j] -= space[r][k] * space[myRow][j];
            }
			space[r][k] = 0;
		}
	}
	MPI_Gatherv(space, thread_count[m_rank], MPI_FLOAT, matrix, thread_count, s_count, MPI_FLOAT, 0, MPI_COMM_WORLD);
	if (m_rank == 0)
	{
		end_time = MPI_Wtime();
        cout <<"mpi_pipeline:"<<(end_time - start_time) * 1000<<"ms"<<endl;
	}
	MPI_Finalize();
}

void gettime(void (*func)())
{
	timeval tv_begin, tv_end;
	int counter(0);
	double time = 0;
	while (INTERVAL > time)
	{
		init();
		gettimeofday(&tv_begin, 0);
		func();
		gettimeofday(&tv_end, 0);
		counter++;
		time += ((ll)tv_end.tv_sec - (ll)tv_begin.tv_sec) * 1000.0 + ((ll)tv_end.tv_usec - (ll)tv_begin.tv_usec) / 1000.0;
	}
	cout<<time / counter<<"ms"<<'\n';
}

void gettime(void (*func)(void* (*threadFunc)(void*)), void* (*threadFunc)(void*))
{
	timeval tv_begin, tv_end;
	int counter(0);
	double time = 0;
	while (INTERVAL > time)
	{
		init();
		gettimeofday(&tv_begin, 0);
		func(threadFunc);
		gettimeofday(&tv_end, 0);
		counter++;
		time += ((ll)tv_end.tv_sec - (ll)tv_begin.tv_sec) * 1000.0 + ((ll)tv_end.tv_usec - (ll)tv_begin.tv_usec) / 1000.0;
	}
	cout<<time / counter<<"ms"<<'\n';
}

int main()
{
	// NOTE(review): every mpi_* function below calls MPI_Init and
	// MPI_Finalize internally, but MPI allows at most one Init/Finalize
	// pair per process — each call after the first will fail at runtime.
	// Hoist MPI_Init/MPI_Finalize into main (or run one variant per
	// process) to fix this; left unchanged here because it requires
	// restructuring every mpi_* function.
	cout<<"plain: ";
	gettime(plain);
    mpi();
	mpi_SIMD();
	mpi_pipeline();
	mpi_block();
	cout<<endl;
	// Sweep the OpenMP thread count for the hybrid variants.
	for (NUM_THREADS = 2;NUM_THREADS <= 8;NUM_THREADS++)
	{
        cout<<"threads_num: "<<NUM_THREADS<<endl;
		mpi_omp();
		mpi_omp_SIMD();
		cout<<endl;
	}
	return 0;
}
