#include <pthread.h>
#include <omp.h>
#include <iostream>
#include <cmath>
#include <arm_neon.h>
#include <semaphore.h>
#include <stdio.h>
#include <sys/time.h>
#include <algorithm>
#include <mpi.h>
#define N 1024          // matrix dimension: m is N x N
#define TASK 8          // unused in the visible code -- possibly leftover
#define INTERVAL 10000  // unused in the visible code -- possibly leftover
using namespace std;
float m[N][N];          // shared coefficient matrix every kernel operates on
typedef long long ll;   // unused in the visible code
// Per-thread argument bundle for pthread workers.  No pthread kernel is
// visible in this chunk -- presumably leftover from an earlier variant.
typedef struct {
	int k;     // current elimination step
	int t_id;  // worker thread id
}threadParam_t;

// pthread synchronization objects; none of the functions visible here use
// them -- presumably remnants of a pthread-based implementation.
sem_t sem_leader;
sem_t sem_Divsion[32];
sem_t sem_Elimination[32];
pthread_barrier_t division;
pthread_barrier_t elemation;
int NUM_THREADS = 8;    // OpenMP thread count; main() sweeps this from 2 to 8
int remain = N;         // unused in the visible code
pthread_mutex_t remainLock;  // unused in the visible code

// Fill m with a random upper-triangular matrix, then apply 2000 random row
// combinations so elimination has dense, non-trivial work to do.  rand() is
// never seeded, so the matrix is reproducible across runs; the rand() call
// order is kept identical to the original so results stay bit-for-bit equal.
void init()
{
	for (int r = 0; r < N; r++)
	{
		int c = 0;
		// strictly-lower part is zero
		for (; c < r; c++)
		{
			m[r][c] = 0;
		}
		// diagonal and upper part: random values in [1, 10000]
		for (; c < N; c++)
		{
			m[r][c] = rand() % 10000 + 1;
		}
	}
	// Mix rows: dst += / -= (random multiple of) src.  The sign factor is
	// exact in IEEE arithmetic, so this matches the original add/sub branches.
	for (int step = 0; step < 2000; step++)
	{
		int dst = rand() % N;
		int src = rand() % N;
		int sign = (rand() % 2 == 1) ? 1 : -1;
		for (int c = 0; c < N; c++)
		{
			m[dst][c] += sign * m[src][c] * (rand() % 100);
		}
	}
}

void serial(int n){ //串行
    struct timeval h, t;
    float time = 0.0;
    gettimeofday(&h, NULL);
      for(int k=0;k<n;k++){
          for(int j=k+1;j<n;j++)
              m[k][j]/=m[k][k];
          m[k][k]=1.0;
          for(int i=k+1;i<n;i++){
              for(int j=k+1;j<n;j++)
                  m[i][j]-=m[i][k]*m[k][j];
              m[i][k]=0;
          }
      }
    gettimeofday(&t, NULL);
    time += 1000*(t.tv_sec - h.tv_sec) + 0.001*(t.tv_usec - h.tv_usec);
    cout<<"serial: "<<time<<" ms"<<endl;
    return;
}

void mpi()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
	MPI_Comrank(MPI_COMM_WORLD, &rank);
	int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if(rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    for(k = 0; k < N; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            for (j = k + 1;j < N;++j)
            {
                m[k][j] = m[k][j] / m[k][k];
            }
            m[k][k] = 1.0;
            for(j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0],N,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0],N,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
		for (i = max(r1,k + 1); i < r2; ++i)
		{
			for (j = k + 1;j < N;++j)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
			m[i][k] = 0.0;
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}

void mpi_omp()
{
    int thread_num;
    int rank;
    MPI_Comthread_num(MPI_COMM_WORLD,&thread_num);
    MPI_Comrank(MPI_COMM_WORLD,&rank);
    struct timeval h, t;
    float time = 0.0;
    MPI_Status status1;
    gettimeofday(&h, NULL);
    for (int k = 0; k < n; k++) {
        vector<int> b;
        vector<int> e;
        //初始化b和e
        for(int i=0;i<thread_num;i++) {
            b.push_back(0);
            e.push_back(0);
            int width = (n - k - 1) / thread_num;
            b[i] = k + width * i + 1;
            e[i] = k + (i + 1) * width + 1;
        }
        e[thread_num-1]+=(n - k - 1) % thread_num;
        for(int i=0;i<thread_num;i++){
            if ((n - k - 1) / thread_num == 0) {
                b[i] = (i == 0) ? k + 1 : 0;
                e[i] = (i == 0) ? n : 0;
            }
        }
        //除法并将结果发送给各个线程
        if(rank==0) {
            for (int j = k + 1; j < n; j++)
                m[k][j] /= m[k][k];
            m[k][k] = 1.0;

            for(int i=1;i<thread_num;i++){
                MPI_Send(&m[k][0],n,MPI_FLOAT,i,77,MPI_COMM_WORLD);
            }
        } else{
            MPI_Recv(&m[k][0],n,MPI_FLOAT,0,77,MPI_COMM_WORLD,&status1);
        }

        for (int i = b[rank]; i < e[rank]; i++) {
            for (int j = k + 1; j < n; j++)
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            m[i][k] = 0;
        }


        MPI_Status status2;
        int size=(e[rank]-b[rank])*n;
        if(rank!=0){
            MPI_Send(&m[b[rank]][0],size,MPI_FLOAT,0,99,MPI_COMM_WORLD);
        }else{
            for(int i=1;i<thread_num;i++){
                int i_size=(e[i]-b[i])*n;
                MPI_Recv(&m[b[i]][0],i_size,MPI_FLOAT,i,99,MPI_COMM_WORLD,&status2);
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);

        MPI_Bcast(&m[0][0],n*n,MPI_FLOAT,0,MPI_COMM_WORLD);
    }
    gettimeofday(&t, NULL);
    time += 1000*(t.tv_sec - h.tv_sec) + 0.001*(t.tv_usec - h.tv_usec);
    cout<<"mpi: "<<time<<" ms"<<endl;
}


void mpi_omp_neon()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    float32x4_t diver,divee,mult1,mult2,sub1;
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
	MPI_Comrank(MPI_COMM_WORLD, &rank);
	int r1 = (N - N % thread_num) / thread_num * rank;
    int r2 = (N - N % thread_num) / thread_num * (rank + 1);
    if (N - r2 < (N - N % thread_num) / thread_num)
    {
        r2 = N;
    }
    if(rank == 0)
    {
        init();
    }
    start_time = MPI_Wtime();
    #pragma omp parallel num_threads(NUM_THREADS), shared(m), private(i,j,k,diver,divee,mult1,mult2,sub1,thread_num,rank)
    for(k = 0; k < N; ++k)
	{
	    if(k >= r1 && k <= r2)
        {
            diver = vld1q_dup_f32(&m[k][k]);
            #pragma omp single
            for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[k][j] = m[k][j] / m[k][k];
            }
            for (;j < N;j += 4)
            {
                divee = vld1q_f32(&m[k][j]);
                divee = vdivq_f32(divee, diver);
                vst1q_f32(&m[k][j], divee);
            }
            #pragma omp barrier
            m[k][k] = 1.0;
            for(j = 0;j < thread_num;++j)
            {
                MPI_Send(&m[k][0],N,MPI_FLOAT,j,1,MPI_COMM_WORLD);
            }
        }
        else
        {
            MPI_Recv(&m[k][0],N,MPI_FLOAT,j,1,MPI_COMM_WORLD,&status);
        }
		#pragma omp for schedule(dynamic)
		for (i = max(r1,k + 1); i < r2; ++i)
		{
			mult1 = vld1q_dup_f32(&m[i][k]);
			for (j = k + 1;j < N && ((N - j) & 3);++j)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
			for (;j < N;j += 4)
			{
				sub1 = vld1q_f32(&m[i][j]);
				mult2 = vld1q_f32(&m[k][j]);
				mult2 = vmulq_f32(mult1, mult2);
				sub1 = vsubq_f32(sub1, mult2);
				vst1q_f32(&m[i][j], sub1);
			}
			m[i][k] = 0.0;
		}
		#pragma omp barrier
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if(rank == 0)
    {
        end_time = MPI_Wtime();
        cout <<"mpi_omp_neon:"<<(end_time - start_time) * 1000<<"ms"<<endl;
    }
    MPI_Finalize();
}


void mpi_pipeline()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int r = 0;
    int addr = 0;
    float(*space)[N] = NULL;
    float local[N];
    MPI_Init(NULL,NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
	MPI_Comrank(MPI_COMM_WORLD, &rank);
	int* thread_count = new int[thread_num];
	int* s_count = new int[thread_num + 1];
	fill(thread_count,thread_count + N % thread_num,(int)ceil((float)N / thread_num) * N);
	fill(thread_count + N % thread_num,thread_count + thread_num,N / thread_num * N);
	for (i = 0;i < thread_num;i++)
	{
		s_count[i] = addr;
		addr += thread_count[i];
	}
	s_count[thread_num] = addr;
	space = new float[thread_count[rank]/N][N];
	if (rank == 0)
	{
		init();
		start_time = MPI_Wtime();
	}
	MPI_Scatterv(m, thread_count, s_count, MPI_FLOAT, space, thread_count[rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
	for (k = 0;k < s_count[rank] / N;k++)
	{
		MPI_Recv(local, N, MPI_FLOAT, rank - 1, k, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		if(rank != thread_num-1)
        {
            MPI_Send(local, N, MPI_FLOAT, rank + 1, k, MPI_COMM_WORLD);
        }
		for (i = 0; i < thread_count[rank] / N;i++)
		{
			for (j = k + 1;j < N;j++)
            {
                space[i][j] -= space[i][k] * local[j];
            }
			space[i][k] = 0;
		}
	}
	for (k = s_count[rank] / N; k < s_count[rank + 1] / N;k++)
	{
		int myRow = k - s_count[rank] / N;
		for (j = k + 1;j < N;j++)
        {
            space[myRow][j] /= space[myRow][k];
        }
		space[myRow][k] = 1.0;
		if (rank != thread_num - 1)
        {
            MPI_Send(space[myRow], N, MPI_FLOAT, rank + 1, k, MPI_COMM_WORLD);
        }
		for (r = myRow + 1;r < thread_count[rank] / N;r++)
		{
			for (j = k + 1;j < N;j++)
            {
                space[r][j] -= space[r][k] * space[myRow][j];
            }
			space[r][k] = 0;
		}
	}
	MPI_Gatherv(space, thread_count[rank], MPI_FLOAT, m, thread_count, s_count, MPI_FLOAT, 0, MPI_COMM_WORLD);
	if (rank == 0)
	{
		end_time = MPI_Wtime();
        cout <<"mpi_pipeline:"<<(end_time - start_time) * 1000<<"ms"<<endl;
	}
	MPI_Finalize();
}

void mpi_block()
{
    double start_time = 0;
    double end_time;
    int thread_num = 0;
    int rank = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int addr = 0;
    float(*space)[N] = NULL;
    float local[N];
    MPI_Init(NULL,NULL);
    MPI_Comthread_num(MPI_COMM_WORLD, &thread_num);
	MPI_Comrank(MPI_COMM_WORLD, &rank);
	int* thread_count = new int[thread_num];
	int* s_count = new int[thread_num + 1];
	fill(thread_count,thread_count + N % thread_num,(int)ceil((float)N / thread_num) * N);
	fill(thread_count + N % thread_num,thread_count + thread_num,N / thread_num * N);
	for (i = 0;i < thread_num;i++)
	{
		s_count[i] = addr;
		addr += thread_count[i];
	}
	s_count[thread_num] = addr;
	space = new float[thread_count[rank]/N][N];
	if (rank == 0)
	{
		init();
		start_time = MPI_Wtime();
	}
	MPI_Scatterv(m, thread_count, s_count, MPI_FLOAT, space, thread_count[rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
	for (k = 0;k < N;k++)
	{
		float* t_Div;
		int tmp = upper_bound(s_count, s_count + thread_num + 1, k * N) - s_count - 1;
		if (tmp == rank)
		{
			t_Div = space[k - s_count[rank] / N];
			for (int j = k + 1;j < N;j++)
            {
                t_Div[j] /= t_Div[k];
            }
			t_Div[k] = 1.0;
		}
		else
		{
		    t_Div = local;
		}
		MPI_Bcast(t_Div, N, MPI_FLOAT, tmp, MPI_COMM_WORLD);
		for (i = max(s_count[rank] / N, k + 1) - s_count[rank] / N;i < s_count[rank + 1] / N - s_count[rank] / N;i++)
		{
			for (j = k + 1;j < N;j++)
            {
                space[i][j] -= space[i][k] * t_Div[j];
            }
			space[i][k] = 0;
		}
	}
	MPI_Gatherv(space, thread_count[rank], MPI_FLOAT, m, thread_count, s_count, MPI_FLOAT, 0, MPI_COMM_WORLD);
	if (rank == 0)
	{
		end_time = MPI_Wtime();
        cout <<"mpi_block:"<<(end_time - start_time) * 1000<<"ms"<<endl;
	}
	MPI_Finalize();
}

int main()
{
	serial();
    mpi();
    mpi_omp();  
    mpi_omp_neon();
	mpi_pipeline();
	cout<<endl;
    cout<<"threads_num: "<<NUM_THREADS<<endl;
	for (NUM_THREADS = 2;NUM_THREADS <= 8;NUM_THREADS++)
	{
        cout<<"threads_num: "<<NUM_THREADS<<endl;
		mpi_omp();
		mpi_omp_neon();
		cout<<endl;
	}
	return 0;
}
