#include<iostream>
#include <pthread.h>
#include<iomanip>
#include <iostream>
#include <immintrin.h>
#include <xmmintrin.h> //SSE
#include <emmintrin.h> //SSE2
#include <pmmintrin.h> //SSE3
#include <tmmintrin.h> //SSSE3
#include <smmintrin.h> //SSE4.1
#include <nmmintrin.h> //SSSE4.2
#include <immintrin.h> //AVX、AVX
#include<vector>
#include<sys/time.h>
#include<mpi.h>
using namespace std;
int taskNum = 1;//number of tasks, derived from pThreadNum
int pThreadNum = 1;//thread count; best kept under ~350
int n;//matrix dimension (the matrix is n x n)
float** m;//the n x n coefficient matrix, allocated by init()
// Builds a random test system that Gaussian elimination can reduce:
// first fill the upper triangle (unit diagonal, random 1..100 above it),
// then fold every row into all rows below it so the matrix becomes
// dense while remaining non-singular.
void generate()
{
    for (int row = 0; row < n; row++)
    {
        m[row][row] = 1.0;
        for (int col = row + 1; col < n; col++)
            m[row][col] = rand() % 100 + 1;
    }
    // Densify: add row k into each row beneath it, in order.
    for (int k = 0; k < n; k++)
        for (int below = k + 1; below < n; below++)
            for (int col = 0; col < n; col++)
                m[below][col] += m[k][col];
}
void show()
{
    for (int i = 0; i < n; i++)
    {
        for (int p = 0; p < n; p++)
        {
            cout << setw(5) << m[i][p] << " ";
        }
        cout << endl;
    }
}
// Serial forward Gaussian elimination on the global matrix m.
// For each pivot: scale the entries to the right of the diagonal by the
// pivot value, then zero out the column below it. Note the pivot entry
// itself is intentionally left unscaled (only columns > pivot are
// divided), matching the original routine.
void commonGaosi()
{
    for (int pivot = 0; pivot < n; pivot++)
    {
        // Normalize the tail of the pivot row.
        for (int col = pivot + 1; col < n; col++)
            m[pivot][col] /= m[pivot][pivot];

        // Subtract the scaled pivot row from every row below it.
        for (int row = pivot + 1; row < n; row++)
        {
            for (int col = pivot + 1; col < n; col++)
                m[row][col] -= m[pivot][col] * m[row][pivot];
            m[row][pivot] = 0;
        }
    }
}
void init()
{
    m = new float* [n];
    for (int p = 0; p < n; p++)
    {
        m[p] = new float[n];
        for (int q = 0; q < n; q++)
        {
            m[p][q] = 0.0;
        }
    }
    generate();
}
// Row-cyclic parallel Gaussian elimination over MPI, synchronized with
// a barrier after each pivot-row normalization.
// For pivot step k, the row i > k is updated by the rank satisfying
// rank == (i - k - 1) % numProcesses; each owner then sends its updated
// rows to every other rank so all processes keep a full, current copy
// of the matrix. Rank 0 prints the average per-process wall time.
// NOTE(review): this calls MPI_Init itself, so it must not run in the
// same process as the MPI_Init_thread in main(); MPI_Finalize is never
// called anywhere in this file — confirm against the intended usage.
void mpiBarrier(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);


    // rank is simply this process's index inside the communicator;
    // numProcesses is the total number of processes in it.
    //int r1 = rank * (n / numProcesses); // first row this process would own
    //int r2 = (rank + 1) * (n / numProcesses); // one past its last row
    // (the block-partitioning sketch above was left unused; the loops
    // below distribute rows cyclically instead)


    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;

    gettimeofday(&t1, NULL);
    for (int k = 0; k < n; k++) {
        // Every rank redundantly normalizes the pivot row (all ranks
        // hold the full matrix, so this stays consistent).
        for (int j = k + 1; j < n; j++) 
            m[k][j] = m[k][j] / m[k][k];
        m[k][k] = 1;
        MPI_Barrier(MPI_COMM_WORLD);
        // Eliminate only the rows assigned to this rank (cyclic split).
        for (int i = rank + k + 1; i < n; i += numProcesses) 
        {
            for (int j = k + 1; j < n; j++) 
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            m[i][k] = 0;
        }
        // Exchange the freshly updated rows so every rank has them all.
        for (int i = k + 1; i < n; i++)
        {
            if (rank == (i - k - 1) % numProcesses)
            {
                for (int j = 0; j < numProcesses; j++)
                {
                    if (rank != j)
                    {
                        MPI_Send(&m[i][0], n, MPI_FLOAT, j, 0, MPI_COMM_WORLD);
                    }
                }
            }
            else
            {
                MPI_Recv(&m[i][0], n, MPI_FLOAT, (i - k - 1) % numProcesses,0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
    }

    gettimeofday(&t2, NULL);

    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }


    // Debug helper: print each rank's matrix copy in rank order.
    /*for (int i = 0; i < numProcesses; i++)
    {
        if (i == rank)
        {
            show();
            cout << endl;
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }*/
}

// Unfinished experiment: judging from the k < n/2 split, the intent
// appears to be a two-block division of the pivot work, but both
// branches are empty stubs, so this routine currently only initializes
// MPI and spins over the pivot indices without doing anything.
// NOTE(review): calls MPI_Init; invoking this after the MPI_Init_thread
// in main() would be an MPI usage error.
void tryBlock(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    

    for (int k = 0; k < n; k++)
    {
        if (k < n / 2)
        {
            // TODO: first-half handling was never implemented
        }
        else
        {
            // TODO: second-half handling was never implemented
        }
    }


}


// Returns the rank of the previous process in the ring topology,
// wrapping from rank 0 back to numProcess - 1.
int preProcess(int rank, int numProcess) 
{
    return (rank > 0) ? (rank - 1) : (numProcess - 1);
}
// Returns the rank of the next process in the ring topology, wrapping
// from numProcesses - 1 back to rank 0.
int nextProcess(int rank, int numProcesses)
{
    return (rank < numProcesses - 1) ? (rank + 1) : 0;
}
// Pipelined row-cyclic Gaussian elimination: the owner of pivot row k
// (rank == k % numProcesses) normalizes it and injects it into a ring
// pipeline; every other rank receives it from its predecessor, forwards
// it to its successor (unless the successor is the owner, which already
// has it), and then eliminates its own rows. Row ownership is cyclic:
// rank r owns rows r, r+P, r+2P, ...
// Fixes vs. the original:
//  - non-owner ranks now actually perform the elimination on their own
//    rows (the original only relayed the pivot row, so the matrix was
//    never updated on any rank except the pivot's owner);
//  - the forward-stop test now checks whether the NEXT process is the
//    pivot owner; the original condition ((k+1)%P != rank) cut the
//    pipeline one hop after the owner, leaving every later rank in the
//    ring blocked in MPI_Recv for numProcesses > 2.
// NOTE(review): like its siblings, this calls MPI_Init itself and never
// MPI_Finalize — do not combine with MPI_Init_thread in main().
void tryPipline(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (int k = 0; k < n; k++)
    {
        if (k % numProcesses == rank) // this rank owns pivot row k
        {
            // Normalize the pivot row, then start it down the pipeline.
            for (int j = k + 1; j < n; j++)
                m[k][j] = m[k][j] / m[k][k];
            m[k][k] = 1;
            MPI_Send(&m[k][0], n, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);

            // Eliminate the rows this rank owns below the pivot.
            for (int i = k + numProcesses; i < n; i += numProcesses)
            {
                for (int j = k + 1; j < n; j++)
                {
                    m[i][j] = m[i][j] - m[i][k] * m[k][j];
                }
                m[i][k] = 0;
            }
        }
        else
        {
            // Receive the pivot row from the ring predecessor.
            MPI_Recv(&m[k][0], n, MPI_FLOAT, preProcess(rank, numProcesses), 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            // Forward it unless our successor is the owner (it already has it).
            if (nextProcess(rank, numProcesses) != k % numProcesses)
            {
                MPI_Send(&m[k][0], n, MPI_FLOAT, nextProcess(rank, numProcesses), 0, MPI_COMM_WORLD);
            }
            // Eliminate this rank's own rows that lie below the pivot.
            for (int i = rank; i < n; i += numProcesses)
            {
                if (i > k)
                {
                    for (int j = k + 1; j < n; j++)
                    {
                        m[i][j] = m[i][j] - m[i][k] * m[k][j];
                    }
                    m[i][k] = 0;
                }
            }
        }
    }
}

// Block-distributed Gaussian elimination over MPI: each rank owns a
// contiguous block of HowManyWork = ceil(n / numProcesses) pivot rows,
// namely [rank*HowManyWork, (rank+1)*HowManyWork). The owner of pivot
// row k normalizes it and sends it to every other rank; every rank then
// applies the elimination. Rank 0 prints the average per-process wall
// time. Assumes MPI was already initialized by the caller (main()).
// NOTE(review): after obtaining the pivot row, EVERY rank eliminates
// all rows k+1..n-1, not just its own block — the result is correct,
// but the arithmetic is fully redundant across ranks, so no speedup
// over the serial version should be expected.
void zhujiao(int argc, char* argv[], int rank, int numProcesses)
{



    struct timeval t1, t2;
    double timeuse;
    double averageTime = 0.0;
    gettimeofday(&t1, NULL);

    // ceil(n / numProcesses): number of pivot rows owned per rank.
    int HowManyWork = n / numProcesses * numProcesses == n ? n / numProcesses : n / numProcesses + 1;
    int r1 = rank * HowManyWork; // first pivot row this rank owns
    int r2 = r1 + HowManyWork; // one past the last owned pivot row

    for (int k = 0; k < n; k++)
    {
        if (r1 <= k && k < r2)
        {
            // We own pivot row k: normalize it, then send it to every
            // other rank (point-to-point broadcast).
            for (int j = k + 1; j < n; j++)
                m[k][j] = m[k][j] / m[k][k];
            m[k][k] = 1;
            for (int i = 0; i < numProcesses; i++)
            {
                if (i != rank)
                {
                    MPI_Send(&m[k][0], n, MPI_FLOAT, i, 0, MPI_COMM_WORLD);
                }
            }
        }
        else
        {
            // Another rank owns row k; its owner is k / HowManyWork.
            MPI_Recv(&m[k][0], n, MPI_FLOAT, k / HowManyWork, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        // Eliminate below the pivot (performed redundantly on all ranks).
        for (int i = k + 1; i < n; i++)
        {
            for (int j = k + 1; j < n; j++)
            {
                m[i][j] = m[i][j] - m[i][k] * m[k][j];
            }
            m[i][k] = 0;
        }
    }


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        averageTime /= numProcesses;
        cout << averageTime << endl;
    }

}

int main(int argc, char* argv[]) 
{
    int provided;
    //MPI_Init_thread(&argc, &argv);

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);


    pThreadNum = 4;


    for (int i = 1; i <= 15; i++)
    {
        n = i * 200;
        init();

        //mpiBarrier(argc, argv);

        zhujiao(argc, argv, rank, numProcesses);
        //show();
        //commonGaosi();
    }
    return 0;
}

