#include<iostream>
#include<iomanip>
#include<pthread.h>
#include <immintrin.h>
#include <xmmintrin.h> //SSE
#include <emmintrin.h> //SSE2
#include <pmmintrin.h> //SSE3
#include <tmmintrin.h> //SSSE3
#include <smmintrin.h> //SSE4.1
#include <nmmintrin.h> //SSE4.2
#include <immintrin.h> //AVX
#include<semaphore.h>


#include<mpi.h>
#include<sys/time.h>


using namespace std;

int n; // Global matrix dimension (all matrices are n x n); set by the test drivers before each run.
// Build an n x n matrix that is guaranteed to admit an LU factorization:
// start from a unit-diagonal upper-triangular matrix with random entries in
// [1, 100], then add every row into all rows below it so the result is dense
// but still non-singular.  Uses the global dimension n; caller owns the rows.
float** generate()
{
    float** mat = new float* [n];
    for (int row = 0; row < n; row++)
    {
        mat[row] = new float[n];
        for (int col = 0; col < n; col++)
            mat[row][col] = 0;
    }

    // Random unit-diagonal upper-triangular seed.
    for (int row = 0; row < n; row++)
    {
        mat[row][row] = 1.0;
        for (int col = row + 1; col < n; col++)
            mat[row][col] = rand() % 100 + 1;
    }

    // Densify: accumulate each earlier row into every later row.
    for (int src = 0; src < n; src++)
        for (int dst = src + 1; dst < n; dst++)
            for (int col = 0; col < n; col++)
                mat[dst][col] += mat[src][col];

    return mat;
}
// Print the n x n matrix, one row per line, each entry right-aligned in a
// 5-character field followed by a space.
void show(float** mat)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
            cout << setw(5) << mat[row][col] << " ";
        cout << endl;
    }
}




// Exchange the contents of two floats in place.
void swap(float &a, float &b)
{
    const float old_a = a;
    a = b;
    b = old_a;
}
// Transpose the n x n matrix in place by mirroring across the diagonal.
void T(float** mat)
{
    for (int row = 0; row < n; row++)
        for (int col = row + 1; col < n; col++)
        {
            float tmp = mat[row][col];
            mat[row][col] = mat[col][row];
            mat[col][row] = tmp;
        }
}


/*
 * SSE-accelerated in-place LU decomposition (Doolittle): after the call, m
 * holds U on and above the diagonal and the multipliers of L below it
 * (unit diagonal implicit).  Returns wall-clock seconds.
 *
 * The row slice m[r][k..k+3] is loaded contiguously; the column slice
 * m[k..k+3][i] must be gathered with _mm_set_ps (this is the cache-unfriendly
 * access the transposed-storage variants below avoid).
 *
 * Fix: the vector loop previously ran only while k + 4 < r, which handed a
 * full 4-element tail to the scalar loop whenever r is a multiple of 4; the
 * bound is now k + 4 <= r.
 */
double sse__do_on_init_matrix(float** m)
{
    struct timeval t1, t2;
    double timeuse = 0;

    gettimeofday(&t1, NULL);

    float UtempSum = 0, LtempSum = 0;
    int k = 0;
    for (int r = 1; r < n; r++)
        for (int i = r; i < n; i++)
        {
            UtempSum = 0;
            LtempSum = 0;
            k = 0;
            for (; k + 4 <= r; k += 4)
            {
                __m128 m_r_k = _mm_loadu_ps(&m[r][k]);
                // Gather the column m[k..k+3][i] (reverse order: _mm_set_ps is high-to-low).
                __m128 m_k_i = _mm_set_ps(m[k + 3][i], m[k + 2][i], m[k + 1][i], m[k][i]);
                __m128 UtempSumVec = _mm_mul_ps(m_r_k, m_k_i);
                if (i != r)
                {
                    __m128 m_i_k = _mm_loadu_ps(&m[i][k]);
                    __m128 m_k_r = _mm_set_ps(m[k + 3][r], m[k + 2][r], m[k + 1][r], m[k][r]);
                    __m128 LtempSumVec = _mm_mul_ps(m_i_k, m_k_r);
                    // NOTE: indexing a __m128 with [] is a GCC/Clang extension.
                    LtempSum += LtempSumVec[0] + LtempSumVec[1] + LtempSumVec[2] + LtempSumVec[3];
                }
                UtempSum += UtempSumVec[0] + UtempSumVec[1] + UtempSumVec[2] + UtempSumVec[3];
            }
            for (; k < r; k++)  // scalar tail (< 4 elements)
            {
                UtempSum += m[r][k] * m[k][i];
                if (i != r)
                    LtempSum += m[i][k] * m[k][r];
            }

            m[r][i] = m[r][i] - UtempSum;                      // U entry
            if (i != r)
                m[i][r] = (m[i][r] - LtempSum) / m[r][r];      // L multiplier
        }
    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    return timeuse;
}



// Serial in-place LU decomposition (Doolittle): after the call, m holds U on
// and above the diagonal and the multipliers of L below it (unit diagonal
// implicit).  Returns wall-clock seconds.
double do_on_init_matrix(float** m)
{
    struct timeval start, stop;

    gettimeofday(&start, NULL);

    for (int r = 1; r < n; r++)
    {
        for (int i = r; i < n; i++)
        {
            float uSum = 0;
            float lSum = 0;
            for (int k = 0; k < r; k++)
            {
                uSum += m[r][k] * m[k][i];
                if (i != r)
                    lSum += m[i][k] * m[k][r];
            }
            m[r][i] -= uSum;
            if (i != r)
                m[i][r] = (m[i][r] - lSum) / m[r][r];
        }
    }

    gettimeofday(&stop, NULL);
    return (stop.tv_sec - start.tv_sec) + (double)(stop.tv_usec - start.tv_usec) / 1000000.0;
}




// Below: LU-decomposition variants with better cache locality (U is kept transposed so dot products read contiguous rows).


// Per-thread argument bundle passed to the pthread worker functions.
typedef struct
{
    int t_id;           // worker index, 0 .. pThreadNum-1
    int r;              // (unused by the workers in this file)
    int Jincheng_rank;  // MPI rank of the owning process
    int numProcesses;   // total number of MPI processes
    int begin;          // first column step this process owns (hybrid variant)
    int end;            // one past the last column step this process owns
}threadParam_t;




/*↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓pthread↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓*/
float** pM;          // input matrix, shared with the worker threads (aliases the caller's matrix)
float** pL;          // unit lower-triangular factor
float** pU;          // upper-triangular factor, stored transposed (see T())
int taskNum = 1;     // (unused)
int pThreadNum = 1;  // number of worker threads; set in main()

sem_t sem_leader;        // worker -> master: "my part of this round is done"
sem_t* sem_workerstart;  // master -> worker i: "start the next round" (array of pThreadNum)
sem_t* sem_workerend;    // master -> worker i: "barrier released, proceed"  (array of pThreadNum)
/*
 * Worker for the static block-partitioned pthread LU decomposition (type 0).
 * Operates on the file-scope pM (input), pL, pU (pU holds U transposed).
 * Each worker handles the contiguous row chunk
 * [num_2_handle * t_id, num_2_handle * (t_id + 1)).
 * Protocol per round r: wait on sem_workerstart[t_id], compute, post
 * sem_leader, then block on sem_workerend[t_id] until the master releases
 * the barrier.
 */
void* threadFunc(void* param)
{
    threadParam_t* p = (threadParam_t*)param;
    int t_id = p->t_id;
    int num_2_handle = n / pThreadNum + 1;   // rows per worker, rounded up



    float UtempSum = 0;
    float LtempSum = 0;

    for (int r = 1; r < n; r++)
    {
        sem_wait(&sem_workerstart[t_id]);    // wait for round r to be published
        for (int i = r; i < n; i++)
        {
            if (i >= num_2_handle * t_id && i < num_2_handle * (t_id + 1)) 
            {
                UtempSum = 0;
                LtempSum = 0;
                for (int k = 0; k < r; k++)
                {
                    UtempSum += pL[r][k] * pU[i][k];
                    if (i != r)
                        LtempSum += pL[i][k] * pU[r][k];
                }
                pU[i][r] = pM[r][i] - UtempSum;                 // pU stores U transposed
                if (i != r)
                    pL[i][r] = (pM[i][r] - LtempSum) / pU[r][r];
            }
        }

        sem_post(&sem_leader);           // report round r complete
        sem_wait(&sem_workerend[t_id]);  // wait for barrier release
    }

    pthread_exit(NULL);
}
/*
 * Worker for the round-robin pthread LU decomposition (type 1).
 * Same semaphore protocol and pM/pL/pU access as threadFunc, but row i of
 * round r belongs to the worker with t_id == i % pThreadNum instead of a
 * contiguous chunk.
 */
void* projectThreadFunc(void* param)
{
    threadParam_t* p = (threadParam_t*)param;
    int t_id = p->t_id;
    int num_2_handle = n / pThreadNum + 1;   // (unused in this strategy)



    float UtempSum = 0;
    float LtempSum = 0;

    for (int r = 1; r < n; r++)
    {
        sem_wait(&sem_workerstart[t_id]);    // wait for round r
        for (int i = r; i < n; i++)
        {
            if (i % pThreadNum == t_id)      // round-robin row ownership
            {
                UtempSum = 0;
                LtempSum = 0;
                for (int k = 0; k < r; k++)
                {
                    UtempSum += pL[r][k] * pU[i][k];
                    if (i != r)
                        LtempSum += pL[i][k] * pU[r][k];
                }
                pU[i][r] = pM[r][i] - UtempSum;                 // pU stores U transposed
                if (i != r)
                    pL[i][r] = (pM[i][r] - LtempSum) / pU[r][r];
            }
        }

        sem_post(&sem_leader);           // report round r complete
        sem_wait(&sem_workerend[t_id]);  // wait for barrier release
    }

    pthread_exit(NULL);
}
// Next row index to hand out from the mission pool; guarded by get_step_mutex.
int which_step_is_now;
// Fix: statically initialized so worker threads never race to initialize it
// (the mission-pool workers previously each called pthread_mutex_init on it).
pthread_mutex_t get_step_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Worker for the dynamic "mission pool" pthread LU decomposition (type 2).
 * Each round r (published by the master via which_step_is_now = r), workers
 * repeatedly claim the next unprocessed row index i under get_step_mutex and
 * perform the Doolittle update on the file-scope pM/pL/pU
 * (pU holds U transposed).
 *
 * Fix: the original called pthread_mutex_init() and pthread_mutex_destroy()
 * in EVERY worker, so threads raced to (re)initialize a mutex that others
 * were already holding and destroyed it while still in use.  The mutex is
 * now expected to be initialized once at file scope (a zero-initialized
 * file-scope pthread_mutex_t is equivalent to PTHREAD_MUTEX_INITIALIZER on
 * glibc — confirm on other platforms).
 */
void* MissionPoolThreadFunc(void* param)
{
    threadParam_t* p = (threadParam_t*)param;
    int t_id = p->t_id;

    float UtempSum = 0;
    float LtempSum = 0;
    int i = 0;
    for (int r = 1; r < n; r++)
    {
        sem_wait(&sem_workerstart[t_id]);   // wait for the master to publish round r
        while (1)
        {
            // Atomically claim the next unprocessed row index.
            pthread_mutex_lock(&get_step_mutex);
            if (which_step_is_now >= n)
            {
                pthread_mutex_unlock(&get_step_mutex);
                break;                      // all rows of this round are claimed
            }
            i = which_step_is_now;
            which_step_is_now++;
            pthread_mutex_unlock(&get_step_mutex);

            UtempSum = 0;
            LtempSum = 0;
            for (int k = 0; k < r; k++)
            {
                UtempSum += pL[r][k] * pU[i][k];
                if (i != r)
                    LtempSum += pL[i][k] * pU[r][k];
            }
            pU[i][r] = pM[r][i] - UtempSum;                 // pU stores U transposed
            if (i != r)
                pL[i][r] = (pM[i][r] - LtempSum) / pU[r][r];
        }

        sem_post(&sem_leader);              // report round r complete
        sem_wait(&sem_workerend[t_id]);     // wait for barrier release
    }

    pthread_exit(NULL);
}
/*
 * SSE variant of the mission-pool worker: same dynamic row claiming as
 * MissionPoolThreadFunc, but the dot products over k use 4-wide SSE loads
 * (pU stores U transposed, so both operands are contiguous row slices).
 *
 * Fixes:
 *  (1) removed the per-thread pthread_mutex_init()/destroy() race — the
 *      mutex must be initialized once at file scope, not by every worker;
 *  (2) the vector loop now runs while k + 4 <= r (was k + 4 < r), so it no
 *      longer hands a full 4-element tail to the scalar loop.
 */
void* MissionPoolSSEThreadFunc(void* param)
{
    threadParam_t* p = (threadParam_t*)param;
    int t_id = p->t_id;

    float UtempSum = 0;
    float LtempSum = 0;
    int i = 0;
    for (int r = 1; r < n; r++)
    {
        sem_wait(&sem_workerstart[t_id]);   // wait for the master to publish round r
        while (1)
        {
            // Atomically claim the next unprocessed row index.
            pthread_mutex_lock(&get_step_mutex);
            if (which_step_is_now >= n)
            {
                pthread_mutex_unlock(&get_step_mutex);
                break;                      // all rows of this round are claimed
            }
            i = which_step_is_now;
            which_step_is_now++;
            pthread_mutex_unlock(&get_step_mutex);

            UtempSum = 0;
            LtempSum = 0;
            int k = 0;
            for (; k + 4 <= r; k += 4)
            {
                __m128 l_r_k = _mm_loadu_ps(&pL[r][k]);
                __m128 u_i_k = _mm_loadu_ps(&pU[i][k]);
                __m128 UtempSumVec = _mm_mul_ps(l_r_k, u_i_k);
                // NOTE: indexing a __m128 with [] is a GCC/Clang extension.
                UtempSum += UtempSumVec[0] + UtempSumVec[1] + UtempSumVec[2] + UtempSumVec[3];
                if (i != r)
                {
                    __m128 l_i_k = _mm_loadu_ps(&pL[i][k]);
                    __m128 u_r_k = _mm_loadu_ps(&pU[r][k]);
                    __m128 LtempSumVec = _mm_mul_ps(l_i_k, u_r_k);
                    LtempSum += LtempSumVec[0] + LtempSumVec[1] + LtempSumVec[2] + LtempSumVec[3];
                }
            }
            for (; k < r; k++)              // scalar tail (< 4 elements)
            {
                UtempSum += pL[r][k] * pU[i][k];
                if (i != r)
                    LtempSum += pL[i][k] * pU[r][k];
            }
            pU[i][r] = pM[r][i] - UtempSum;                 // pU stores U transposed
            if (i != r)
                pL[i][r] = (pM[i][r] - LtempSum) / pU[r][r];
        }

        sem_post(&sem_leader);              // report round r complete
        sem_wait(&sem_workerend[t_id]);     // wait for barrier release
    }

    pthread_exit(NULL);
}

double pthread_do_on_LU(float** m)
{
    pL = new float* [n];
    pU = new float* [n];
    for (int i = 0; i < n; i++)
    {
        pL[i] = new float[n];
        pU[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                pU[i][j] = m[i][j];
            else
                pU[i][j] = 0;

            if (i == j)
                pL[i][j] = 1;
            else
                pL[i][j] = 0;

            if (j == 0)
                pL[i][j] = m[i][j] / pU[0][0];
        }
    }
    pM = m;
    T(pU);

    int type = 2;

    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);

    pthread_t* handle = new pthread_t[pThreadNum];
    threadParam_t* param = new threadParam_t[pThreadNum];

    for (int id = 0; id < pThreadNum; id++)
    {
        param[id].t_id = id;
        switch (type)
        {
        case 0:
            pthread_create(&handle[id], NULL, threadFunc, (void*)&param[id]);
            break;
        case 1:
            pthread_create(&handle[id], NULL, projectThreadFunc, (void*)&param[id]);
            break;
        case 2:
            pthread_create(&handle[id], NULL, MissionPoolThreadFunc, (void*)&param[id]);
            break;
        }
    }


    float UtempSum = 0;
    float LtempSum = 0;
    for (int r = 1; r < n; r++)
    {
        which_step_is_now = r;
        for (int id = 0; id < pThreadNum; id++)
            sem_post(&sem_workerstart[id]);
        for (int id = 0; id < pThreadNum; id++)
            sem_wait(&sem_leader);
        for (int id = 0; id < pThreadNum; id++)
            sem_post(&sem_workerend[id]);
    }

    for (int id = 0; id < pThreadNum; id++)
        pthread_join(handle[id], NULL);


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;


    /*show(m);
    cout << endl;
    show(pL);
    cout << endl;
    T(pU);
    show(pU);*/
    return timeuse;
}

/*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑pthread↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/


/*
 * Serial SSE LU decomposition into local factors L and U (U kept transposed
 * so both dot-product operands are contiguous row slices).
 * Returns wall-clock seconds of the factorization loop.
 *
 * Fixes: (1) L and U were allocated and never freed — they are now released
 * before returning; (2) the vector loop now runs while k + 4 <= r (was
 * k + 4 < r), so it no longer hands a full 4-element tail to the scalar loop.
 */
double sse_do_on_LU(float** m)
{
    // L = identity plus first column; U = first row, then transposed.
    float** L = new float* [n];
    float** U = new float* [n];
    for (int i = 0; i < n; i++)
    {
        L[i] = new float[n];
        U[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                U[i][j] = m[i][j];
            else
                U[i][j] = 0;

            if (i == j)
                L[i][j] = 1;
            else
                L[i][j] = 0;

            if (j == 0)
                L[i][j] = m[i][j] / U[0][0];
        }
    }
    T(U);


    struct timeval t1, t2;
    double timeuse = 0;

    float UtempSum = 0;
    float LtempSum = 0;

    gettimeofday(&t1, NULL);
    for (int r = 1; r < n; r++)
        for (int i = r; i < n; i++)
        {
            UtempSum = 0;
            LtempSum = 0;
            int k = 0;
            for (; k + 4 <= r; k += 4)
            {
                __m128 l_r_k = _mm_loadu_ps(&L[r][k]);
                __m128 u_i_k = _mm_loadu_ps(&U[i][k]);
                __m128 UtempSumVec = _mm_mul_ps(l_r_k, u_i_k);
                // NOTE: indexing a __m128 with [] is a GCC/Clang extension.
                UtempSum += UtempSumVec[0] + UtempSumVec[1] + UtempSumVec[2] + UtempSumVec[3];
                if (i != r)
                {
                    __m128 l_i_k = _mm_loadu_ps(&L[i][k]);
                    __m128 u_r_k = _mm_loadu_ps(&U[r][k]);
                    __m128 LtempSumVec = _mm_mul_ps(l_i_k, u_r_k);
                    LtempSum += LtempSumVec[0] + LtempSumVec[1] + LtempSumVec[2] + LtempSumVec[3];
                }
            }
            for (; k < r; k++)   // scalar tail (< 4 elements)
            {
                UtempSum += L[r][k] * U[i][k];
                if (i != r)
                    LtempSum += L[i][k] * U[r][k];
            }
            U[i][r] = m[r][i] - UtempSum;                 // U stored transposed
            if (i != r)
                L[i][r] = (m[i][r] - LtempSum) / U[r][r];
        }
    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;

    // Fix: release the scratch factors (previously leaked).
    for (int i = 0; i < n; i++)
    {
        delete[] L[i];
        delete[] U[i];
    }
    delete[] L;
    delete[] U;

    return timeuse;
}






/*
 * Block-pipelined MPI LU decomposition ("pingjun" = evenly split).
 * Column steps r are divided into one contiguous chunk [begin, end) per rank.
 * Rows i = 0..n-1 then flow through the ranks as a pipeline
 * (rank 0 -> 1 -> ... -> numProcesses-1): each rank receives the partially
 * filled row i of U and L from its predecessor, computes the entries its own
 * chunk is responsible for, and forwards the row to its successor.
 * Rank 0 generates the matrix and distributes it; U is stored transposed
 * (see T()).  Returns the average wall time over all ranks (printed on
 * rank 0).
 */
double pingjun_mpi(int argc, char* argv[], int numProcesses, int rank)
{
    float** m;
    if (rank == 0)
    {
        // Root builds the test matrix and sends every row to every other rank.
        m = generate();
        for (int i = 0; i < n; i++)
        {
            for (int j = 1; j < numProcesses; j++)
            {
                MPI_Send(&m[i][0], n, MPI_FLOAT, j, 0, MPI_COMM_WORLD);
            }
        }
    }
    else
    {
        m = new float* [n];
        for (int i = 0; i < n; i++)
        {
            m[i] = new float[n];
            MPI_Recv(&m[i][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    // L = identity plus first column of m / m[0][0]; U = first row of m,
    // then transposed so dot products read contiguous rows.
    float** L = new float* [n];
    float** U = new float* [n];
    for (int i = 0; i < n; i++)
    {
        L[i] = new float[n];
        U[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                U[i][j] = m[i][j];
            else
                U[i][j] = 0;

            if (i == j)
                L[i][j] = 1;
            else
                L[i][j] = 0;

            if (j == 0)
                L[i][j] = m[i][j] / U[0][0];
        }
    }
    T(U);
    
    

    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);
    double averageTime = 0.0;

    // Contiguous chunk of column steps owned by this rank (rounded up).
    int how_many_to_do = (n - 1) % numProcesses == 0 ? (n - 1) / numProcesses : (n - 1) / numProcesses + 1;
    
    
    int begin = rank * how_many_to_do + 1;
    int end = begin + how_many_to_do > n ? n : begin + how_many_to_do;



    float UtempSum = 0;
    float LtempSum = 0;

    // Pipeline over rows: receive partial row i, fill in our chunk, forward.
    for (int i = 0; i < n; i++)
    {
        if (rank != 0)
        {
            // Only the first i entries of row i are meaningful so far.
            MPI_Recv(&U[i][0], i, MPI_FLOAT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&L[i][0], i, MPI_FLOAT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        if (i >= begin)
            for (int r = begin; r < end && r < n && r <= i; r++)
            {
                UtempSum = 0;
                LtempSum = 0;
                for (int k = 0; k < r; k++)
                {
                    UtempSum += L[r][k] * U[i][k];
                    if (i != r)
                        LtempSum += L[i][k] * U[r][k];
                }
                U[i][r] = m[r][i] - UtempSum;                 // U stored transposed
                if (i != r)
                    L[i][r] = (m[i][r] - LtempSum) / U[r][r];
            }
        if (rank != numProcesses - 1)
        {
            MPI_Send(&U[i][0], i, MPI_FLOAT, rank + 1, 0, MPI_COMM_WORLD);
            MPI_Send(&L[i][0], i, MPI_FLOAT, rank + 1, 0, MPI_COMM_WORLD);
        }
    }


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    // Average the per-rank timings onto rank 0.
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    averageTime /= numProcesses;


    int showLU = 0;   // flip to 1 to dump m, then each rank's L and U in turn
    if (showLU)
    {
        if (rank == 0)
        {
            show(m);
            cout << endl;
        }


        for (int i = 0; i < numProcesses; i++)
        {
            if (i == rank)
            {
                cout << i << endl;
                show(L);
                cout << endl;
                show(U);
                cout << endl << endl;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }


    }

    if (rank == 0)
    {
        cout << averageTime << endl;
    }

    return averageTime;
}
/*
 * Round-robin MPI LU decomposition: column step r is computed by rank
 * r % numProcesses (the diagonal entry U[r][r] first), and within step r,
 * row i is updated by rank i % numProcesses.  Every freshly computed element
 * of L and U is then broadcast to all ranks — one MPI_Bcast per element,
 * which is the dominant communication cost of this variant.
 * Rank 0 generates the matrix; U is stored transposed (see T()).
 * Returns the average wall time over all ranks (printed on rank 0).
 */
double project_mpi(int argc, char* argv[], int numProcesses, int rank)
{
    float** m;
    if (rank == 0)
    {
        // Root builds the test matrix and sends every row to every other rank.
        m = generate();
        for (int i = 0; i < n; i++)
        {
            for (int j = 1; j < numProcesses; j++)
            {
                MPI_Send(&m[i][0], n, MPI_FLOAT, j, 0, MPI_COMM_WORLD);
            }
        }
    }
    else
    {
        m = new float* [n];
        for (int i = 0; i < n; i++)
        {
            m[i] = new float[n];
            MPI_Recv(&m[i][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    // L = identity plus first column of m / m[0][0]; U = first row of m,
    // then transposed so dot products read contiguous rows.
    float** L = new float* [n];
    float** U = new float* [n];
    for (int i = 0; i < n; i++)
    {
        L[i] = new float[n];
        U[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                U[i][j] = m[i][j];
            else
                U[i][j] = 0;

            if (i == j)
                L[i][j] = 1;
            else
                L[i][j] = 0;

            if (j == 0)
                L[i][j] = m[i][j] / U[0][0];
        }
    }
    T(U);



    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);
    double averageTime = 0.0;




    float UtempSum = 0;
    float LtempSum = 0;
    for (int r = 1; r < n; r++)
    {
        // Owner of step r computes the diagonal entry U[r][r] ...
        if (r % numProcesses == rank)
        {
            UtempSum = 0;
            LtempSum = 0;
            for (int k = 0; k < r; k++)
            {
                UtempSum += L[r][k] * U[r][k];
            }
            U[r][r] = m[r][r] - UtempSum;
        }
        // ... and publishes it (needed by every rank for the L divisions).
        MPI_Bcast(&U[r][r], 1, MPI_FLOAT, r % numProcesses, MPI_COMM_WORLD);
        for (int i = r + 1; i < n; i++)
        {
            if (i % numProcesses == rank)    // round-robin row ownership
            {
                UtempSum = 0;
                LtempSum = 0;
                for (int k = 0; k < r; k++)
                {
                    UtempSum += L[r][k] * U[i][k];
                    LtempSum += L[i][k] * U[r][k];
                }
                U[i][r] = m[r][i] - UtempSum;                 // U stored transposed
                L[i][r] = (m[i][r] - LtempSum) / U[r][r];
            }
        }
        // Publish every element computed in this step, one broadcast each.
        for (int i = r + 1; i < n; i++)
        {
            MPI_Bcast(&U[i][r], 1, MPI_FLOAT, i % numProcesses, MPI_COMM_WORLD);
            MPI_Bcast(&L[i][r], 1, MPI_FLOAT, i % numProcesses, MPI_COMM_WORLD);
        }
    }


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    // Average the per-rank timings onto rank 0.
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    averageTime /= numProcesses;



    int showLU = 0;   // flip to 1 to dump m, L and U on rank 0
    if (showLU)
    {
        if (rank == 0)
        {
            show(m);
            cout << endl;
        }



        if (rank == 0)
        {
            cout << rank << endl;
            show(L);
            cout << endl;
            T(U);
            show(U);
            cout << endl << endl;
        }


    }

    if (rank == 0)
    {
        cout << averageTime << endl;
    }

    return averageTime;
}
/*
 * NOTE(review): despite the name, this function is currently byte-for-byte
 * identical to project_mpi (round-robin ownership with one MPI_Bcast per
 * computed element).  The intended pipelined variant appears never to have
 * been implemented here — confirm before keeping both copies.
 *
 * See project_mpi for the algorithm description.  Returns the average wall
 * time over all ranks (printed on rank 0).
 */
double pipeline_mpi(int argc, char* argv[], int numProcesses, int rank)
{
    float** m;
    if (rank == 0)
    {
        // Root builds the test matrix and sends every row to every other rank.
        m = generate();
        for (int i = 0; i < n; i++)
        {
            for (int j = 1; j < numProcesses; j++)
            {
                MPI_Send(&m[i][0], n, MPI_FLOAT, j, 0, MPI_COMM_WORLD);
            }
        }
    }
    else
    {
        m = new float* [n];
        for (int i = 0; i < n; i++)
        {
            m[i] = new float[n];
            MPI_Recv(&m[i][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    // L = identity plus first column of m / m[0][0]; U = first row of m,
    // then transposed so dot products read contiguous rows.
    float** L = new float* [n];
    float** U = new float* [n];
    for (int i = 0; i < n; i++)
    {
        L[i] = new float[n];
        U[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                U[i][j] = m[i][j];
            else
                U[i][j] = 0;

            if (i == j)
                L[i][j] = 1;
            else
                L[i][j] = 0;

            if (j == 0)
                L[i][j] = m[i][j] / U[0][0];
        }
    }
    T(U);



    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);
    double averageTime = 0.0;




    float UtempSum = 0;
    float LtempSum = 0;
    for (int r = 1; r < n; r++)
    {
        // Owner of step r computes and publishes the diagonal entry U[r][r].
        if (r % numProcesses == rank)
        {
            UtempSum = 0;
            LtempSum = 0;
            for (int k = 0; k < r; k++)
            {
                UtempSum += L[r][k] * U[r][k];
            }
            U[r][r] = m[r][r] - UtempSum;
        }
        MPI_Bcast(&U[r][r], 1, MPI_FLOAT, r % numProcesses, MPI_COMM_WORLD);
        for (int i = r + 1; i < n; i++)
        {
            if (i % numProcesses == rank)    // round-robin row ownership
            {
                UtempSum = 0;
                LtempSum = 0;
                for (int k = 0; k < r; k++)
                {
                    UtempSum += L[r][k] * U[i][k];
                    LtempSum += L[i][k] * U[r][k];
                }
                U[i][r] = m[r][i] - UtempSum;                 // U stored transposed
                L[i][r] = (m[i][r] - LtempSum) / U[r][r];
            }
        }
        // Publish every element computed in this step, one broadcast each.
        for (int i = r + 1; i < n; i++)
        {
            MPI_Bcast(&U[i][r], 1, MPI_FLOAT, i % numProcesses, MPI_COMM_WORLD);
            MPI_Bcast(&L[i][r], 1, MPI_FLOAT, i % numProcesses, MPI_COMM_WORLD);
        }
    }


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    // Average the per-rank timings onto rank 0.
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    averageTime /= numProcesses;



    int showLU = 0;   // flip to 1 to dump m, L and U on rank 0
    if (showLU)
    {
        if (rank == 0)
        {
            show(m);
            cout << endl;
        }



        if (rank == 0)
        {
            cout << rank << endl;
            show(L);
            cout << endl;
            T(U);
            show(U);
            cout << endl << endl;
        }


    }

    if (rank == 0)
    {
        cout << averageTime << endl;
    }

    return averageTime;
}
void mpi_do_on_LU(int argc, char* argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);



    int type = 0;

    int test = 0;
    if (test)
    {
        n = 5;
        switch (type)
        {
        case 0:
            pingjun_mpi(argc, argv, numProcesses, rank);
            break;
        case 1:
            project_mpi(argc, argv, numProcesses, rank);
            break;
        case 2:
            pipeline_mpi(argc, argv, numProcesses, rank);
            break;
        }
    }
    else
        for (int i = 1; i <= 10; i++)
        {
            n = i * 200;
            switch (type)
            {
            case 0:
                pingjun_mpi(argc, argv, numProcesses, rank);
                break;
            case 1:
                project_mpi(argc, argv, numProcesses, rank);
                break;
            case 2:
                pipeline_mpi(argc, argv, numProcesses, rank);
                break;
            }
        }
}



// Per-element "ready" flags for the hybrid MPI+pthread variant:
// okU[i][j] / okL[i][j] are set to 1 once pU[i][j] / pL[i][j] hold their
// final value.  NOTE(review): these are busy-waited on by multiple threads
// without atomics or volatile — a data race; confirm/replace with atomics.
bool** okU;
bool** okL;

/*
 * Worker thread for the hybrid MPI + pthread pipeline (final_entry).
 * Thread t_id handles row i = j + t_id of each group of pThreadNum rows,
 * computing the column steps r in this rank's chunk [begin, end) on the
 * file-scope pM/pL/pU (pU stores U transposed).  okL/okU are per-element
 * "ready" flags: the inner loops spin until the operands an update needs
 * have been produced by other threads or received from the previous rank.
 *
 * NOTE(review): the spin-waits read plain (non-atomic, non-volatile) bool
 * arrays written by other threads — a data race; an optimizing compiler may
 * hoist the load and spin forever.  okL/okU should be atomics (or the waits
 * replaced with proper synchronization) — confirm before relying on this.
 */
void* MPIMissionPoolThreadFunc(void* param)
{
    threadParam_t* p = (threadParam_t*)param;
    int t_id = p->t_id;
    int rank = p->Jincheng_rank;
    int numProcesses = p->numProcesses;
    int num_2_handle = n / (pThreadNum * numProcesses) + 1;   // (unused)
    int begin = p->begin;
    int end = p->end;


    float UtempSum = 0;
    float LtempSum = 0;
    int i = 0;




    // One semaphore round per group of pThreadNum rows.
    for (int j = 1; j < n; j += pThreadNum)
    {
        if (j >= begin)
        {

            sem_wait(&sem_workerstart[t_id]);   // master published this row group

            i = j + t_id;                        // this thread's row in the group
            if (i < n && i >= begin)
                for (int r = begin; r < end && r < n && r <= i; r++)
                {
                    UtempSum = 0;
                    LtempSum = 0;
                    for (int k = 0; k < r; k++)
                    {
                        // Spin until the k-th operands of this update are final.
                        while (!(okL[r][k] && okU[i][k]))
                        {
                            continue;
                        }
                        UtempSum += pL[r][k] * pU[i][k];
                        if (i != r)
                        {
                            while (!(okL[i][k] && okU[r][k]))
                                continue;
                            LtempSum += pL[i][k] * pU[r][k];
                        }
                    }
                    pU[i][r] = pM[r][i] - UtempSum;   // pU stores U transposed
                    okU[i][r] = 1;                    // publish the new U entry
                    if (i != r)
                    {
                        while (!okU[r][r])            // need the pivot first
                            continue;
                        pL[i][r] = (pM[i][r] - LtempSum) / pU[r][r];
                        okL[i][r] = 1;                // publish the new L entry
                    }
                }

            sem_post(&sem_leader);              // report this group done
            sem_wait(&sem_workerend[t_id]);     // wait for barrier release
        }
    }


    pthread_exit(NULL);
}

/*
 * Hybrid MPI + pthread LU decomposition.
 * Work split: column steps are divided into one contiguous chunk
 * [begin, end) per MPI rank (as in pingjun_mpi); inside a rank, groups of
 * pThreadNum rows are processed concurrently by MPIMissionPoolThreadFunc
 * workers, which spin on the okU/okL flag matrices until the operands they
 * need are ready.  Row groups (values plus ready flags) are forwarded
 * rank-to-rank, forming a pipeline.  Returns the average wall time over all
 * ranks (printed on rank 0).
 *
 * NOTE(review): the worker threads are never pthread_join'ed; the
 * handle/param/okU/okL/pL/pU/m allocations are never freed; and the okU/okL
 * transfers use MPI_BYTE, which assumes sizeof(bool) == 1 — confirm on the
 * target toolchain.
 */
double final_entry(int numProcesses, int rank)
{
    float** m;
    if (rank == 0)
    {
        // Root generates the matrix and ships every row to every other rank.
        m = generate();
        for (int i = 0; i < n; i++)
        {
            for (int j = 1; j < numProcesses; j++)
                MPI_Send(&m[i][0], n, MPI_FLOAT, j, 0, MPI_COMM_WORLD);
        }
    }
    else
    {
        m = new float* [n];
        for (int i = 0; i < n; i++)
        {
            m[i] = new float[n];
            MPI_Recv(&m[i][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    // pL = identity plus first column of m / m[0][0]; pU = first row of m,
    // then transposed so dot products read contiguous rows.
    pL = new float* [n];
    pU = new float* [n];
    for (int i = 0; i < n; i++)
    {
        pL[i] = new float[n];
        pU[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                pU[i][j] = m[i][j];
            else
                pU[i][j] = 0;

            if (i == j)
                pL[i][j] = 1;
            else
                pL[i][j] = 0;

            if (j == 0)
                pL[i][j] = m[i][j] / pU[0][0];
        }
    }
    T(pU);
    pM = m;    
    pthread_mutex_init(&get_step_mutex, NULL);


    


    pthread_t* handle = new pthread_t[pThreadNum];
    threadParam_t* param = new threadParam_t[pThreadNum];

    // Contiguous chunk of column steps owned by this rank (rounded up).
    int how_many_to_do = (n - 1) % numProcesses == 0 ? (n - 1) / numProcesses : (n - 1) / numProcesses + 1;
    int begin = rank * how_many_to_do + 1;
    int end = begin + how_many_to_do > n ? n : begin + how_many_to_do;

    // Ready flags: row/column 0 is final from the start, everything else not.
    okU = new bool* [n];
    okL = new bool* [n];
    for (int i = 0; i < n; i++)
    {
        okU[i] = new bool[n];
        okL[i] = new bool[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0 || j == 0)
            {
                okU[i][j] = 1;
                okL[i][j] = 1;
            }
            else
            {
                okU[i][j] = 0;
                okL[i][j] = 0;
            }
        }
    }




    // NOTE(review): MissionPoolSSEThreadFunc is the non-MPI mission-pool
    // worker; it ignores begin/end, so haveSSE = 1 looks unsupported here.
    bool haveSSE = 0;
    for (int id = 0; id < pThreadNum; id++)
    {
        param[id].Jincheng_rank = rank;
        param[id].t_id = id;
        param[id].numProcesses = numProcesses;
        param[id].begin = begin;
        param[id].end = end;
        if (haveSSE)
            pthread_create(&handle[id], NULL, MissionPoolSSEThreadFunc, (void*)&param[id]);
        else
            pthread_create(&handle[id], NULL, MPIMissionPoolThreadFunc, (void*)&param[id]);
    }


    // NOTE(review): this loop computes nothing that is used afterwards —
    // apparently leftover scratch work.
    int all_time = begin;
    int fuck = 0;
    while (all_time < n)
    {
        all_time += fuck * pThreadNum;
        fuck++;
    }



    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);
    double averageTime = 0.0;



    // Pipeline in groups of pThreadNum rows: receive the partial rows (and
    // their ready flags) from rank-1, run one worker round, forward to rank+1.
    for (int i = 1; i < n; i += pThreadNum)
    {
        if (rank != 0)
        {
            for (int j = i; j < n && j < i + pThreadNum; j++)
            {
                MPI_Recv(&pU[j][1], j, MPI_FLOAT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                MPI_Recv(&pL[j][1], j, MPI_FLOAT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                MPI_Recv(&okU[j][1], j, MPI_BYTE, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                MPI_Recv(&okL[j][1], j, MPI_BYTE, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
        if (i >= begin)
        {
            // Master side of the semaphore barrier for this row group.
            for (int id = 0; id < pThreadNum; id++)
                sem_post(&sem_workerstart[id]);
            for (int id = 0; id < pThreadNum; id++)
                sem_wait(&sem_leader);
            for (int id = 0; id < pThreadNum; id++)
                sem_post(&sem_workerend[id]);
        }

        if (rank != numProcesses - 1)
        {
            for (int j = i; j < n && j < i + pThreadNum; j++)
            {
                MPI_Send(&pU[j][1], j, MPI_FLOAT, rank + 1, 0, MPI_COMM_WORLD);
                MPI_Send(&pL[j][1], j, MPI_FLOAT, rank + 1, 0, MPI_COMM_WORLD);
                MPI_Send(&okU[j][1], j, MPI_BYTE, rank + 1, 0, MPI_COMM_WORLD);
                MPI_Send(&okL[j][1], j, MPI_BYTE, rank + 1, 0, MPI_COMM_WORLD);
            }
        }
    }

    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
    // Average the per-rank timings onto rank 0.
    MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    averageTime /= numProcesses;


    pthread_mutex_destroy(&get_step_mutex);

    int showLU = 0;   // flip to 1 to dump m, pL and pU on the last rank
    if (showLU)
    {
        if (rank == numProcesses - 1)
        {
            show(m);
            cout << endl;
        }


        if (rank == numProcesses - 1)
        {
            show(pL);
            cout << endl;
            show(pU);
            cout << endl << endl;
        }


    }

    if (rank == 0)
    {
        cout << averageTime << endl;
    }

    return averageTime;
}


double pthread_sse(float** m)
{

    pL = new float* [n];
    pU = new float* [n];
    for (int i = 0; i < n; i++)
    {
        pL[i] = new float[n];
        pU[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0)
                pU[i][j] = m[i][j];
            else
                pU[i][j] = 0;

            if (i == j)
                pL[i][j] = 1;
            else
                pL[i][j] = 0;

            if (j == 0)
                pL[i][j] = m[i][j] / pU[0][0];
        }
    }
    pM = m;
    T(pU);

    int type = 2;

    struct timeval t1, t2;
    double timeuse = 0;
    gettimeofday(&t1, NULL);

    pthread_t* handle = new pthread_t[pThreadNum];
    threadParam_t* param = new threadParam_t[pThreadNum];

    for (int id = 0; id < pThreadNum; id++)
    {
        param[id].t_id = id;
        pthread_create(&handle[id], NULL, MissionPoolSSEThreadFunc, (void*)&param[id]);
    }


    float UtempSum = 0;
    float LtempSum = 0;
    for (int r = 1; r < n; r++)
    {
        which_step_is_now = r;
        for (int id = 0; id < pThreadNum; id++)
            sem_post(&sem_workerstart[id]);
        for (int id = 0; id < pThreadNum; id++)
            sem_wait(&sem_leader);
        for (int id = 0; id < pThreadNum; id++)
            sem_post(&sem_workerend[id]);
    }

    for (int id = 0; id < pThreadNum; id++)
        pthread_join(handle[id], NULL);


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;



    /*show(m);
    cout << endl;
    show(pL);
    cout << endl;
    T(pU);
    show(pU);*/
    return timeuse;
}

void final_summary(int argc, char* argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    int numProcesses, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);



    int type = 1;
    int test = 1;
    if (test)
    {
        n = 1400;
        switch (type)
        {
        case 0:

            break;
        case 1:
            final_entry(numProcesses, rank);
            break;
        case 2:
            break;
        }
    }
    else
        for (int i = 1; i <= 10; i++)
        {
            n = i * 200;
            final_entry(numProcesses, rank);
        }
}



/*
 * Reference serial out-of-place Doolittle LU decomposition, used for
 * correctness checking: factors m into L (unit lower) and U (upper, kept
 * transposed during computation), prints m, L and U, and returns the seconds
 * spent in the factorization loop.
 * Fix: L and U were allocated and never freed — they are now released.
 */
double commonLU(float** m)
{
    // L = identity plus first column of m / m[0][0]; U = first row of m,
    // then transposed so dot products read contiguous rows.
    float** L = new float* [n];
    float** U = new float* [n];
    for (int i = 0; i < n; i++)
    {
        L[i] = new float[n];
        U[i] = new float[n];
        for (int j = 0; j < n; j++)
        {
            if (i == 0) 
                U[i][j] = m[i][j];
            else
                U[i][j] = 0;

            if (i == j)
                L[i][j] = 1;
            else
                L[i][j] = 0;

            if (j == 0)
                L[i][j] = m[i][j] / U[0][0];
        }
    }
    T(U);
    // L and U are now initialized.



    struct timeval t1, t2;
    double timeuse = 0;


    gettimeofday(&t1, NULL);

    float UtempSum = 0;
    float LtempSum = 0;
    for (int r = 1; r < n; r++)
        for (int i = r; i < n; i++)
        {
            UtempSum = 0;
            LtempSum = 0;
            for (int k = 0; k < r; k++)
            {
                UtempSum += L[r][k] * U[i][k];
                if (i != r)
                    LtempSum += L[i][k] * U[r][k];
            }
            U[i][r] = m[r][i] - UtempSum;                 // U stored transposed
            if (i != r)
                L[i][r] = (m[i][r] - LtempSum) / U[r][r];
        }


    gettimeofday(&t2, NULL);
    timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;


    // Debug dump of the input and both factors (U transposed back first).
    show(m);
    cout << endl;
    show(L);
    cout << endl;
    T(U);
    show(U);

    // Fix: release the scratch factors (previously leaked).
    for (int i = 0; i < n; i++)
    {
        delete[] L[i];
        delete[] U[i];
    }
    delete[] L;
    delete[] U;

    return timeuse;
}
/*
 * Top-level benchmark dispatcher.  With mpi == 1 it runs one of the MPI
 * drivers (final_summary or mpi_do_on_LU); otherwise it sweeps
 * n = 200..2000 over the serial/pthread variants (currently all commented
 * out).
 * Fix: the matrix generated in the sweep loop was leaked every iteration.
 */
void timeTest(int argc, char* argv[])
{
    int mpi = 1;
    int finalTest = 1;
    if (mpi == 1)
    {
        if (finalTest)
            final_summary(argc, argv);
        else
            mpi_do_on_LU(argc, argv);
    }
    else
        for (int i = 1; i <= 10; i++)
        {
            n = i * 200;

            float** mat = generate();
            // Enable whichever variants should be timed:
            //cout << do_on_init_matrix(mat) << endl;
            //cout << commonLU(mat) << endl;
            //cout << sse__do_on_init_matrix(mat) << endl;
            //cout << sse_do_on_LU(mat) << endl;
            //cout << pthread_do_on_LU(mat) << endl;
            //cout << pthread_sse(mat) << endl;

            // Fix: free the test matrix (previously leaked each iteration).
            for (int j = 0; j < n; j++)
                delete[] mat[j];
            delete[] mat;
        }
}
/*
 * Small-size (n = 5) correctness driver: generates a tiny matrix and runs
 * the variant under inspection (currently final_summary; the alternatives
 * are left commented out).
 * Fix: the generated matrix was leaked — it is now freed while n still
 * holds its dimension (final_summary overwrites the global n).
 */
void correctionTest(int argc, char* argv[])
{
    n = 5;
    float** mat = generate();
    //show(mat);
    //do_on_init_matrix(mat);
    //sse__do_on_init_matrix(mat);

    //pthread_do_on_LU(mat);
    //mpi_do_on_LU(argc, argv);
    //pthread_sse(mat);

    //commonLU(mat);

    // Free before final_summary, which reassigns n.
    for (int i = 0; i < n; i++)
        delete[] mat[i];
    delete[] mat;

    final_summary(argc, argv);
    cout << endl;

}
int main(int argc, char* argv[])
{

    pThreadNum = 10;


    sem_workerstart = new sem_t[pThreadNum];
    sem_workerend = new sem_t[pThreadNum];


    sem_init(&sem_leader, 0, 0);
    for (int i = 0; i < pThreadNum; i++)
    {
        sem_init(&sem_workerstart[i], 0, 0);
        sem_init(&sem_workerend[i], 0, 0);
    }

    timeTest(argc, argv);
    //correctionTest(argc, argv);


    sem_destroy(&sem_leader);
    for (int i = 0; i < pThreadNum; i++)
    {
        sem_destroy(&sem_workerstart[i]);
        sem_destroy(&sem_workerend[i]);
    }
}
