#include <mpi.h>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <xmmintrin.h> //SSE
#include <emmintrin.h> //SSE2
#include <pmmintrin.h> //SSE3
#include <tmmintrin.h> //SSSE3
#include <smmintrin.h> //SSE4.1
#include <nmmintrin.h> //SSE4.2
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <vector>
using namespace std;

int N = 5;                  // small legacy dimension (see init); the solvers all use n
const int L = 100;          // unused in this chunk
const int n = 100;          // working matrix dimension used by every solver
const int LOOP = 1;         // unused in this chunk
int num_proc = 8;           // number of MPI ranks assumed by the solvers
int NUM_THREADS = 8;        // OpenMP threads per rank for the *_omp_* variants
float **A;                  // shared n x n working matrix (row-pointer array)
float **matrix = nullptr;   // unused in this chunk

//矩阵初始化
void init(float ** A)
{
    srand((unsigned) time(NULL));
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            A[i][j] = rand() / 100;
}

//串行算法
void normal() 
{
    init(A);
    double start, end;
    start = MPI_Wtime();
    for (int k = 0; k < n; k++) {
        for (int j = k + 1; j < n; j++)
            A[k][j] = A[k][j] / A[k][k];
        A[k][k] = 1.0;
        for (int i = k + 1; i < n; i++) {
            for (int j = k + 1; j < n; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
            A[i][k] = 0;
        }
    }
    end = MPI_Wtime();
    if (rank == 0)
        cout << "串行算法" << (end - start) * 1000 << "ms" << endl;
}

// SSE-vectorised Gaussian elimination on the shared matrix A (4 floats per step).
// NOTE(review): like the original, this does not call init(); it operates on whatever
// is currently in A.
// BUGFIX: the original used aligned _mm_load_ps/_mm_store_ps after a `j % 4 == 0`
// peel loop, but j being a multiple of 4 only guarantees 16-byte alignment if each
// row base is itself 16-byte aligned, which nothing ensures. Unaligned loads/stores
// are always safe and make the peel loop unnecessary.
void sse()
{
    for (int k = 0; k < n; k++) {
        // Normalise pivot row k: A[k][j] /= A[k][k] for j > k.
        __m128 pivot = _mm_set1_ps(A[k][k]);
        int j = k + 1;
        for (; j + 4 <= n; j += 4) {
            __m128 row = _mm_loadu_ps(&A[k][j]);
            _mm_storeu_ps(&A[k][j], _mm_div_ps(row, pivot));
        }
        for (; j < n; j++)            // scalar tail
            A[k][j] = A[k][j] / A[k][k];
        A[k][k] = 1.0;
        // Eliminate column k from every row below.
        for (int i = k + 1; i < n; i++) {
            __m128 factor = _mm_set1_ps(A[i][k]);
            j = k + 1;
            for (; j + 4 <= n; j += 4) {
                __m128 pk = _mm_loadu_ps(&A[k][j]);
                __m128 pi = _mm_loadu_ps(&A[i][j]);
                _mm_storeu_ps(&A[i][j], _mm_sub_ps(pi, _mm_mul_ps(factor, pk)));
            }
            for (; j < n; j++)        // scalar tail
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
            A[i][k] = 0;
        }
    }
}

// MPI: cyclic row distribution, blocking communication.
// Row k is owned by rank k % num_proc.
// BUGFIXES vs. original:
//  - the pivot row was only sent to ranks > owner, but under the cyclic layout
//    lower-numbered ranks also hold rows below k, so they eliminated with stale
//    (or never-received) data; the owner now sends to every other rank.
//  - MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE (that constant is
//    for the array-of-statuses wait/test calls).
void mpi_loop(int rank)
{
    double start, end;
    if (rank == 0) { // rank 0 initialises A and deals rows out cyclically
        init(A);
        for (int k = 0; k < n; k++)
            if (k % num_proc != 0)
                MPI_Send(&A[k][0], n, MPI_FLOAT, k % num_proc, 0, MPI_COMM_WORLD);
    }
    else { // every other rank receives its own rows
        for (int k = 0; k < n; k++)
            if (k % num_proc == rank)
                MPI_Recv(&A[k][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD); // make sure distribution is complete before timing
    start = MPI_Wtime();
    for (int k = 0; k < n; k++) {
        int owner = k % num_proc;
        if (owner == rank) {
            // normalise the pivot row
            for (int j = k + 1; j < n; j++)
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            // broadcast the finished pivot row to every other rank
            for (int j = 0; j < num_proc; j++)
                if (j != rank)
                    MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        }
        else {
            MPI_Recv(&A[k][0], n, MPI_FLOAT, owner, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        // eliminate column k from this rank's rows below k
        for (int i = k + 1; i < n; i++) {
            if (i % num_proc == rank) {
                for (int j = k + 1; j < n; j++)
                    A[i][j] = A[i][j] - A[i][k] * A[k][j];
                A[i][k] = 0;
            }
        }
    }
    end = MPI_Wtime();
    if (rank == num_proc-1)
        cout << "MPI,循环划分,阻塞通信" << (end - start) * 1000 << "ms" << endl;
}

// MPI: contiguous block row distribution, blocking communication.
// Rank r owns rows [r*range, (r+1)*range); the last rank also absorbs the remainder.
// Under a block layout only ranks *above* the owner still hold rows below the pivot,
// so owner->higher-ranks sends are sufficient here (unlike the cyclic variants).
// BUGFIX: MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE.
void mpi_block(int rank)
{
    int j;
    double start, end;
    int range = (n - n % num_proc) / num_proc; // rows per rank
    int s = rank * range;                      // first row owned by this rank
    int e = (rank + 1) * range;                // one past the last row owned
    if (rank == num_proc - 1) e = n;           // last rank takes the leftover rows
    if (rank == 0) { // rank 0 initialises A and ships each rank its block in one message
        init(A);
        for (j = 1; j < num_proc; j++) {
            int row_start = j * range;
            int row_end = (j + 1) * range;
            if (j == num_proc - 1) row_end = n;
            // NOTE: sending multiple rows with one pointer assumes the rows are
            // contiguous in memory (A must be backed by a single n*n buffer).
            MPI_Send(&A[row_start][0], n * (row_end - row_start), MPI_FLOAT, j, 0, MPI_COMM_WORLD);
        }
    }
    else // every other rank receives its own block
        MPI_Recv(&A[s][0], n * (e - s), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Barrier(MPI_COMM_WORLD); // distribution complete before timing starts
    start = MPI_Wtime();
    for (int k = 0; k < n; k++) {
        if (k >= s && k < e) {
            // normalise the pivot row and pass it to the higher ranks that need it
            for (j = k + 1; j < n; j++)
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            for (j = rank + 1; j < num_proc; j++)
                MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        }
        else {
            int owner = k / range; // rank owning row k (>= num_proc only for the last
                                   // rank's tail, in which case no one receives)
            if (owner < rank) {
                MPI_Recv(&A[k][0], n, MPI_FLOAT, owner, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
        // eliminate column k from this rank's remaining rows
        for (int i = max(s, k + 1); i < e; i++) {
            for (j = k + 1; j < n; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
            A[i][k] = 0;
        }
    }
    end= MPI_Wtime();
    if (rank == num_proc-1)
        cout << "高斯消元mpi块划分优化"<< (end - start) * 1000 << "ms" << endl;
}

// MPI+SSE: block row distribution, blocking communication.
// BUGFIX: the original called `_mm_serow_start_ps`, which is not an SSE intrinsic
// and does not compile; the intended broadcast-one-float intrinsic is _mm_set1_ps.
void mpi_sse_block(int rank)
{
    int j;
    double start, end;
    // Contiguous block of rows handled by this rank; the last rank absorbs the remainder.
    int len = (n - n % num_proc) / num_proc;
    int s = rank * len;
    int e = (rank + 1) * len;
    if (rank == num_proc - 1) {
        e = n;
    }

    if (rank == 0) {
        // Rank 0 initialises A and ships each rank its block in one message
        // (assumes the rows are contiguous in memory).
        init(A);
        for (j = 1; j < num_proc; j++) {
            int row_start = j * len;
            int row_end = (j + 1) * len;
            if (j == num_proc - 1) {
                row_end = n;
            }
            MPI_Send(&A[row_start][0], n * (row_end - row_start), MPI_FLOAT, j, 0, MPI_COMM_WORLD);
        }
    } else {
        // Every other rank receives its own block.
        MPI_Recv(&A[s][0], n * (e - s), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    // Distribution complete before timing starts.
    MPI_Barrier(MPI_COMM_WORLD);
    start = MPI_Wtime();

    for (int k = 0; k < n; k++) {
        if (k >= s && k < e) {
            // Normalise the pivot row with SSE, 4 floats per step.
            __m128 vt = _mm_set1_ps(A[k][k]);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 va = _mm_loadu_ps(&A[k][j]);
                va = _mm_div_ps(va, vt);
                _mm_storeu_ps(&A[k][j], va);
            }
            for (; j < n; j++) { // scalar tail
                A[k][j] = A[k][j] / A[k][k];
            }
            A[k][k] = 1.0;

            // Hand the finished pivot row to the higher ranks that still need it.
            for (j = rank + 1; j < num_proc; j++) {
                MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
            }
        } else {
            int owner = k / len; // rank owning row k under the block layout
            if (owner < rank) {
                MPI_Recv(&A[k][0], n, MPI_FLOAT, owner, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }

        // Eliminate column k from this rank's remaining rows, SSE-vectorised.
        for (int i = max(s, k + 1); i < e; i++) {
            __m128 vaik = _mm_set1_ps(A[i][k]);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 vakj = _mm_loadu_ps(&A[k][j]);
                __m128 vaij = _mm_loadu_ps(&A[i][j]);
                __m128 vx = _mm_mul_ps(vakj, vaik);
                vaij = _mm_sub_ps(vaij, vx);
                _mm_storeu_ps(&A[i][j], vaij);
            }
            for (; j < n; j++) { // scalar tail
                A[i][j] = A[i][j] - A[k][j] * A[i][k];
            }
            A[i][k] = 0;
        }
    }

    end = MPI_Wtime();

    if (rank == num_proc - 1) {
        cout << "mpi_sse_block" << (end - start) * 1000 << "ms" << endl;
    }
}

// MPI+SSE: cyclic row distribution, blocking communication.
// BUGFIXES vs. original:
//  - `_mm_serow_start_ps` is not an SSE intrinsic (compile error); _mm_set1_ps intended.
//  - pivot rows were only sent to ranks > owner, but under the cyclic layout lower
//    ranks also hold rows below k; the owner now broadcasts to every other rank.
void mpi_sse_loop(int rank)
{
    int j;
    double start, end;
    if (rank == 0) {
        // Rank 0 initialises A and deals rows out cyclically (row k -> rank k % num_proc).
        init(A);
        for (int k = 0; k < n; k++) {
            if (k % num_proc != 0) {
                MPI_Send(&A[k][0], n, MPI_FLOAT, k % num_proc, 0, MPI_COMM_WORLD);
            }
        }
    } else {
        for (int k = 0; k < n; k++) {
            if (k % num_proc == rank) {
                MPI_Recv(&A[k][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
    }
    // Distribution complete before timing starts.
    MPI_Barrier(MPI_COMM_WORLD);

    start = MPI_Wtime();

    for (int k = 0; k < n; k++) {
        int owner = k % num_proc;
        if (owner == rank) {
            // Normalise the pivot row with SSE, 4 floats per step.
            __m128 vt = _mm_set1_ps(A[k][k]);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 va = _mm_loadu_ps(&A[k][j]);
                va = _mm_div_ps(va, vt);
                _mm_storeu_ps(&A[k][j], va);
            }
            for (; j < n; j++) // scalar tail
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            // Broadcast the finished pivot row to every other rank.
            for (j = 0; j < num_proc; j++)
                if (j != rank)
                    MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        } else {
            MPI_Recv(&A[k][0], n, MPI_FLOAT, owner, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        // Eliminate column k from this rank's rows below k, SSE-vectorised.
        for (int i = k + 1; i < n; i++) {
            if (i % num_proc == rank) {
                __m128 vaik = _mm_set1_ps(A[i][k]);
                for (j = k + 1; j + 4 <= n; j += 4) {
                    __m128 vakj = _mm_loadu_ps(&A[k][j]);
                    __m128 vaij = _mm_loadu_ps(&A[i][j]);
                    __m128 vx = _mm_mul_ps(vakj, vaik);
                    vaij = _mm_sub_ps(vaij, vx);
                    _mm_storeu_ps(&A[i][j], vaij);
                }
                for (; j < n; j++) // scalar tail
                    A[i][j] = A[i][j] - A[k][j] * A[i][k];
                A[i][k] = 0;
            }
        }
    }
    end = MPI_Wtime();
    if (rank == num_proc - 1) {
        cout << "mpi_sse_loop " << (end - start) * 1000 << "ms" << endl;
    }
}

// MPI+OpenMP: block row distribution, blocking communication.
// One thread (via `omp single`) does normalisation and MPI traffic; the elimination
// loop is shared among threads with `omp for`.
// BUGFIXES vs. original:
//  - the receiver looped a row-by-row *cyclic* receive, which does not match the
//    single contiguous block message rank 0 sends; it now receives the block in one call.
//  - MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE.
// NOTE(review): MPI calls from inside a parallel region require at least
// MPI_THREAD_FUNNELED — confirm MPI_Init_thread is used in production builds.
void mpi_omp_block(int rank)
{
    int i = 0, j = 0, k = 0;
    float temp = 0;
    int range = (n - n % num_proc) / num_proc; // rows per rank
    int s = rank * range;
    int e = (rank + 1) * range;
    if (rank == num_proc - 1) e = n;           // last rank absorbs the remainder
    if (rank == 0) { // rank 0 initialises A and ships each rank its block in one message
        init(A);
        for (j = 1; j < num_proc; j++) {
            int row_start = j * range;
            int row_end = (j + 1) * range;
            if (j == num_proc - 1)
                row_end = n;
            MPI_Send(&A[row_start][0], n * (row_end - row_start), MPI_FLOAT, j, 0, MPI_COMM_WORLD);
        }
    } else {
        // receive this rank's whole block, matching the send above
        MPI_Recv(&A[s][0], n * (e - s), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    double start = MPI_Wtime();
#pragma omp parallel num_threads(NUM_THREADS), private(i, j, k, temp)
    for (k = 0; k < n; k++) {
        #pragma omp single // one thread normalises + communicates; implicit barrier after
        if (k >= s && k < e) {
            temp = A[k][k];
            for (j = k + 1; j < n; j++)
                A[k][j] = A[k][j] / temp;
            A[k][k] = 1.0;
            for (j = rank + 1; j < num_proc; j++)
                MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        } else {
            if (k / range < rank)
                MPI_Recv(&A[k][0], n, MPI_FLOAT, k / range, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        #pragma omp for schedule(guided) // threads share the elimination of this rank's rows
        for (i = max(s, k + 1); i < e; i++) {
            temp = A[i][k];
            for (j = k + 1; j < n; j++)
                A[i][j] = A[i][j] - temp * A[k][j];
            A[i][k] = 0;
        }
    }
    double end = MPI_Wtime();
    if (rank == num_proc-1)
        cout << "mpi_omp_block" << (end - start) * 1000 << "ms" << endl;
}

// MPI+OpenMP: cyclic row distribution, blocking communication.
// BUGFIXES vs. original:
//  - pivot rows were only sent to ranks > owner; under the cyclic layout lower ranks
//    also hold rows below k, so the owner now broadcasts to every other rank.
//  - MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE.
void mpi_omp_loop(int rank)
{
    int i = 0, j = 0, k = 0;
    float temp = 0;
    if (rank == 0) { // rank 0 initialises A and deals rows out cyclically
        init(A);
        for (k = 0; k < n; k++) {
            int dst = k % num_proc;
            if (dst != 0)
                MPI_Send(&A[k][0], n, MPI_FLOAT, dst, 0, MPI_COMM_WORLD);
        }
    } else {
        for (k = 0; k < n; k++) {
            if (k % num_proc == rank)
                MPI_Recv(&A[k][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    MPI_Barrier(MPI_COMM_WORLD); // distribution complete before timing starts
    double start = MPI_Wtime();
#pragma omp parallel num_threads(NUM_THREADS), private(i, j, k, temp)
    for (k = 0; k < n; k++) {
        #pragma omp single // one thread normalises + communicates; implicit barrier after
        if (k % num_proc == rank) {
            temp = A[k][k];
            for (j = k + 1; j < n; j++)
                A[k][j] = A[k][j] / temp;
            A[k][k] = 1.0;
            // broadcast the finished pivot row to every other rank
            for (j = 0; j < num_proc; j++)
                if (j != rank)
                    MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        } else {
            MPI_Recv(&A[k][0], n, MPI_FLOAT, k % num_proc, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        #pragma omp for schedule(guided) // threads share the elimination of this rank's rows
        for (i = 0; i < n; i++) {
            if (i % num_proc == rank && i > k) {
                temp = A[i][k];
                for (j = k + 1; j < n; j++)
                    A[i][j] = A[i][j] - temp * A[k][j];
                A[i][k] = 0;
            }
        }
    }
    double end = MPI_Wtime();
    if (rank == num_proc-1)
        cout << "mpi_omp_loop" << (end - start) * 1000 << "ms" << endl;
}

// MPI+SSE+OpenMP: block row distribution, blocking communication.
// One thread (via `omp single`) normalises the pivot row and does MPI traffic;
// threads share the SSE-vectorised elimination via `omp for`.
// BUGFIX: MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE.
void mpi_sse_omp_block(int rank)
{
    int i = 0, j = 0, k = 0;
    float temp = 0;
    int range = (n - n % num_proc) / num_proc; // rows per rank
    int s = rank * range;
    int e = (rank + 1) * range;
    if (rank == num_proc - 1)  e = n;          // last rank absorbs the remainder
    if (rank == 0) { // rank 0 initialises A and ships each rank its block in one message
        init(A);
        for (j = 1; j < num_proc; j++) {
            int row_start = j * range;
            int row_end = (j + 1) * range;
            if (j == num_proc - 1)  row_end = n;
            MPI_Send(&A[row_start][0], n * (row_end - row_start), MPI_FLOAT, j, 0, MPI_COMM_WORLD);
        }
    } else {
        MPI_Recv(&A[s][0], n * (e - s), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD); // distribution complete before timing starts
    double start = MPI_Wtime();
#pragma omp parallel num_threads(NUM_THREADS), private(i, j, k, temp)
    for (k = 0; k < n; k++) {
        #pragma omp single // one thread normalises + communicates; implicit barrier after
        if (k >= s && k < e) {
            temp = A[k][k];
            __m128 vt = _mm_set1_ps(temp);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 va = _mm_loadu_ps(&A[k][j]);
                va = _mm_div_ps(va, vt);
                _mm_storeu_ps(&A[k][j], va);
            }
            for (; j < n; j++) // scalar tail
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            for (j = rank + 1; j < num_proc; j++)
                MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        } else {
            if (k / range < rank)
                MPI_Recv(&A[k][0], n, MPI_FLOAT, k / range, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
#pragma omp for schedule(guided) // threads share the SSE elimination of this rank's rows
        for (i = max(s, k + 1); i < e; i++) {
            temp = A[i][k];
            __m128 vaik = _mm_set1_ps(temp);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 vakj = _mm_loadu_ps(&A[k][j]);
                __m128 vaij = _mm_loadu_ps(&A[i][j]);
                __m128 vx = _mm_mul_ps(vakj, vaik);
                vaij = _mm_sub_ps(vaij, vx);
                _mm_storeu_ps(&A[i][j], vaij);
            }
            for (; j < n; j++) // scalar tail
                A[i][j] = A[i][j] - A[k][j] * A[i][k];
            A[i][k] = 0;
        }
    }
    double end = MPI_Wtime();
    if (rank == num_proc - 1)
        cout << " mpi_sse_omp_block" << (end - start) * 1000 << "ms" << endl;
}

// MPI+SSE+OpenMP: cyclic row distribution, blocking communication.
// BUGFIXES vs. original:
//  - pivot rows were only sent to ranks > owner; under the cyclic layout lower ranks
//    also hold rows below k, so the owner now broadcasts to every other rank.
//  - MPI_Recv takes MPI_STATUS_IGNORE, not MPI_STATUSES_IGNORE.
void mpi_sse_omp_loop(int rank)
{
    int i = 0, j = 0, k = 0;
    float temp = 0;
    if (rank == 0) { // rank 0 initialises A and deals rows out cyclically
        init(A);
        for (k = 0; k < n; k++) {
            if (k % num_proc != 0)
                MPI_Send(&A[k][0], n, MPI_FLOAT, k % num_proc, 0, MPI_COMM_WORLD);
        }
    } else {
        for (k = 0; k < n; k++) {
            if (k % num_proc == rank)
                MPI_Recv(&A[k][0], n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }
    MPI_Barrier(MPI_COMM_WORLD); // distribution complete before timing starts
    double start = MPI_Wtime();
#pragma omp parallel num_threads(NUM_THREADS), private(i, j, k, temp)
    for (k = 0; k < n; k++) {
#pragma omp single // one thread normalises + communicates; implicit barrier after
        if (k % num_proc == rank) {
            temp = A[k][k];
            __m128 vt = _mm_set1_ps(temp);
            for (j = k + 1; j + 4 <= n; j += 4) {
                __m128 va = _mm_loadu_ps(&A[k][j]);
                va = _mm_div_ps(va, vt);
                _mm_storeu_ps(&A[k][j], va);
            }
            for (; j < n; j++) // scalar tail
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            // broadcast the finished pivot row to every other rank
            for (j = 0; j < num_proc; j++)
                if (j != rank)
                    MPI_Send(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD);
        } else {
            MPI_Recv(&A[k][0], n, MPI_FLOAT, k % num_proc, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
#pragma omp for schedule(guided) // threads share the SSE elimination of this rank's rows
        for (i = 0; i < n; i++) {
            if (i % num_proc == rank && i > k) {
                temp = A[i][k];
                __m128 vaik = _mm_set1_ps(temp);
                for (j = k + 1; j + 4 <= n; j += 4) {
                    __m128 vakj = _mm_loadu_ps(&A[k][j]);
                    __m128 vaij = _mm_loadu_ps(&A[i][j]);
                    __m128 vx = _mm_mul_ps(vakj, vaik);
                    vaij = _mm_sub_ps(vaij, vx);
                    _mm_storeu_ps(&A[i][j], vaij);
                }
                for (; j < n; j++) // scalar tail
                    A[i][j] = A[i][j] - A[k][j] * A[i][k];
                A[i][k] = 0;
            }
        }
    }
    double end = MPI_Wtime();
    if (rank == num_proc - 1)
        cout << "mpi_sse_omp_loop" << (end - start) * 1000 << "ms" << endl;
}

//MPI：流水线，阻塞通信
void mpi_pipeline_loop(int rank) 
{
    // 只有0号进程进行初始化工作
    if (rank == 0)
        init(A);

    double start = MPI_Wtime();  // 记录开始时间
    // 计算每个进程负责的任务数
    int range = rank < n % num_proc ? n / num_proc + 1 : n / num_proc;
    // 动态分配缓冲区，用于发送数据
    auto *temp = new float[range * n];
    if (rank == 0) { // 0号进程分发任务给其他进程
        for (int p = 1; p < num_proc; p++) {
            for (int i = p; i < n; i += num_proc) {
                for (int j = 0; j < n; j++)
                    temp[i / num_proc * n + j] = A[i][j];
            }
            int count = p < n % num_proc ? n / num_proc + 1 : n / num_proc;
            MPI_Send(temp, count * n, MPI_FLOAT, p, 0, MPI_COMM_WORLD);
        }
    } else { // 非0号进程接收任务
        MPI_Recv(&A[rank][0], range * n, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (int i = 0; i < range; i++) {
            for (int j = 0; j < n; j++) {
                A[rank + i * num_proc][j] = A[rank + i][j];
            }
        }
    }

    // 定义前一个和下一个处理器ID
    int pre_proc = (rank + (num_proc - 1)) % num_proc;
    int next_proc = (rank + 1) % num_proc;

    // 开始进行消元运算
    for (int k = 0; k < n; k++) {
        if (k % num_proc == rank) { // 负责处理主元行的进程
            for (int j = k + 1; j < n; j++) {
                A[k][j] /= A[k][k];
            }
            A[k][k] = 1;
            MPI_Send(&A[k][0], n, MPI_FLOAT, next_proc, 1, MPI_COMM_WORLD); // 将主元行发送给下一个进程
        } else { // 其余进程接收主元行
            MPI_Recv(&A[k][0], n, MPI_FLOAT, pre_proc, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            if (next_proc != k % num_proc)
                MPI_Send(&A[k][0], n, MPI_FLOAT, next_proc, 1, MPI_COMM_WORLD);
        }

        // 消元操作
        int begin = n / num_proc * num_proc + rank < n ? n / num_proc * num_proc + rank : n / num_proc * num_proc + rank - num_proc;
        for (int i = begin; i > k; i -= num_proc) {
            for (int j = k + 1; j < n; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
            A[i][k] = 0;
        }
    }

    double end = MPI_Wtime();  // 记录结束时间
    cout<<"mpi_pipline_loop"<<(end - start) * 1000<<"ms";
}

//MPI：块划分，非阻塞通信
void mpi_block_nio(int rank)
{
    int j;
    int range = (n - n % num_proc) / num_proc; // 确定每个进程处理的行块长度
    int s = rank * range;     // 当前进程处理的起始行
    int e = (rank + 1) * range; // 当前进程处理的结束行
    if (rank == num_proc - 1) e = n; // 确保最后一个进程处理所有剩余行
    if (rank == 0) { // 主进程初始化矩阵A
        init(A);
        for (j = 1; j < num_proc; j++) {
            int row_start = j * range;
            int row_end = (j + 1) * range;
            if (j == num_proc - 1) row_end = n; // 确保最后一个块包含所有剩余行
            MPI_Send(&A[row_start][0], n * (row_end - row_start), MPI_FLOAT, j, 0, MPI_COMM_WORLD); // 发送数据块到对应进程
        }
    } else { // 其他进程接收自己的数据块
        MPI_Recv(&A[s][0], n * (e - s), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    double start = MPI_Wtime();
    for (int k = 0; k < n; k++) {
        if (k >= s && k < e) {
            for (j = k + 1; j < n; j++)
                A[k][j] = A[k][j] / A[k][k];
            A[k][k] = 1.0;
            MPI_Request request;
            for (j = rank + 1; j < num_proc; j++)
                MPI_Isend(&A[k][0], n, MPI_FLOAT, j, 2, MPI_COMM_WORLD, &request);
        } else {
            if ((k / range) < rank) {
                MPI_Request request;
                MPI_Irecv(&A[k][0], n, MPI_FLOAT, k / range, 2, MPI_COMM_WORLD, &request);
                MPI_Wait(&request, MPI_STATUS_IGNORE); // 等待非阻塞接收完成
            }
        }
        for (int i = max(s, k + 1); i < e; i++) {
            for (j = k + 1; j < n; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
            A[i][k] = 0;
        }
    }
    double end = MPI_Wtime();
    if (rank == 0)
        cout << "mpi_block_non" << (end - start) * 1000 << " ms" << endl;
}


int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    normal(rank);
    sse(rank);
    mpi_loop(rank);
    mpi_block(rank);
    mpi_sse_block(rank);
    mpi_sse_loop(rank);
    mpi_omp_block(rank);
    mpi_omp_loop(rank);
    mpi_sse_omp_block(rank);
    mpi_sse_omp_loop(rank);
    mpi_pipeline_loop(rank);
    mpi_block_nio(rank);
    MPI_Finalize();
    return 0;
}