#include <mpi.h>
#include <omp.h>
#include <arm_neon.h>
#include <iostream>
#include <vector>
#include <cstdlib>
#include <sys/time.h>

using namespace std;

// 随机初始化矩阵
// Fill the N x N matrix with pseudo-random floats in [0, 1].
// Consumes the global rand() stream row-major, so results are reproducible
// for a given srand() seed.
void initializeMatrix(vector<vector<float>>& matrix, int N) {
    for (int r = 0; r < N; ++r) {
        vector<float>& row = matrix[r];
        for (int c = 0; c < N; ++c) {
            row[c] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        }
    }
}

int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);

    int numProcesses, currentRank;
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &currentRank);

    // 设置矩阵大小
    const int N = 1024;

    // 分配矩阵内存
    vector<vector<float>> matrix(N, vector<float>(N));

    // 初始化矩阵
    if (currentRank == 0) {
        initializeMatrix(matrix, N);
    }

    // 广播矩阵到所有进程
    for (int i = 0; i < N; ++i) {
        MPI_Bcast(matrix[i].data(), N, MPI_FLOAT, 0, MPI_COMM_WORLD);
    }

    // 记录开始时间
    struct timeval t_start, t_end;
    gettimeofday(&t_start, NULL);

    for (int globalRowIndex = 0; globalRowIndex < N; ++globalRowIndex) {
        if (currentRank == (globalRowIndex % numProcesses)) {
            // 执行高斯消元的局部操作
            float32x4_t diagonal = vdupq_n_f32(matrix[globalRowIndex][globalRowIndex]);
            int col = globalRowIndex + 1;
            for (; col <= N - 4; col += 4) {
                float32x4_t row_data = vld1q_f32(&matrix[globalRowIndex][col]);
                row_data = vdivq_f32(row_data, diagonal);
                vst1q_f32(&matrix[globalRowIndex][col], row_data);
            }
            for (; col < N; ++col) {
                matrix[globalRowIndex][col] /= matrix[globalRowIndex][globalRowIndex];
            }

            // 将处理后的行数据发送给所有其他进程
            for (int recipient = 0; recipient < numProcesses; ++recipient) {
                if (recipient != currentRank) {
                    MPI_Send(matrix[globalRowIndex].data(), N, MPI_FLOAT, recipient, globalRowIndex, MPI_COMM_WORLD);
                }
            }
        } else {
            // 如果该行数据应由其他进程处理，则接收数据
            int sendingRank = globalRowIndex % numProcesses;
            MPI_Recv(matrix[globalRowIndex].data(), N, MPI_FLOAT, sendingRank, globalRowIndex, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        // 对当前进程负责的所有行进行消元操作
        #pragma omp parallel for
        for (int i = globalRowIndex + 1; i < N; ++i) {
            if (i % numProcesses == currentRank) {
                float32x4_t pivot_row_val;
                float32x4_t factor;
                int j = globalRowIndex + 1;
                for (; j <= N - 4; j += 4) {
                    pivot_row_val = vld1q_f32(&matrix[globalRowIndex][j]);
                    factor = vdupq_n_f32(matrix[i][globalRowIndex]);
                    float32x4_t row_data = vld1q_f32(&matrix[i][j]);
                    row_data = vmlsq_f32(row_data, factor, pivot_row_val);
                    vst1q_f32(&matrix[i][j], row_data);
                }
                for (; j < N; ++j) {
                    matrix[i][j] -= matrix[i][globalRowIndex] * matrix[globalRowIndex][j];
                }
                matrix[i][globalRowIndex] = 0;
            }
        }
    }

    // 记录结束时间
    gettimeofday(&t_end, NULL);
    if (currentRank == 0) {
        cout << "Pipeline MPI+OpenMP+NEON time cost: "
            << 1000 * (t_end.tv_sec - t_start.tv_sec) +
            0.001 * (t_end.tv_usec - t_start.tv_usec) << "ms" << endl;
    }

    MPI_Finalize();
    return 0;
}
