#include <algorithm>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <vector>

#include "src/blas.h"

/// Benchmark driver: times a dot product of two random vectors (helpers
/// declared in src/blas.h) and, when built with USE_PARALLEL, demonstrates
/// OpenMP thread queries and an MPI block-distributed array sum.
int main(int argc, char** argv)
{
#if defined(USE_PARALLEL)
    // Initialize the MPI environment.
    MPI_Init(&argc, &argv);
#endif

    std::vector<double> x;
    std::vector<double> y;
    const int N = 10000;      // vector length
    const double xmin = 0.0;  // lower bound of random values
    const double xmax = 1.0;  // upper bound of random values
    double result = 0.0;
    // clock() returns clock_t. NOTE: it measures CPU time, which is summed
    // over all threads inside an OpenMP region, not wall-clock time.
    clock_t start = 0;
    clock_t stop = 0;

#if defined(USE_PARALLEL)
    // omp_get_num_threads() returns 1 outside a parallel region; only the
    // call inside the region below reports the actual team size.
    printf("number of thread = %d, number of process = %d\n", omp_get_num_threads(), omp_get_num_procs());
#pragma omp parallel
    {
#pragma omp single
        printf("number of thread = %d, number of process = %d\n", omp_get_num_threads(), omp_get_num_procs());
    }
    // Request 4 threads for subsequent parallel regions.
    omp_set_num_threads(4);
#endif


    start = clock();

    // Fill x and y with N uniform random values in [xmin, xmax], then time
    // the dot product.
    vector_creat_random(x, N, xmin, xmax);
    vector_creat_random(y, N, xmin, xmax);

    result = dot(x, y);

    stop = clock();

    printf("Result = %15.10f, cost time = %6ld clocks\n", result, (long)(stop - start));

#if defined(USE_PARALLEL)
    // omp_get_num_threads() must be called inside a parallel region to
    // return the number of threads in use; outside it always returns 1.
    printf("number of thread = %d, number of process = %d\n", omp_get_num_threads(), omp_get_num_procs());
#pragma omp parallel
    {
#pragma omp single
        printf("number of thread = %d, number of process = %d\n", omp_get_num_threads(), omp_get_num_procs());
    }

    // --- MPI demo: distribute the values 1..N across ranks and sum them ---
    int rank = 0;
    int size = 1;
    const int array_size = N;
    std::vector<int> full_array(array_size);

    // Query this rank's id and the communicator size.
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0) {
        // Only the root initializes the data: values 1 .. array_size.
        for (int i = 0; i < array_size; ++i) {
            full_array[i] = i + 1;
        }
    }

    // Block partitioning: the first `remainder` ranks each take one extra
    // element so the load stays balanced when size does not divide N.
    int elements_per_process = array_size / size;
    int remainder = array_size % size;
    int start_index = rank * elements_per_process + std::min(rank, remainder);
    int end_index = start_index + elements_per_process + (rank < remainder ? 1 : 0);

    // Local slice owned by this rank.
    std::vector<int> local_array(end_index - start_index);

    // Broadcast the whole array; each rank then copies out its own slice.
    MPI_Bcast(full_array.data(), array_size, MPI_INT, 0, MPI_COMM_WORLD);
    for (int i = start_index; i < end_index; ++i) {
        local_array[i - start_index] = full_array[i];
    }

    // Local partial sum in 64 bits so large array sizes cannot overflow
    // a 32-bit int.
    long long local_sum = 0;
    for (int val : local_array) {
        local_sum += val;
    }
    std::cout << "Local sum: " << local_sum << std::endl;

    // Reduce the partial sums onto the root rank.
    long long global_sum = 0;
    MPI_Reduce(&local_sum, &global_sum, 1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        // Closed-form reference N*(N+1)/2, computed in 64 bits. The previous
        // form (N+1)*(N/2) truncated for odd N and could overflow int.
        const long long ref = static_cast<long long>(array_size) * (array_size + 1) / 2;
        std::cout << "Global sum: " << global_sum << ", Ref = " << ref << std::endl;
    }

    // Shut down the MPI environment.
    MPI_Finalize();
#endif

    return 0;
}
