/*******************************************************************************
 * MPI并行性能诊断工具
 * 
 * 功能：
 * 1. MPI通信性能测试（点对点、集合通信）
 * 2. 内存带宽测试
 * 3. 计算密集型扩展性测试（矩阵乘法、HPCC GFLOPS）
 * 4. NUMA感知测试
 * 5. 负载均衡测试
 * 6. 缓存性能测试
 * 
 * 编译方法：
 *   mpicxx -O3 -march=native -fopenmp -o mpiTest mpiTest.cpp -lm
 * 
 * 运行方法：
 *   mpirun -np <进程数> ./mpiTest
 * 
 ******************************************************************************/

#include <mpi.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <cmath>
#include <cstring>
#include <algorithm>
#include <numeric>
#include <map>
#include <sys/time.h>
#include <unistd.h>
#include <fstream>
#include <sstream>
#include <dirent.h>
#include <sched.h>

// Return the current wall-clock time in seconds (microsecond resolution).
double get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec / 1e6;
}

// Print a banner separating test sections; only rank 0 produces output.
void print_section(int rank, const std::string& title) {
    if (rank != 0) return;
    const std::string bar(80, '=');
    std::cout << "\n" << bar << std::endl
              << "  " << title << std::endl
              << bar << std::endl;
}

/*******************************************************************************
 * Test 1: MPI point-to-point latency/bandwidth (ping-pong).
 *
 * Selects up to three representative rank pairs -- same NUMA node, different
 * NUMA node on the same host, and different hosts -- then runs a timed
 * ping-pong over a range of message sizes for each pair. Every rank must
 * call this function (it contains collectives); the measured numbers are
 * printed by the first rank of each pair.
 ******************************************************************************/
void test_point_to_point(int rank, int size) {
    print_section(rank, "测试1：点对点通信性能（Ping-Pong）");

    const int WARMUP = 10;
    const int ITERATIONS = 100;
    std::vector<int> message_sizes = {1, 8, 64, 512, 4096, 32768, 262144, 1048576, 8388608, 33554432, 134217728, 536870912};

    // Collect this rank's hostname / executing CPU / NUMA node.
    char hostname[256];
    gethostname(hostname, sizeof(hostname));
    int cpu_id = sched_getcpu();
    int numa_id = -1;
    // Resolve the NUMA node by scanning /sys/devices/system/node/node*/cpulist
    // (same parsing logic as test_memory_bandwidth). numa_id stays -1 if the
    // CPU cannot be matched to a node.
    if (cpu_id >= 0) {
        const char* node_dir = "/sys/devices/system/node";
        DIR* dp = opendir(node_dir);
        if (dp) {
            struct dirent* entry;
            while ((entry = readdir(dp)) != NULL) {
                std::string name(entry->d_name);
                if (name.rfind("node", 0) == 0) {
                    std::string cpulist_path = std::string(node_dir) + "/" + name + "/cpulist";
                    std::ifstream ifs_cpu(cpulist_path);
                    if (!ifs_cpu) continue;
                    std::string cpulist;
                    std::getline(ifs_cpu, cpulist);
                    std::istringstream ss(cpulist);
                    std::string token;
                    bool found = false;
                    // cpulist entries are comma-separated ids or dash ranges,
                    // e.g. "0-3,8,10-11".
                    while (std::getline(ss, token, ',')) {
                        size_t dash = token.find('-');
                        if (dash == std::string::npos) {
                            int v = std::stoi(token);
                            if (v == cpu_id) { found = true; break; }
                        } else {
                            int a = std::stoi(token.substr(0, dash));
                            int b = std::stoi(token.substr(dash+1));
                            if (cpu_id >= a && cpu_id <= b) { found = true; break; }
                        }
                    }
                    if (found) {
                        // Node id is the numeric suffix after "node".
                        try { numa_id = std::stoi(name.substr(4)); } catch(...) { numa_id = -1; }
                        break;
                    }
                }
            }
            closedir(dp);
        }
    }

    // Gather fixed-length hostname, cpu_id and numa_id on rank 0.
    const int HOSTLEN = 256;
    std::vector<char> all_hostbuf(size * HOSTLEN);
    std::vector<int> all_cpus(size);
    std::vector<int> all_numas(size);

    // Copy the hostname into a zeroed fixed-size buffer so MPI_Gather sends
    // a NUL-terminated, uniform-length record.
    char hostbuf[HOSTLEN];
    std::memset(hostbuf, 0, HOSTLEN);
    std::strncpy(hostbuf, hostname, HOSTLEN-1);

    MPI_Gather(hostbuf, HOSTLEN, MPI_CHAR, all_hostbuf.data(), HOSTLEN, MPI_CHAR, 0, MPI_COMM_WORLD);
    MPI_Gather(&cpu_id, 1, MPI_INT, all_cpus.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Gather(&numa_id, 1, MPI_INT, all_numas.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

    // Rank 0 chooses three pair categories: same NUMA, different NUMA on the
    // same host, and different hosts. {-1,-1} means "no such pair exists".
    int same_numa_pair[2] = {-1, -1};
    int diff_numa_pair[2] = {-1, -1};
    int diff_host_pair[2] = {-1, -1};

    if (rank == 0) {
        // Decode the gathered fixed-length hostnames into strings.
        std::vector<std::string> hosts(size);
        for (int i = 0; i < size; i++) {
            hosts[i] = std::string(&all_hostbuf[i * HOSTLEN]);
        }

        // First pair: same host AND same (known) NUMA node, distinct ranks.
        for (int i = 0; i < size; i++) {
            for (int j = i+1; j < size; j++) {
                if (hosts[i] == hosts[j] && all_numas[i] >= 0 && all_numas[i] == all_numas[j]) {
                    same_numa_pair[0] = i; same_numa_pair[1] = j; goto found_same_numa;
                }
            }
        }
found_same_numa:;

        // Second pair: same host, different (known) NUMA nodes.
        for (int i = 0; i < size; i++) {
            for (int j = i+1; j < size; j++) {
                if (hosts[i] == hosts[j] && all_numas[i] >=0 && all_numas[j] >=0 && all_numas[i] != all_numas[j]) {
                    diff_numa_pair[0] = i; diff_numa_pair[1] = j; goto found_diff_numa;
                }
            }
        }
found_diff_numa:;

        // Third pair: any two ranks on different hosts.
        for (int i = 0; i < size; i++) {
            for (int j = i+1; j < size; j++) {
                if (hosts[i] != hosts[j]) {
                    diff_host_pair[0] = i; diff_host_pair[1] = j; goto found_diff_host;
                }
            }
        }
found_diff_host:;

        std::cout << "Ping-Pong 配对 (same NUMA / diff NUMA same host / diff host):\n";
        auto print_pair = [&](int a, int b){ if (a>=0) std::cout << a << "-" << b; else std::cout << "N/A"; };
        std::cout << " same_numa: "; print_pair(same_numa_pair[0], same_numa_pair[1]);
        std::cout << "  diff_numa: "; print_pair(diff_numa_pair[0], diff_numa_pair[1]);
        std::cout << "  diff_host: "; print_pair(diff_host_pair[0], diff_host_pair[1]);
        std::cout << std::endl;
    }

    // Broadcast the chosen pairs: ALL ranks must know them because every
    // rank enters run_pingpong (it contains MPI_Barrier calls).
    MPI_Bcast(same_numa_pair, 2, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(diff_numa_pair, 2, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(diff_host_pair, 2, MPI_INT, 0, MPI_COMM_WORLD);

    // Run one ping-pong between peer0/peer1 at msg_size bytes.
    // Non-participating ranks just hit the barriers and return {-1,-1}.
    auto run_pingpong = [&](int peer0, int peer1, int msg_size)->std::pair<double,double> {
        // returns pair<latency_us, bandwidth_MBps> measured by peer0 (only meaningful if peer0 participates)
        std::vector<char> send_buf(msg_size);
        std::vector<char> recv_buf(msg_size);
        for (int i = 0; i < msg_size; i++) send_buf[i] = (char)(i % 256);

        // Line all ranks up before the warm-up phase.
        MPI_Barrier(MPI_COMM_WORLD);

        if (rank == peer0 || rank == peer1) {
            // Warm-up exchanges (not timed) to settle caches and connections.
            for (int i = 0; i < WARMUP; i++) {
                if (rank == peer0) {
                    MPI_Send(send_buf.data(), msg_size, MPI_CHAR, peer1, 100, MPI_COMM_WORLD);
                    MPI_Recv(recv_buf.data(), msg_size, MPI_CHAR, peer1, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                } else {
                    MPI_Recv(recv_buf.data(), msg_size, MPI_CHAR, peer0, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                    MPI_Send(send_buf.data(), msg_size, MPI_CHAR, peer0, 100, MPI_COMM_WORLD);
                }
            }
        }

        MPI_Barrier(MPI_COMM_WORLD);

        // Timed phase: peer0 sends first, peer1 echoes back.
        double start = MPI_Wtime();
        if (rank == peer0 || rank == peer1) {
            for (int i = 0; i < ITERATIONS; i++) {
                if (rank == peer0) {
                    MPI_Send(send_buf.data(), msg_size, MPI_CHAR, peer1, 101, MPI_COMM_WORLD);
                    MPI_Recv(recv_buf.data(), msg_size, MPI_CHAR, peer1, 101, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                } else {
                    MPI_Recv(recv_buf.data(), msg_size, MPI_CHAR, peer0, 101, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                    MPI_Send(send_buf.data(), msg_size, MPI_CHAR, peer0, 101, MPI_COMM_WORLD);
                }
            }
        }
        double end = MPI_Wtime();

        if (rank == peer0) {
            double rtt = (end - start) / ITERATIONS; // round-trip seconds
            double latency_us = rtt * 1e6 / 2.0; // one-way us
            double bandwidth = (msg_size * 2.0) / (rtt * 1e6); // MB/s
            return {latency_us, bandwidth};
        }
        return {-1.0, -1.0};
    };

    // For each category with a valid pair, sweep all message sizes. The
    // header is printed by rank 0 but the rows by rank p.a (the measurer),
    // so with some MPI launchers the lines may interleave in the log.
    struct Pair { int a,b; const char* name; } pairs[3] = {
        {same_numa_pair[0], same_numa_pair[1], "same NUMA"},
        {diff_numa_pair[0], diff_numa_pair[1], "diff NUMA (same host)"},
        {diff_host_pair[0], diff_host_pair[1], "diff host"}
    };

    for (auto &p : pairs) {
        if (p.a < 0 || p.b < 0) continue; // skip categories with no pair

        if (rank == 0) {
            std::cout << "\n[Ping-Pong - " << p.name << "] pairs: " << p.a << " <-> " << p.b << std::endl;
            std::cout << std::left << std::setw(15) << "消息大小(B)" 
                      << std::setw(15) << "带宽(MB/s)" 
                      << std::setw(15) << "延迟(us)" << std::endl;
            std::cout << std::string(45, '-') << std::endl;
        }

        for (int msg_size : message_sizes) {
            // All ranks call run_pingpong; only peer0 gets real values back.
            auto res = run_pingpong(p.a, p.b, msg_size);
            // Keep every rank in lock-step between message sizes.
            MPI_Barrier(MPI_COMM_WORLD);
            if (rank == p.a) {
                double latency = res.first;
                double bandwidth = res.second;
                std::cout << std::left << std::setw(15) << msg_size
                          << std::setw(15) << std::fixed << std::setprecision(2) << bandwidth
                          << std::setw(15) << std::fixed << std::setprecision(2) << latency << std::endl;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }
    }

    // Final synchronization before leaving the test.
    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 2: collective communication performance (Broadcast and Allreduce).
 *
 * Times ITERATIONS back-to-back collectives per message size / element count;
 * rank 0 reports the per-call average (and, for Bcast, the implied MB/s).
 ******************************************************************************/
void test_collective_communication(int rank, int size) {
    print_section(rank, "测试2：集合通信性能");

    const int ITERATIONS = 100;
    std::vector<int> message_sizes = {1, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 8388608};

    if (rank == 0) {
        std::cout << "\n[Broadcast 测试]" << std::endl;
        std::cout << std::left << std::setw(15) << "消息大小(B)" 
                  << std::setw(20) << "时间(ms)" 
                  << std::setw(20) << "带宽(MB/s)" << std::endl;
        std::cout << std::string(55, '-') << std::endl;
    }

    // Broadcast sweep: root broadcasts a msg_size-byte buffer repeatedly.
    for (int msg_size : message_sizes) {
        std::vector<char> bcast_buf(msg_size);

        MPI_Barrier(MPI_COMM_WORLD);
        const double t0 = MPI_Wtime();
        for (int iter = 0; iter < ITERATIONS; ++iter) {
            MPI_Bcast(bcast_buf.data(), msg_size, MPI_CHAR, 0, MPI_COMM_WORLD);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        const double t1 = MPI_Wtime();

        if (rank != 0) continue;

        const double per_iter_s = (t1 - t0) / ITERATIONS;  // seconds per Bcast
        const double avg_ms = per_iter_s * 1000;

        std::cout << std::left << std::setw(15) << msg_size;
        // Below ~0.01 us the measurement is pure timer noise; don't derive
        // a bandwidth figure from it.
        if (avg_ms > 0.00001) {
            const double mbps = msg_size / per_iter_s / 1e6;  // MB/s
            std::cout << std::setw(20) << std::fixed << std::setprecision(6) << avg_ms
                      << std::setw(20) << std::fixed << std::setprecision(2) << mbps << std::endl;
        } else {
            std::cout << std::setw(20) << "< 0.00001"
                      << std::setw(20) << "N/A (极快)" << std::endl;
        }
    }

    // Allreduce sweep over element counts (MPI_DOUBLE, MPI_SUM).
    if (rank == 0) {
        std::cout << "\n[Allreduce 测试]" << std::endl;
        std::cout << std::left << std::setw(15) << "元素个数" 
                  << std::setw(20) << "时间(ms)" << std::endl;
        std::cout << std::string(35, '-') << std::endl;
    }

    std::vector<int> elem_counts = {1, 256, 1024, 4096, 16384, 65536, 262144, 1048576};
    for (int count : elem_counts) {
        std::vector<double> in_buf(count, 1.0);
        std::vector<double> out_buf(count);

        MPI_Barrier(MPI_COMM_WORLD);
        const double t0 = MPI_Wtime();
        for (int iter = 0; iter < ITERATIONS; ++iter) {
            MPI_Allreduce(in_buf.data(), out_buf.data(), count, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        const double t1 = MPI_Wtime();

        if (rank != 0) continue;

        const double avg_ms = (t1 - t0) / ITERATIONS * 1000;  // ms per call
        std::cout << std::left << std::setw(15) << count;
        if (avg_ms > 0.00001) {
            std::cout << std::setw(20) << std::fixed << std::setprecision(6) << avg_ms << std::endl;
        } else {
            std::cout << std::setw(20) << "< 0.00001" << std::endl;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 3: per-process memory copy bandwidth (memcpy), with topology.
 *
 * Each rank times ITERATIONS large memcpy operations; rank 0 gathers the
 * per-iteration times plus each rank's CPU / socket / NUMA placement and
 * prints summary statistics and a per-rank table. Bandwidth is computed as
 * bytes-copied / time (the read+write traffic is not double-counted).
 ******************************************************************************/
void test_memory_bandwidth(int rank, int size) {
    print_section(rank, "测试3：内存带宽测试");

    // SIZE is an ELEMENT count, not bytes: 100M doubles = 800 MB per buffer,
    // i.e. ~1.6 GB per rank for src+dst. (The old comment said "100MB",
    // which was off by sizeof(double).)
    const size_t SIZE = 100 * 1024 * 1024;
    const int ITERATIONS = 10;

    std::vector<double> src(SIZE);
    std::vector<double> dst(SIZE);

    // Touch every element of src so first-touch page faults happen here,
    // not inside the timed loop.
    for (size_t i = 0; i < SIZE; i++) {
        src[i] = i * 1.0;
    }

    // Warm-up copy (also faults in dst's pages).
    memcpy(dst.data(), src.data(), SIZE * sizeof(double));

    MPI_Barrier(MPI_COMM_WORLD);
    double start = MPI_Wtime();

    for (int i = 0; i < ITERATIONS; i++) {
        memcpy(dst.data(), src.data(), SIZE * sizeof(double));
    }

    double end = MPI_Wtime();

    double total_time = end - start;                     // total elapsed (s)
    double time_per_iter = total_time / ITERATIONS;      // average per copy (s)

    // Guard for the divisions below.
    const double EPS = 1e-12;
    size_t bytes = SIZE * sizeof(double);
    // (Removed: a local per-rank bandwidth was computed here but never used;
    // rank 0 recomputes every rank's bandwidth from the gathered times.)

    // Topology of this rank: executing CPU, its socket and NUMA node.
    int cpu_id = -1;
    int socket_id = -1;
    int numa_id = -1;
    cpu_id = sched_getcpu();

    // Read the socket id (physical_package_id) from sysfs.
    if (cpu_id >= 0) {
        std::ostringstream path_sock;
        path_sock << "/sys/devices/system/cpu/cpu" << cpu_id << "/topology/physical_package_id";
        std::ifstream ifs_sock(path_sock.str());
        if (ifs_sock) {
            ifs_sock >> socket_id;
        }

        // Find the NUMA node whose cpulist contains cpu_id by scanning
        // /sys/devices/system/node/node*/cpulist.
        const char* node_dir = "/sys/devices/system/node";
        DIR* dp = opendir(node_dir);
        if (dp) {
            struct dirent* entry;
            while ((entry = readdir(dp)) != NULL) {
                std::string name(entry->d_name);
                if (name.rfind("node", 0) == 0) { // entries named "node<N>"
                    std::string cpulist_path = std::string(node_dir) + "/" + name + "/cpulist";
                    std::ifstream ifs_cpu(cpulist_path);
                    if (!ifs_cpu) continue;
                    std::string cpulist;
                    std::getline(ifs_cpu, cpulist);
                    // cpulist looks like "0-3,8,10-11": comma-separated
                    // single ids or dash ranges.
                    std::istringstream ss(cpulist);
                    std::string token;
                    bool found = false;
                    while (std::getline(ss, token, ',')) {
                        size_t dash = token.find('-');
                        if (dash == std::string::npos) {
                            int v = std::stoi(token);
                            if (v == cpu_id) { found = true; break; }
                        } else {
                            int a = std::stoi(token.substr(0, dash));
                            int b = std::stoi(token.substr(dash+1));
                            if (cpu_id >= a && cpu_id <= b) { found = true; break; }
                        }
                    }
                    if (found) {
                        // Node id is the numeric suffix after "node".
                        try {
                            numa_id = std::stoi(name.substr(4));
                        } catch (...) { numa_id = -1; }
                        break;
                    }
                }
            }
            closedir(dp);
        }
    }

    // Gather each rank's per-copy time and CPU/socket/NUMA info on rank 0.
    std::vector<double> all_times(size);
    std::vector<int> all_cpus(size);
    std::vector<int> all_sockets(size);
    std::vector<int> all_numas(size);

    MPI_Gather(&time_per_iter, 1, MPI_DOUBLE, all_times.data(), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Gather(&cpu_id, 1, MPI_INT, all_cpus.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Gather(&socket_id, 1, MPI_INT, all_sockets.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Gather(&numa_id, 1, MPI_INT, all_numas.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        // Derive each rank's bandwidth from its gathered per-copy time.
        std::vector<double> all_bws(size);
        for (int i = 0; i < size; i++) {
            if (all_times[i] > EPS) all_bws[i] = (double)bytes / all_times[i] / 1e9;
            else all_bws[i] = 0.0;
        }

        double avg_bw = std::accumulate(all_bws.begin(), all_bws.end(), 0.0) / size;
        double min_bw = *std::min_element(all_bws.begin(), all_bws.end());
        double max_bw = *std::max_element(all_bws.begin(), all_bws.end());

        std::cout << "平均内存带宽: " << std::fixed << std::setprecision(2) << avg_bw << " GB/s" << std::endl;
        std::cout << "最小内存带宽: " << std::fixed << std::setprecision(2) << min_bw << " GB/s" << std::endl;
        std::cout << "最大内存带宽: " << std::fixed << std::setprecision(2) << max_bw << " GB/s" << std::endl;
        if (avg_bw > EPS) {
            std::cout << "带宽差异率: " << std::fixed << std::setprecision(2)
                      << (max_bw - min_bw) / avg_bw * 100 << "%" << std::endl;
        }

        // Per-rank detail table including CPU / socket / NUMA placement.
        std::cout << "\n各进程详细时间与带宽:" << std::endl;
        std::cout << std::left << std::setw(10) << "进程ID"
                  << std::setw(8) << "CPU"
                  << std::setw(10) << "Socket"
                  << std::setw(8) << "NUMA"
                  << std::setw(16) << "时间/iter(s)"
                  << std::setw(12) << "时间(ms)"
                  << std::setw(14) << "带宽(GB/s)"
                  << std::setw(12) << "相对平均(%)" << std::endl;
        std::cout << std::string(105, '-') << std::endl;

        for (int i = 0; i < size; i++) {
            double t = all_times[i];
            double bw = all_bws[i];
            int cpu = all_cpus[i];
            int sock = all_sockets[i];
            int node = all_numas[i];
            double relative = (avg_bw > EPS) ? (bw / avg_bw - 1.0) * 100.0 : 0.0;
            // If several ranks tie for min and max, the later marker wins.
            std::string marker = "";
            if (bw == min_bw) marker = " [最低]";
            if (bw == max_bw) marker = " [最高]";

            std::cout << std::left << std::setw(10) << i
                      << std::setw(8) << cpu
                      << std::setw(10) << (sock >= 0 ? std::to_string(sock) : std::string("N/A"))
                      << std::setw(8) << (node >= 0 ? std::to_string(node) : std::string("N/A"))
                      << std::setw(16) << std::fixed << std::setprecision(6) << t
                      << std::setw(12) << std::fixed << std::setprecision(2) << t * 1000.0
                      << std::setw(14) << std::fixed << std::setprecision(2) << bw
                      << std::setw(12) << std::fixed << std::setprecision(2) << std::showpos << relative << std::noshowpos
                      << marker << std::endl;
        }
        std::cout << std::string(105, '-') << std::endl;
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 4: compute-intensive scaling test (distributed matrix multiply).
 *
 * The rows of C = A * B are block-distributed over the ranks; each rank runs
 * the naive O(N^3) kernel on its slice. Rank 0 reports the slowest rank's
 * time and the aggregate GFLOPS derived from the full 2*N^3 flop count.
 ******************************************************************************/
void test_compute_scalability(int rank, int size) {
    print_section(rank, "测试4：计算密集型扩展性测试（矩阵乘法）");

    // Shrink the problem set for small process counts so a single rank is
    // not stuck on a huge N^3 kernel.
    std::vector<int> matrix_sizes;
    if (size == 1) {
        matrix_sizes = {512, 1024};
    } else if (size <= 4) {
        matrix_sizes = {512, 1024, 1536};
    } else {
        matrix_sizes = {512, 1024, 2048};
    }

    if (rank == 0) {
        std::cout << std::left << std::setw(15) << "矩阵大小" 
                  << std::setw(20) << "计算时间(s)" 
                  << std::setw(20) << "GFLOPS" << std::endl;
        std::cout << std::string(55, '-') << std::endl;
        if (size == 1) {
            std::cout << "注意：单进程跳过大矩阵测试（避免等待过长）" << std::endl;
        }
    }

    for (int N : matrix_sizes) {
        if (rank == 0) {
            std::cout << "计算 " << N << "x" << N << " 矩阵..." << std::flush;
        }

        // Block-row decomposition: the first N % size ranks get one extra row.
        int my_rows = N / size + ((rank < N % size) ? 1 : 0);

        std::vector<double> A(my_rows * N);
        std::vector<double> B(N * N);
        std::vector<double> C(my_rows * N, 0.0);

        // Pseudo-random inputs in [0, 1); each rank draws independently.
        for (int idx = 0; idx < my_rows * N; ++idx) {
            A[idx] = (double)(rand() % 100) / 100.0;
        }
        for (int idx = 0; idx < N * N; ++idx) {
            B[idx] = (double)(rand() % 100) / 100.0;
        }

        MPI_Barrier(MPI_COMM_WORLD);
        const double t0 = MPI_Wtime();

        // Naive ijk matrix multiply over this rank's row slice.
        for (int row = 0; row < my_rows; ++row) {
            for (int col = 0; col < N; ++col) {
                double acc = 0.0;
                for (int k = 0; k < N; ++k) {
                    acc += A[row * N + k] * B[k * N + col];
                }
                C[row * N + col] = acc;
            }
        }

        MPI_Barrier(MPI_COMM_WORLD);
        const double t1 = MPI_Wtime();

        // The slowest rank determines the parallel runtime.
        double elapsed = t1 - t0;
        double max_time;
        MPI_Reduce(&elapsed, &max_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

        if (rank == 0) {
            // A full N x N multiply costs 2*N^3 floating-point operations.
            const double gflops = (2.0 * N * N * N) / max_time / 1e9;
            // "\r" overwrites the progress message with the result row.
            std::cout << "\r" << std::left << std::setw(15) << N
                      << std::setw(20) << std::fixed << std::setprecision(4) << max_time
                      << std::setw(20) << std::fixed << std::setprecision(2) << gflops << std::endl;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 5: load-balance check.
 *
 * Every rank runs the same fixed amount of floating-point work; rank 0
 * gathers per-rank wall times plus hostname/CPU/NUMA placement and reports
 * the spread. A large spread on identical work points at pinning problems,
 * frequency differences, or interference from other jobs.
 ******************************************************************************/
void test_load_balance(int rank, int size) {
    print_section(rank, "测试5：负载均衡测试");
    
    const long long WORKLOAD = 2000000;  // 2 million loop iterations per rank
    
    // volatile keeps the compiler from optimizing the benchmark loop away.
    volatile double result = 0.0;
    
    MPI_Barrier(MPI_COMM_WORLD);
    double start = MPI_Wtime();
    
    for (long long i = 0; i < WORKLOAD; i++) {
        result += sin((double)i) * cos((double)i);
    }
    
    double end = MPI_Wtime();
    double local_time = end - start;
    
    // "Use" result so even an aggressive optimizer must keep the loop.
    if (result < -1e100) {  // never true, but the compiler cannot prove it
        std::cout << "Impossible: " << result << std::endl;
    }
    
    // Gather each rank's elapsed time on rank 0.
    std::vector<double> all_times(size);
    MPI_Gather(&local_time, 1, MPI_DOUBLE, all_times.data(), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    
    // Also gather hostname / executing CPU / NUMA node per rank.
    const int HOSTLEN = 256;
    char myhost[HOSTLEN];
    std::memset(myhost, 0, HOSTLEN);
    gethostname(myhost, HOSTLEN-1);

    int my_cpu = sched_getcpu();
    int my_numa = -1;
    if (my_cpu >= 0) {
        // Same sysfs scan as the other tests: find the NUMA node whose
        // cpulist contains my_cpu; my_numa stays -1 if none matches.
        const char* node_dir = "/sys/devices/system/node";
        DIR* dp = opendir(node_dir);
        if (dp) {
            struct dirent* entry;
            while ((entry = readdir(dp)) != NULL) {
                std::string name(entry->d_name);
                if (name.rfind("node", 0) == 0) {
                    std::string cpulist_path = std::string(node_dir) + "/" + name + "/cpulist";
                    std::ifstream ifs_cpu(cpulist_path);
                    if (!ifs_cpu) continue;
                    std::string cpulist;
                    std::getline(ifs_cpu, cpulist);
                    std::istringstream ss(cpulist);
                    std::string token;
                    bool found = false;
                    // cpulist is comma-separated ids or dash ranges, e.g. "0-3,8".
                    while (std::getline(ss, token, ',')) {
                        size_t dash = token.find('-');
                        if (dash == std::string::npos) {
                            int v = std::stoi(token);
                            if (v == my_cpu) { found = true; break; }
                        } else {
                            int a = std::stoi(token.substr(0, dash));
                            int b = std::stoi(token.substr(dash+1));
                            if (my_cpu >= a && my_cpu <= b) { found = true; break; }
                        }
                    }
                    if (found) {
                        try { my_numa = std::stoi(name.substr(4)); } catch(...) { my_numa = -1; }
                        break;
                    }
                }
            }
            closedir(dp);
        }
    }

    std::vector<char> all_hostbuf(size * HOSTLEN);
    std::vector<int> all_cpus(size);
    std::vector<int> all_numas(size);

    MPI_Gather(myhost, HOSTLEN, MPI_CHAR, all_hostbuf.data(), HOSTLEN, MPI_CHAR, 0, MPI_COMM_WORLD);
    MPI_Gather(&my_cpu, 1, MPI_INT, all_cpus.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Gather(&my_numa, 1, MPI_INT, all_numas.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    
    if (rank == 0) {
        double avg_time = std::accumulate(all_times.begin(), all_times.end(), 0.0) / size;
        double min_time = *std::min_element(all_times.begin(), all_times.end());
        double max_time = *std::max_element(all_times.begin(), all_times.end());
        
        // Show both seconds and milliseconds for readability.
        std::cout << "平均计算时间: " << std::fixed << std::setprecision(6) << avg_time << " s (" 
                  << std::fixed << std::setprecision(2) << avg_time * 1000 << " ms)" << std::endl;
        std::cout << "最小计算时间: " << std::fixed << std::setprecision(6) << min_time << " s (" 
                  << std::fixed << std::setprecision(2) << min_time * 1000 << " ms)" << std::endl;
        std::cout << "最大计算时间: " << std::fixed << std::setprecision(6) << max_time << " s (" 
                  << std::fixed << std::setprecision(2) << max_time * 1000 << " ms)" << std::endl;
        
        // Per-rank detail table with timing and topology.
        std::cout << "\n各进程详细时间与拓扑信息:" << std::endl;
        std::cout << std::left << std::setw(8) << "Rank"
                  << std::setw(20) << "Hostname"
                  << std::setw(8) << "CPU"
                  << std::setw(8) << "NUMA"
                  << std::setw(18) << "计算时间(s)"
                  << std::setw(12) << "时间(ms)"
                  << std::setw(12) << "相对平均(%)" << std::endl;
        std::cout << std::string(86, '-') << std::endl;

        for (int i = 0; i < size; i++) {
            double relative = (all_times[i] / avg_time - 1.0) * 100;  // deviation from the mean, in percent
            // If several ranks tie for min and max, the later marker wins.
            std::string marker = "";
            if (all_times[i] == min_time) marker = " [最快]";
            if (all_times[i] == max_time) marker = " [最慢]";

            std::string hoststr = std::string(&all_hostbuf[i * HOSTLEN]);
            // trim possible trailing spaces / nulls
            size_t pos = hoststr.find('\0'); if (pos != std::string::npos) hoststr.resize(pos);

            std::cout << std::left << std::setw(8) << i
                      << std::setw(20) << hoststr
                      << std::setw(8) << all_cpus[i]
                      << std::setw(8) << (all_numas[i] >= 0 ? std::to_string(all_numas[i]) : std::string("N/A"))
                      << std::setw(18) << std::fixed << std::setprecision(6) << all_times[i]
                      << std::setw(12) << std::fixed << std::setprecision(2) << all_times[i] * 1000
                      << std::setw(12) << std::fixed << std::setprecision(2) << std::showpos << relative << std::noshowpos
                      << marker << std::endl;
        }
        std::cout << std::string(86, '-') << std::endl;
        
        // Only report imbalance when the mean exceeds 1 ms, otherwise the
        // numbers are dominated by timer noise.
        if (avg_time > 0.001) {
            std::cout << "\n负载不均衡度: " << std::fixed << std::setprecision(2) 
                      << (max_time - min_time) / avg_time * 100 << "%" << std::endl;
            
            // Parallel efficiency of this (nominally uniform) workload.
            double efficiency = min_time / max_time * 100;
            std::cout << "并行效率: " << std::fixed << std::setprecision(2) << efficiency << "%" << std::endl;
        } else {
            std::cout << "\n警告：计算时间过短（< 1ms），测量结果可能不准确" << std::endl;
        }
    }
    
    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 6: NUMA / CPU-affinity sanity check.
 *
 * Gathers every rank's currently-executing CPU id on rank 0, prints the
 * first 10 bindings, and counts ranks that share a CPU ("conflicts") --
 * usually a sign of missing or bad process pinning.
 ******************************************************************************/
void test_numa_awareness(int rank, int size) {
    print_section(rank, "测试6：NUMA感知测试");

    // CPU this rank is executing on right now. Only meaningful if ranks are
    // pinned; an unpinned rank may migrate between samples.
    // (Removed: a hostname buffer was filled here but never read.)
    int cpu_id = sched_getcpu();

    // Gather all ranks' CPU ids on rank 0.
    std::vector<int> all_cpus(size);
    MPI_Gather(&cpu_id, 1, MPI_INT, all_cpus.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        std::cout << "进程-CPU绑定信息（前10个进程）:" << std::endl;
        std::cout << std::left << std::setw(10) << "Rank" << std::setw(10) << "CPU ID" << std::endl;
        std::cout << std::string(20, '-') << std::endl;

        for (int i = 0; i < std::min(10, size); i++) {
            std::cout << std::left << std::setw(10) << i << std::setw(10) << all_cpus[i] << std::endl;
        }

        // Count ranks sharing a CPU. NOTE: only single-host runs are
        // meaningful here (CPU ids from different hosts collide), and ids
        // >= 1024 fall outside the fixed-size bitmap and are skipped.
        std::vector<bool> used_cpus(1024, false);
        int conflicts = 0;
        for (int cpu : all_cpus) {
            if (cpu >= 0 && cpu < 1024) {
                if (used_cpus[cpu]) {
                    conflicts++;
                }
                used_cpus[cpu] = true;
            }
        }

        std::cout << "\n绑定冲突数: " << conflicts << std::endl;
        if (conflicts > 0) {
            std::cout << "警告：检测到多个进程绑定到同一CPU，可能影响性能！" << std::endl;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 7: strong scaling (fixed global problem size).
 *
 * The total workload is held constant while each rank computes its share, so
 * running this with different process counts lets you compute the speedup
 * T(1)/T(n) by hand from the reported times.
 ******************************************************************************/
void test_strong_scaling(int rank, int size) {
    print_section(rank, "测试7：强扩展性测试（固定问题规模）");

    // Fixed global problem size: 1e8 iterations in total.
    const long long TOTAL_SIZE = 100000000;
    // Spread the remainder over the first TOTAL_SIZE % size ranks so the
    // per-rank shares sum exactly to TOTAL_SIZE. (Plain TOTAL_SIZE / size
    // silently dropped up to size-1 iterations whenever size did not divide
    // the total, so the "fixed" problem actually shrank slightly.)
    long long local_size = TOTAL_SIZE / size + (rank < TOTAL_SIZE % size ? 1 : 0);

    // volatile keeps the compiler from optimizing the benchmark loop away.
    volatile double result = 0.0;

    MPI_Barrier(MPI_COMM_WORLD);
    double start = MPI_Wtime();

    for (long long i = 0; i < local_size; i++) {
        result += sin((double)i) * cos((double)i) + sqrt((double)i + 1.0);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    double end = MPI_Wtime();

    double compute_time = end - start;

    // Global reduction: the value itself is not printed, but the reduce
    // forces every rank's result to be materialized. Copy out of the
    // volatile first (MPI needs a plain double*).
    double local_result = result;
    double global_result;
    MPI_Reduce(&local_result, &global_result, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    (void)global_result;  // intentionally unused; silences compiler warnings

    if (rank == 0) {
        std::cout << "进程数: " << size << std::endl;
        std::cout << "总计算规模: " << TOTAL_SIZE << std::endl;
        std::cout << "每进程计算量: " << local_size << std::endl;
        std::cout << "计算时间: " << std::fixed << std::setprecision(4) << compute_time << " s" << std::endl;
        std::cout << "\n注意：运行多次测试（不同进程数）来评估扩展性" << std::endl;
        std::cout << "加速比 = T(1) / T(n)，其中T(1)是单进程时间，T(n)是n进程时间" << std::endl;
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Test 8: detailed collective-communication diagnosis.
 *
 * Purpose: analyze why collective communication may be slow on large
 * multi-NUMA systems (e.g. Kunpeng with 16 NUMA nodes vs Intel with 8).
 *
 * Diagnostics:
 * 1. Barrier synchronization cost — the cost of multi-NUMA-node sync.
 * 2. Broadcast scaling over sub-communicators of increasing size — should
 *    grow as O(log N), not linearly.
 * 3. NUMA distribution of the ranks — detects NUMA imbalance.
 ******************************************************************************/
void test_collective_diagnosis(int rank, int size) {
    print_section(rank, "测试8：集合通信详细诊断");

    if (rank == 0) {
        std::cout << "\n本测试用于诊断集合通信性能问题，特别适用于大规模多NUMA系统" << std::endl;
        std::cout << "鲲鹏系统特点：16个NUMA节点 vs Intel 8个NUMA节点" << std::endl;
        std::cout << "预期：NUMA节点越多，集合通信同步开销越大\n" << std::endl;
    }

    const int ITERATIONS = 100;
    const int msg_size = 1024;  // fixed 1 KB message
    std::vector<char> buffer(msg_size);

    // =========================================================================
    // Diagnostic 1: Barrier synchronization cost.
    // Barriers underpin most collectives; a large cost here indicates slow
    // cross-NUMA synchronization.
    // =========================================================================
    MPI_Barrier(MPI_COMM_WORLD);
    double start = MPI_Wtime();
    for (int i = 0; i < ITERATIONS; i++) {
        MPI_Barrier(MPI_COMM_WORLD);
    }
    double end = MPI_Wtime();
    double barrier_time = (end - start) / ITERATIONS * 1000;  // ms

    std::vector<double> all_barrier_times(size);
    MPI_Gather(&barrier_time, 1, MPI_DOUBLE, all_barrier_times.data(), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        std::cout << "[诊断1: Barrier同步开销分析]" << std::endl;
        std::cout << "说明：Barrier是所有集合通信的基础，开销大说明跨NUMA同步慢" << std::endl;
        double avg = std::accumulate(all_barrier_times.begin(), all_barrier_times.end(), 0.0) / size;
        double min_t = *std::min_element(all_barrier_times.begin(), all_barrier_times.end());
        double max_t = *std::max_element(all_barrier_times.begin(), all_barrier_times.end());

        std::cout << "平均Barrier时间: " << std::fixed << std::setprecision(6) << avg << " ms" << std::endl;
        std::cout << "最小Barrier时间: " << std::fixed << std::setprecision(6) << min_t << " ms" << std::endl;
        std::cout << "最大Barrier时间: " << std::fixed << std::setprecision(6) << max_t << " ms" << std::endl;
        std::cout << "时间差异: " << std::fixed << std::setprecision(2) << (max_t - min_t) / avg * 100 << "%";

        // Heuristic thresholds (ms) for flagging barrier cost.
        if (avg > 0.05) {
            std::cout << " [警告：Barrier开销过大！]" << std::endl;
        } else if (avg > 0.01) {
            std::cout << " [注意：Barrier开销较高]" << std::endl;
        } else {
            std::cout << " [正常]" << std::endl;
        }
        std::cout << std::endl;
    }

    // =========================================================================
    // Diagnostic 2: Broadcast scaling with the number of processes.
    // Ideally the time grows as O(log N); linear growth indicates an
    // algorithm/topology problem.
    // =========================================================================
    if (rank == 0) {
        std::cout << "\n[诊断2: Broadcast扩展性分析（固定1KB消息）]" << std::endl;
        std::cout << "说明：理想情况下时间应该是O(log N)增长，如果是线性增长说明算法有问题" << std::endl;
        std::cout << std::left << std::setw(15) << "进程数"
                  << std::setw(20) << "时间(ms)"
                  << std::setw(20) << "相对基准"
                  << std::setw(25) << "理论O(log N)倍数" << std::endl;
        std::cout << std::string(80, '-') << std::endl;
    }

    std::vector<int> test_sizes = {2, 4, 8, 16, 32, 64, 128, 256, 512};
    // Always measure the full communicator as the largest configuration
    // (the original only special-cased a hard-coded 608-process run).
    if (size > 2 && std::find(test_sizes.begin(), test_sizes.end(), size) == test_sizes.end()) {
        test_sizes.push_back(size);
        std::sort(test_sizes.begin(), test_sizes.end());  // keep the table monotone
    }

    double baseline = 0.0;
    for (int test_size : test_sizes) {
        if (test_size > size) continue;

        // Sub-communicator containing only the first test_size ranks; the
        // rest pass MPI_UNDEFINED and get MPI_COMM_NULL back.
        MPI_Comm sub_comm;
        int color = (rank < test_size) ? 0 : MPI_UNDEFINED;
        MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm);

        double local_time = 0.0;
        if (rank < test_size) {
            MPI_Barrier(sub_comm);
            double t0 = MPI_Wtime();
            for (int i = 0; i < ITERATIONS; i++) {
                MPI_Bcast(buffer.data(), msg_size, MPI_CHAR, 0, sub_comm);
            }
            MPI_Barrier(sub_comm);
            double t1 = MPI_Wtime();
            local_time = (t1 - t0) / ITERATIONS * 1000;  // ms
            MPI_Comm_free(&sub_comm);
        }

        if (rank == 0) {
            if (test_size == 2) baseline = local_time;
            double relative = (baseline > 0) ? local_time / baseline : 1.0;
            // Expected slowdown vs the 2-process baseline for a tree-based
            // broadcast: log2(N) / log2(2) == log2(N).
            double theoretical = (baseline > 0 && test_size > 2) ? log2((double)test_size) : 1.0;

            std::cout << std::left << std::setw(15) << test_size
                      << std::setw(20) << std::fixed << std::setprecision(6) << local_time
                      << std::setw(20) << std::fixed << std::setprecision(2) << relative << "x"
                      << std::setw(25) << std::fixed << std::setprecision(2) << theoretical << "x";

            // Flag results far beyond the theoretical expectation.
            if (test_size > 2 && relative > theoretical * 2) {
                std::cout << " [异常：超过理论2倍]";
            }
            std::cout << std::endl;
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    if (rank == 0) {
        std::cout << "\n解读：如果'相对基准'远大于'理论O(log N)倍数'，说明算法效率低下" << std::endl;
        std::cout << "可能原因：跨NUMA通信、缓存一致性协议开销、MPI实现算法选择不当\n" << std::endl;
    }

    // =========================================================================
    // Diagnostic 3: NUMA distribution of ranks and cross-NUMA analysis.
    // =========================================================================
    int cpu_id = sched_getcpu();
    int numa_id = -1;

    // Resolve the NUMA node of this rank's current CPU by scanning
    // /sys/devices/system/node/node*/cpulist for a range containing cpu_id.
    if (cpu_id >= 0) {
        const char* node_dir = "/sys/devices/system/node";
        DIR* dp = opendir(node_dir);
        if (dp) {
            struct dirent* entry;
            while ((entry = readdir(dp)) != NULL) {
                std::string name(entry->d_name);
                if (name.rfind("node", 0) == 0) {
                    std::string cpulist_path = std::string(node_dir) + "/" + name + "/cpulist";
                    std::ifstream ifs_cpu(cpulist_path);
                    if (!ifs_cpu) continue;
                    std::string cpulist;
                    std::getline(ifs_cpu, cpulist);
                    // cpulist is comma-separated tokens, each "N" or "A-B".
                    std::istringstream ss(cpulist);
                    std::string token;
                    bool found = false;
                    while (std::getline(ss, token, ',')) {
                        size_t dash = token.find('-');
                        if (dash == std::string::npos) {
                            int v = std::stoi(token);
                            if (v == cpu_id) { found = true; break; }
                        } else {
                            int a = std::stoi(token.substr(0, dash));
                            int b = std::stoi(token.substr(dash+1));
                            if (cpu_id >= a && cpu_id <= b) { found = true; break; }
                        }
                    }
                    if (found) {
                        try { numa_id = std::stoi(name.substr(4)); } catch(...) { numa_id = -1; }
                        break;
                    }
                }
            }
            closedir(dp);
        }
    }

    std::vector<int> all_numas(size);
    MPI_Gather(&numa_id, 1, MPI_INT, all_numas.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        std::cout << "[诊断3: NUMA分布统计]" << std::endl;
        std::cout << "说明：进程分布在越多NUMA节点，集合通信的跨节点开销越大" << std::endl;

        std::map<int, int> numa_count;
        for (int n : all_numas) {
            if (n >= 0) numa_count[n]++;
        }

        int total_numa_nodes = (int)numa_count.size();
        std::cout << "使用的NUMA节点数: " << total_numa_nodes << std::endl;
        std::cout << "总进程数: " << size << std::endl;

        // Guard against division by zero when NUMA detection failed on every
        // rank (the original divided by total_numa_nodes unconditionally).
        if (total_numa_nodes > 0) {
            std::cout << "平均每NUMA节点进程数: " << std::fixed << std::setprecision(1)
                      << (double)size / total_numa_nodes << std::endl;

            std::cout << "\n各NUMA节点进程分布（前16个）:" << std::endl;
            int shown = 0;
            int min_procs = size, max_procs = 0;
            for (auto& p : numa_count) {
                if (shown++ < 16) {
                    std::cout << "  NUMA " << std::setw(2) << p.first << ": "
                              << std::setw(3) << p.second << " 个进程" << std::endl;
                }
                min_procs = std::min(min_procs, p.second);
                max_procs = std::max(max_procs, p.second);
            }
            if (shown > 16) {
                std::cout << "  ... (共" << total_numa_nodes << "个NUMA节点)" << std::endl;
            }

            std::cout << "\nNUMA负载均衡: 最少" << min_procs << "个进程, 最多" << max_procs << "个进程";
            if (max_procs - min_procs > 2) {
                std::cout << " [不均衡]" << std::endl;
            } else {
                std::cout << " [均衡]" << std::endl;
            }
        } else {
            std::cout << "未能识别NUMA拓扑，跳过分布统计" << std::endl;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
}

/*******************************************************************************
 * Entry point: initializes MPI, prints a run header, and executes the full
 * diagnostic suite in order, finishing with tuning advice.
 ******************************************************************************/
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    // Run header (rank 0 only).
    if (rank == 0) {
        std::cout << "\n" << std::string(80, '=') << std::endl;
        std::cout << "  MPI并行性能诊断工具" << std::endl;
        std::cout << std::string(80, '=') << std::endl;
        std::cout << "总进程数: " << size << std::endl;
        std::cout << "主机名: " << processor_name << std::endl;
        // __DATE__/__TIME__ expand at COMPILE time, not at run time; the
        // original mislabeled this as the start time ("开始时间").
        std::cout << "编译时间: " << __DATE__ << " " << __TIME__ << std::endl;
        std::cout << std::string(80, '=') << std::endl;
    }

    MPI_Barrier(MPI_COMM_WORLD);

    // Ping-pong needs at least two ranks; everything else runs regardless.
    if (size >= 2) {
        test_point_to_point(rank, size);
    } else {
        if (rank == 0) {
            std::cout << "\n跳过点对点通信测试（至少需要2个进程）" << std::endl;
        }
    }

    test_collective_communication(rank, size);
    test_memory_bandwidth(rank, size);
    test_compute_scalability(rank, size);
    test_load_balance(rank, size);
    test_numa_awareness(rank, size);
    test_strong_scaling(rank, size);
    test_collective_diagnosis(rank, size);

    // Closing summary with diagnostic advice.
    if (rank == 0) {
        std::cout << "\n" << std::string(80, '=') << std::endl;
        std::cout << "  测试完成！" << std::endl;
        std::cout << std::string(80, '=') << std::endl;
        std::cout << "\n诊断建议：" << std::endl;
        std::cout << "1. 如果点对点通信延迟 > 2us，检查网络配置和绑定" << std::endl;
        std::cout << "2. 如果内存带宽差异 > 20%，检查NUMA配置" << std::endl;
        std::cout << "3. 如果负载不均衡度 > 10%，可能是算法或调度问题" << std::endl;
        std::cout << "4. 如果计算扩展性差，运行强扩展性测试对比不同核数" << std::endl;
        std::cout << "5. 检查进程是否正确绑定到不同的CPU核心" << std::endl;
        std::cout << "\n建议多次运行测试（如: 9, 18, 36, 72, 144, 288, 576核）" << std::endl;
        std::cout << "来评估扩展性曲线，判断性能瓶颈来源" << std::endl;
        std::cout << std::string(80, '=') << std::endl;
    }

    MPI_Finalize();
    return 0;
}

