#include "common.hpp"
#include <mpi.h>
#include <sched.h>
#include <numa.h>
#include <numaif.h>
#include <unistd.h>

std::ofstream result;

int Task(int rank,Direction direction, size_t device_id, size_t data_size, int n_warmup, size_t n_loop) {

    printf("rank: %d, device_id: %d\n", rank, device_id);

    //aclrtContext context;
    //ACLCHECK(aclrtCreateContext(&context, rank));

    ACLCHECK(aclrtSetDevice(device_id));

    aclrtStream stream;
    ACLCHECK(aclrtCreateStream(&stream));

    // 申请Host内存, 并初始化, 让每一块的数据都不一样
    char *hostBuff;
    char *deviceBuff;
    char *deviceBuff_host;

    ACLCHECK(aclrtMallocHost((void **)&hostBuff, data_size));
    ACLCHECK(aclrtMalloc((void **)&deviceBuff, data_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACLCHECK(aclrtMallocHost((void **)&deviceBuff_host, data_size));

    // Initialize host buffer with unique sequence based on rank
    for (size_t i = 0; i < data_size; i++) {
        hostBuff[i] = static_cast<char>((i * rank) % 256); // Use rank to create unique pattern
        deviceBuff_host[i] = static_cast<char>((i * rank + 1) % 256); // Use rank to create unique pattern
    }

    ACLCHECK(aclrtMemcpyAsync(deviceBuff, data_size, deviceBuff_host, data_size, ACL_MEMCPY_HOST_TO_DEVICE, stream));
    ACLCHECK(aclrtSynchronizeStream(stream));

    bool verify = false;

    // Declare times vector at the beginning of Task function
    std::vector<double> times(n_loop);

    auto inner_func = [=](bool warm_up, size_t i) -> int {
        //if (!warm_up) ACLCHECK(aclrtRecordEvent(startEvents[i], stream));
        if (direction == Direction::H2D) {
            ACLCHECK(aclrtMemcpyAsync(deviceBuff, data_size, hostBuff, data_size, ACL_MEMCPY_HOST_TO_DEVICE, stream));

            if (verify) 
            {
                // Verify H2D transfer
                char* verify_buff;
                ACLCHECK(aclrtMallocHost((void **)&verify_buff, data_size));
                ACLCHECK(aclrtMemcpyAsync(verify_buff, data_size, deviceBuff, data_size, ACL_MEMCPY_DEVICE_TO_HOST, stream));
                ACLCHECK(aclrtSynchronizeStream(stream));
            
                bool failed = false;
                for (size_t j = 0; j < data_size; j++) {
                    if (verify_buff[j] != hostBuff[j]) {
                        printf("Data verification failed at index %zu! Expected: %d, Got: %d\n", 
                            j, hostBuff[j], verify_buff[j]);
                        failed = true;
                        break;
                    }
                }
                if (!failed) {
                    printf("Data verification H2D passed\n");
                }
                ACLCHECK(aclrtFreeHost(verify_buff));
            }

        } else if (direction == Direction::D2H) {
            ACLCHECK(aclrtMemcpyAsync(hostBuff, data_size, deviceBuff, data_size, ACL_MEMCPY_DEVICE_TO_HOST, stream));

            if (verify) {
                // For D2H, we can compare with the initial device buffer pattern
                bool failed = false;
                for (size_t j = 0; j < data_size; j++) {
                    char expected = static_cast<char>((j * rank + 1) % 256);
                    if (hostBuff[j] != expected) {
                        printf("Data verification failed at index %zu! Expected: %d, Got: %d\n", 
                            j, expected, hostBuff[j]);
                        failed = true;
                        break;
                    }
                }
                if (!failed) {
                    printf("Data verification D2H passed\n");
                }
            }
        } else {
            printf("error\n");
        }
        //if (!warm_up) ACLCHECK(aclrtRecordEvent(endEvents[i], stream));
        return 0;
    };
    // Warmup阶段
    for (size_t i = 0; i < n_warmup; i++) inner_func(true, i);

    MPI_Barrier(MPI_COMM_WORLD);
    double total_start_time = MPI_Wtime();
    for (size_t i = 0; i < n_loop; i++) {
        double start_time = MPI_Wtime();
        inner_func(false, i);
        ACLCHECK(aclrtSynchronizeStream(stream));
        double end_time = MPI_Wtime();
        times[i] = (end_time - start_time) * 1000;  // Convert to milliseconds
    }
    double total_end_time = MPI_Wtime();
    double total_time = (total_end_time - total_start_time) * 1000;

    MPI_Barrier(MPI_COMM_WORLD);

    // Gather timing stats across ranks (convert to float for MPI_Reduce)
    float total_time_f = static_cast<float>(total_time);
    float min_time, max_time, avg_time;
    MPI_Reduce(&total_time_f, &min_time, 1, MPI_FLOAT, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&total_time_f, &max_time, 1, MPI_FLOAT, MPI_MAX, 0, MPI_COMM_WORLD); 
    MPI_Reduce(&total_time_f, &avg_time, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
    

    if (rank == 0) {
        int size;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        avg_time /= size;
        std::cout << "Time stats across ranks - Min: " << min_time 
                  << " ms, Max: " << max_time 
                  << " ms, Avg: " << avg_time << " ms" << std::endl;
    

        std::cout << (direction == Direction::H2D ? "H2D" : "D2H") 
                  << "_Task >> device: " << device_id               //
                  << ", n_loop: " << n_loop << ", data_size: " << GetSizeStr(data_size) << ", time: " << avg_time  //
                  << " ms, bandwidth: " 
                  << GetBandwidthStr((n_loop * (float)data_size / GB) / (avg_time * 1E-3)) 
                  << " GB/s, total_bandwidth: "
                  << GetBandwidthStr((n_loop * (float)data_size / GB) / (avg_time * 1E-3) * size)
                  << " GB/s"
                  << std::endl;
    }

    // 清理数据
    ACLCHECK(aclrtFree(deviceBuff));
    ACLCHECK(aclrtFreeHost(hostBuff));

    ACLCHECK(aclrtDestroyStream(stream));
    ACLCHECK(aclrtResetDevice(device_id));

    return 0;
}

// Print which CPU core, socket, and NUMA node this MPI rank is running on.
// Best-effort diagnostics only: socket lookup shells out to lscpu and falls
// back to "unknown" if unavailable; NUMA node is -1 when libnuma reports no
// NUMA support.
void print_cpu_affinity(int rank) {
    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == -1) {
        printf("Rank %d: Failed to get CPU affinity\n", rank);
        return;
    }

    // CPU this rank is currently scheduled on.
    int current_cpu = sched_getcpu();

    // NUMA node of the current CPU, if libnuma is usable on this system.
    int numa_node = -1;
    if (numa_available() >= 0) {
        numa_node = numa_node_of_cpu(current_cpu);
    }

    // Resolve the socket via lscpu. Anchor with a trailing comma so e.g.
    // CPU 1 does not also match CPUs 10, 11, ... (previous '^%d' did).
    char socket_info[32] = {0};
    char cmd[256];
    snprintf(cmd, sizeof(cmd), "lscpu -p=cpu,socket | grep '^%d,' | cut -f2 -d','", current_cpu);
    FILE* fp = popen(cmd, "r");
    if (fp) {
        if (fgets(socket_info, sizeof(socket_info), fp) == NULL) {
            socket_info[0] = '\0';  // no output — leave buffer empty
        }
        pclose(fp);
    }

    // strtok() returns NULL on an empty string; never hand NULL to %s.
    char *socket = strtok(socket_info, "\n");
    printf("Rank %d \t running on CPU Core: %d, \t Socket: %s \t NUMA Node: %d\n",
        rank, current_cpu, socket ? socket : "unknown", numa_node);
}

// Benchmark driver: one MPI rank per device. Sweeps transfer sizes from
// start_data_size to end_data_size (doubling each step), running Task() for
// each size, and records per-iteration results in a per-rank CSV file.
int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Diagnostics: core/socket/NUMA placement of each rank.
    print_cpu_affinity(rank);

    // Map ranks round-robin onto 8 devices. NOTE(review): with more than 8
    // ranks, two ranks share a device AND a CSV filename — confirm intended.
    size_t device_id = rank % 8;

    int n_warmup = 5;                         // untimed warmup iterations (Task takes int)
    size_t n_loop = 10;                       // timed iterations per size
    size_t start_data_size = (size_t)1 * GB;  // first transfer size tested
    size_t end_data_size = (size_t)8 * GB;    // last transfer size tested
    Direction direction = Direction::H2D;

    // Open the per-rank result file (name encodes device, loop count, direction).
    std::ostringstream filename;
    filename << "perf-single-stream-device" << device_id << "-loop" << n_loop << "-" << (direction == Direction::H2D ? "h2d" : "d2h") << ".csv";
    result.open(filename.str());

    // CSV header: totals first, then one column per timed iteration.
    result << "data_size(byte),total_time(us),total_bandwidth(GB/s)";
    for (size_t i = 0; i < n_loop; i++) result << ",time_" << i + 1 << "(us)";
    for (size_t i = 0; i < n_loop; i++) result << ",bandwidth_" << i + 1 << "(GB/s)";
    result << std::endl;

    // Sweep sizes, doubling until end_data_size is exceeded.
    size_t data_size = start_data_size;
    while (data_size <= end_data_size) {
        Task(rank, direction, device_id, data_size, n_warmup, n_loop);
        data_size *= 2;
    }
    result.flush();
    result.close();

    MPI_Finalize();

    return 0;
}
