// nvme_gds_detailed_timing.cu - 分阶段详细计时的GDS性能测试
#include <cuda_runtime.h>
#include <cufile.h>

#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <sys/statvfs.h>
#include <errno.h>
#include <sys/mman.h>


// Return a monotonic timestamp in milliseconds.
// Uses CLOCK_MONOTONIC, so the value is unaffected by wall-clock changes;
// only the difference between two calls is meaningful.
double get_time_ms() {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    double ms = (double)now.tv_sec * 1e3;
    ms += (double)now.tv_nsec * 1e-6;
    return ms;
}


// Report storage characteristics of `test_path`: its device id, the longest
// matching mount point found in /proc/mounts, whether the backing block
// device looks like NVMe, and whether the filesystem accepts O_DIRECT
// (probed by creating and deleting a temporary file, so the caller needs
// write permission under test_path).
void detect_storage_type(const char* test_path) {
    printf("=== 存储设备检测 ===\n");
    printf("测试路径: %s\n", test_path);

    // Basic device info for the path.
    struct stat st;
    if (stat(test_path, &st) != 0) {
        perror("stat 失败");
        return;
    }
    printf("设备ID: %ld\n", (long)st.st_dev);

    // Scan /proc/mounts for the longest mount point that prefixes test_path.
    FILE* mounts = fopen("/proc/mounts", "r");
    char mount_point[256] = "";
    char device[256] = "";
    if (mounts) {
        char line[512], mp[256], dev[256];
        while (fgets(line, sizeof(line), mounts)) {
            // Bug fix: skip malformed lines. Previously an unchecked sscanf
            // could leave dev/mp holding stale values from a prior iteration
            // and produce a bogus match.
            if (sscanf(line, "%255s %255s", dev, mp) != 2) continue;
            if (strstr(test_path, mp) == test_path) { // test_path starts with mp
                if (strlen(mp) > strlen(mount_point)) {
                    strcpy(mount_point, mp);
                    strcpy(device, dev);
                }
            }
        }
        fclose(mounts);
    }
    printf("挂载点: %s\n", mount_point);
    printf("块设备: %s\n", device);

    if (strncmp(device, "/dev/nvme", 9) == 0) {
        printf("✓ 该挂载点为 NVMe 设备: %s\n", device);
    } else {
        printf("该挂载点不是 NVMe 设备: %s\n", device);
    }

    // Probe O_DIRECT support by creating a scratch file in test_path.
    char test_file[512];
    snprintf(test_file, sizeof(test_file), "%s/direct_io_test", test_path);

    int fd = open(test_file, O_CREAT | O_WRONLY | O_DIRECT, 0644);
    if (fd >= 0) {
        printf("✓ 存储设备支持O_DIRECT\n");
        close(fd);
        unlink(test_file);
    } else {
        printf("✗ 存储设备不支持O_DIRECT，检查下当前用户是否有写权限: %s\n", strerror(errno));
    }

}


// Create a test file of `size_mb` megabytes at `filepath`, preferring
// O_DIRECT (falling back to buffered I/O if the filesystem refuses it) and
// writing 4KB-patterned data in 4096-byte-aligned 1MB blocks.
// Returns true only if every byte was written.
bool create_nvme_optimized_file(const char* filepath, size_t size_mb) {
    printf("\n=== 创建NVMe优化测试文件 ===\n");
    // Bug fix: size_mb is size_t; passing it to a %f conversion is undefined
    // behavior — cast to double explicitly.
    printf("文件: %s (%.0f MB)\n", filepath, (double)size_mb);

    // Prefer Direct I/O; fall back to buffered mode if unsupported.
    int flags = O_CREAT | O_WRONLY | O_TRUNC | O_DIRECT;
    int fd = open(filepath, flags, 0644);

    if (fd < 0) {
        printf("⚠ Direct I/O创建失败，使用标准模式: %s\n", strerror(errno));
        fd = open(filepath, O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0) {
            perror("文件创建失败");
            return false;
        }
    } else {
        printf("✓ 使用Direct I/O模式创建文件\n");
    }

    // 4096-byte-aligned 1MB block satisfies O_DIRECT alignment requirements.
    size_t block_size = 1024 * 1024;
    void *aligned_buffer;

    if (posix_memalign(&aligned_buffer, 4096, block_size) != 0) {
        perror("对齐内存分配失败");
        close(fd);
        return false;
    }

    // Fill a repeating test pattern: one byte value per 4KB page.
    unsigned char *buffer = (unsigned char*)aligned_buffer;
    for (size_t i = 0; i < block_size; i++) {
        buffer[i] = (i / 4096) % 256;
    }

    printf("写入进度: ");
    fflush(stdout);

    size_t total_bytes = size_mb * 1024 * 1024;
    size_t written = 0;
    double start_time = get_time_ms();

    while (written < total_bytes) {
        size_t to_write = (total_bytes - written > block_size) ? block_size : (total_bytes - written);

        ssize_t result = write(fd, buffer, to_write);
        if (result != (ssize_t)to_write) {
            printf("\n写入错误: %s\n", strerror(errno));
            break;
        }

        written += result;

        // Progress marker every 10MB (writes are whole 1MB blocks, so the
        // modulo test fires exactly on 10MB boundaries).
        if (written % (10 * 1024 * 1024) == 0) {
            printf("%.0fMB ", written / (1024.0 * 1024.0));
            fflush(stdout);
        }
    }

    double write_time = get_time_ms() - start_time;

    free(aligned_buffer);
    close(fd);

    // Bug fix: a short write previously still printed "完成" and returned
    // true; report failure instead.
    if (written != total_bytes) {
        return false;
    }

    printf("\n✓ 文件创建完成\n");
    printf("写入时间: %.2f ms\n", write_time);
    printf("写入带宽: %.2f GB/s\n", (written / (1024.0*1024.0*1024.0)) / (write_time / 1000.0));

    // Flush page cache to storage so later timed reads start cold.
    sync();

    return true;
}

// Per-phase timing results (all in milliseconds) for one benchmark run,
// grouped by phase. Each test_* function fills only the fields relevant to
// its transfer path and leaves the rest zero-initialized.
typedef struct {
    // File open
    double file_open_time;

    // Memory allocation
    double cpu_malloc_time;
    double cuda_malloc_time;
    double cuda_malloc_uvm_time;
    double total_malloc_time;

    // cuFile (GDS) handle/buffer registration
    double handle_register_time;
    double buffer_register_time;
    double total_register_time;

    // Data transfer
    double cpu_read_from_disk_time;
    double cudaMemcpy_time;
    double mmap_time;
    double mmap_togpu_time;
    double cpu_file_2_uvm_time;
    double mmap2uvm_time;
    double cpu_touch_sysBuf_time;
    double cufileRead_time;
    double total_data_transfer_time;

    // Touch (CPU/GPU access)
    double total_touch_time;
    double gpu_touch_UVM_time;
    double gpu_touch_cudaMem_time;
    double cpu_touch_uvm_time;
    double cpu_touch_mmap_time; // CPU time spent pre-warming mmap'd pages

    // Cleanup
    double cleanup_time;
    double total_all;
} DetailedTiming;


// Append one result row for `method` to the CSV at `csv_path`, writing the
// header line first when the file is empty. `test_size` is in bytes and is
// reported in MB. Column order mirrors the fprintf argument order below.
void save_timing_to_csv(const char* csv_path, const char* method, DetailedTiming timing, size_t test_size) {
    FILE* f = fopen(csv_path, "a");
    if (!f) {
        printf("无法打开CSV文件: %s\n", csv_path);
        return;
    }
    // Write the header only when the file is empty. Bug fix: the previous
    // process-wide `static header_written` flag suppressed the header in any
    // second (different, empty) CSV path; ftell()==0 alone is sufficient and
    // correct per file.
    if (ftell(f) == 0) {
        fprintf(f,
            "method,test_size_MB,"
            "file_open,"
            "cpu_malloc,cuda_malloc,cuda_malloc_uvm,"
            "total_malloc,total_data_transfer,total_register,total_touch,"
            "handle_register,buffer_register,"
            "cudaMemcpy,mmap,mmap_togpu,"
            "cpu_file_2_uvm,mmap2uvm,cpu_touch_sysBuf,"
            "cpu_read_from_disk,cufileRead,"
            "gpu_touch_uvm,gpu_touch_cudaMem,cpu_touch_uvm,cpu_touch_mmap,"
            "cleanup,total_all\n"
        );
    }

    fprintf(f, "%s,%.2f,"
               "%.3f," // file_open_time
               "%.3f,%.3f,%.3f," // cpu_malloc_time, cuda_malloc_time, cuda_malloc_uvm_time
               "%.3f,%.3f,%.3f,%.3f," // total_malloc_time, total_data_transfer_time, total_register_time, total_touch_time
               "%.3f,%.3f," // handle_register_time, buffer_register_time
               "%.3f,%.3f,%.3f," // cudaMemcpy_time, mmap_time, mmap_togpu_time
               "%.3f,%.3f,%.3f," // cpu_file_2_uvm_time, mmap2uvm_time, cpu_touch_sysBuf_time
               "%.3f,%.3f," // cpu_read_from_disk_time, cufileRead_time
               "%.3f,%.3f,%.3f,%.3f," // gpu/cpu touch times
               "%.3f,%.3f\n", // cleanup_time, total_all
        method,
        test_size / (1024.0 * 1024.0),
        timing.file_open_time,
        timing.cpu_malloc_time, timing.cuda_malloc_time, timing.cuda_malloc_uvm_time,
        timing.total_malloc_time, timing.total_data_transfer_time, timing.total_register_time, timing.total_touch_time, 
        timing.handle_register_time, timing.buffer_register_time,
        timing.cudaMemcpy_time, timing.mmap_time, timing.mmap_togpu_time, 
        timing.cpu_file_2_uvm_time, timing.mmap2uvm_time, timing.cpu_touch_sysBuf_time,
        timing.cpu_read_from_disk_time, timing.cufileRead_time,
        timing.gpu_touch_UVM_time, timing.gpu_touch_cudaMem_time,timing.cpu_touch_uvm_time, timing.cpu_touch_mmap_time,
        timing.cleanup_time, timing.total_all
    );
    fclose(f);
}


// Drop the kernel page cache, dentries, and inodes so that subsequent reads
// hit storage instead of RAM. Requires passwordless sudo; best-effort only.
void clear_caches() {
    printf("清除文件系统缓存...\n");
    int rc = system("sudo sh -c 'sync; echo 3 > /proc/sys/vm/drop_caches'");
    (void)rc; // best-effort: on failure the next read may simply be warm
    usleep(100000); // give the kernel 100ms to settle
    // usleep(1000000); // alternative: wait 1000ms
}


// Touch every byte of `buf` (one thread per byte). Kept for reference only —
// superseded by touch_gpu_buffer_page, which touches one byte per page to
// mirror the CPU-side per-page access pattern.
__global__ void touch_gpu_buffer(char* buf, size_t n) {
    // Cast before multiplying: blockIdx.x * blockDim.x is evaluated in
    // unsigned int and would wrap for grids beyond 2^32 total threads.
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] += 1; // any access suffices
}


// Touch one byte per `page_size`-byte chunk of `buf` so each page is
// accessed exactly once; launch with at least ceil(n / page_size) threads.
__global__ void touch_gpu_buffer_page(char* buf, size_t n, size_t page_size) {
    // (size_t) cast avoids unsigned-int wraparound of blockIdx.x * blockDim.x
    // on very large grids.
    size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t i = idx * page_size;
    if (i < n) buf[i] += 1;
}


// Baseline GPU path: buffered read() into a malloc'd host buffer, then
// cudaMemcpy to device memory. Each phase (open, allocation, transfer,
// GPU touch, cleanup) is timed separately into a DetailedTiming, which is
// returned (zero-filled fields on early failure).
DetailedTiming test_traditional__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：传统 read + cudaMemcpy ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();
    
    // 1. Open the file (buffered, no O_DIRECT).
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Allocate host and device buffers (timed separately).
    t1 = get_time_ms();
    char *host_buf = (char*)malloc(test_size);
    t2 = get_time_ms();
    timing.cpu_malloc_time = t2 - t1;

    t1 = get_time_ms();
    void *gpu_buf;
    cudaMalloc(&gpu_buf, test_size);
    t2 = get_time_ms();
    timing.cuda_malloc_time = t2 - t1;
    timing.total_malloc_time = timing.cpu_malloc_time + timing.cuda_malloc_time;

    printf("2.1 内存分配(Host): %.3f ms\n", timing.cpu_malloc_time);
    printf("2.2 内存分配(GPU): %.3f ms\n", timing.cuda_malloc_time);
    printf("2. 内存分配(HOST+GPU): %.3f ms\n", timing.total_malloc_time);

    // 3. Data transfer: disk -> host via read(), then host -> device via
    //    cudaMemcpy.
    double t_read1 = get_time_ms();
    ssize_t bytes_read = read(fd, host_buf, test_size);
    double t_read2 = get_time_ms();
    timing.cpu_read_from_disk_time = t_read2 - t_read1;
    
    if (bytes_read != (ssize_t)test_size) {
        printf("读取失败: %zd bytes\n", bytes_read);
    }
    
    double t_memcpy1 = get_time_ms();
    cudaMemcpy(gpu_buf, host_buf, test_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    double t_memcpy2 = get_time_ms();
    timing.cudaMemcpy_time = t_memcpy2 - t_memcpy1;
    
    timing.total_data_transfer_time = timing.cpu_read_from_disk_time + timing.cudaMemcpy_time;
    
    printf("3.1 read: %.3f ms\n", timing.cpu_read_from_disk_time);
    printf("3.2 cudaMemcpy: %.3f ms\n", timing.cudaMemcpy_time);
    printf("3. 数据传输(read+memcpy): %.3f ms\n", timing.total_data_transfer_time);

    // 4. GPU touch: one access per 4KB page, mirroring the CPU-side
    //    per-page touch loops used by the other benchmarks.
    double t_gpu_access1 = get_time_ms();

    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)gpu_buf, test_size, page_size);

    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_cudaMem_time = t_gpu_access2 - t_gpu_access1;
    timing.total_touch_time = timing.gpu_touch_cudaMem_time;
    printf("4. GPU实际访问cudaMem数据: %.3f ms\n", timing.gpu_touch_cudaMem_time);

    // 5. Cleanup (also timed).
    t1 = get_time_ms();
    free(host_buf);
    cudaFree(gpu_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);
    
    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("gpu访问cuda memory速度: %.2f GB/s\n", touch_bandwidth);

    return timing;
}


// Baseline CPU-only path: buffered read() into a malloc'd host buffer, then
// a per-page CPU touch — no GPU involvement. Serves as the reference point
// for the GPU transfer variants. Returns per-phase timings (zero-filled on
// early failure).
DetailedTiming test_traditional__cpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：传统 read (CPU内存) ===\n");
    DetailedTiming timing = {0};
    double total_start = get_time_ms();
    
    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Host allocation only.
    t1 = get_time_ms();
    char *host_buf = (char*)malloc(test_size);
    t2 = get_time_ms();
    timing.cpu_malloc_time = t2 - t1;
    timing.total_malloc_time = timing.cpu_malloc_time;
    printf("2.1 内存分配(Host): %.3f ms\n", timing.cpu_malloc_time);

    // 3. Data transfer: disk -> host via read().
    double t_read1 = get_time_ms();
    ssize_t bytes_read = read(fd, host_buf, test_size);
    double t_read2 = get_time_ms();
    timing.cpu_read_from_disk_time = t_read2 - t_read1;
    
    if (bytes_read != (ssize_t)test_size) {
        printf("读取失败: %zd bytes\n", bytes_read);
    }

    timing.total_data_transfer_time = timing.cpu_read_from_disk_time;
    
    printf("3.1 read: %.3f ms\n", timing.cpu_read_from_disk_time);
    printf("3. 数据传输(read): %.3f ms\n", timing.total_data_transfer_time);

    // 4. CPU touch: read one byte per 4KB page; the volatile accumulator
    //    keeps the compiler from eliding the loads. After read() the data
    //    is already in the host buffer, so this measures memory access only.
    t1 = get_time_ms();
    volatile char sum = 0;
    for (size_t i = 0; i < test_size; i += 4096) sum += host_buf[i];
    t2 = get_time_ms();
    timing.cpu_touch_sysBuf_time = t2 - t1;
    timing.total_touch_time = timing.cpu_touch_sysBuf_time;
    // Bug fix: previously printed gpu_touch_cudaMem_time, which is never set
    // on this CPU-only path and therefore always showed 0.000.
    printf("4. cpu实际访问cpu buffer数据: %.3f ms\n", timing.cpu_touch_sysBuf_time);

    // 5. Cleanup.
    t1 = get_time_ms();
    free(host_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);
    
    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("CPU访问system cache速度: %.2f GB/s\n", touch_bandwidth);

    return timing;
}


// GDS path: register the file and a 64KB-aligned device buffer with cuFile,
// then cuFileRead() straight from storage into GPU memory (no host bounce
// buffer). Phases timed: open, cudaMalloc, handle/buffer registration,
// cuFileRead, per-page GPU touch, cleanup. Returns per-phase timings
// (zero-filled on early failure).
DetailedTiming test_cuFile_read__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：GDS cuFileRead ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();
    
    // 1. Open with O_DIRECT (needed for the true GDS path); fall back to
    //    buffered mode if the filesystem refuses it.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY | O_DIRECT);
    if (fd < 0) {
        printf("Direct I/O打开失败，使用标准模式\n");
        fd = open(filepath, O_RDONLY);
        if (fd < 0) {
            perror("文件打开失败");
            return timing;
        }
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开(Direct I/O): %.3f ms\n", timing.file_open_time);
    
    // 2. Allocate device memory with 64KB of slack, then round the working
    //    pointer up to a 64KB boundary before registering it with cuFile.
    //    (gpu_buf_raw is kept for cudaFree; gpu_buf is the aligned view.)
    double t_cuda_malloc1 = get_time_ms();
    void *gpu_buf_raw;
    cudaMalloc(&gpu_buf_raw, test_size + 65536);
    void *gpu_buf = (void*)(((uintptr_t)gpu_buf_raw + 65535) & ~65535UL);
    double t_cuda_malloc2 = get_time_ms();
    timing.cuda_malloc_time = t_cuda_malloc2 - t_cuda_malloc1;
    
    // Register the file descriptor with the cuFile driver.
    double t_handle_reg1 = get_time_ms();
    CUfileDescr_t cf_descr = {};
    cf_descr.handle.fd = fd;
    cf_descr.type = CU_FILE_HANDLE_TYPE_OPAQUE_FD;
    CUfileHandle_t cf_handle;
    CUfileError_t status = cuFileHandleRegister(&cf_handle, &cf_descr);
    double t_handle_reg2 = get_time_ms();
    timing.handle_register_time = t_handle_reg2 - t_handle_reg1;
    
    if (status.err != CU_FILE_SUCCESS) {
        printf("文件句柄注册失败: %d\n", status.err);
        cudaFree(gpu_buf_raw);
        close(fd);
        return timing;
    }
    
    // Pin/register the device buffer for DMA.
    double t_buf_reg1 = get_time_ms();
    status = cuFileBufRegister(gpu_buf, test_size, 0);
    double t_buf_reg2 = get_time_ms();
    timing.buffer_register_time = t_buf_reg2 - t_buf_reg1;
    
    if (status.err != CU_FILE_SUCCESS) {
        printf("GPU缓冲区注册失败: %d\n", status.err);
        cuFileHandleDeregister(cf_handle);
        cudaFree(gpu_buf_raw);
        close(fd);
        return timing;
    }
    
    // Registration cost is folded into the "allocation" bucket here.
    timing.total_malloc_time = timing.cuda_malloc_time + timing.handle_register_time + timing.buffer_register_time;
    
    printf("2.1 CUDA内存分配: %.3f ms\n", timing.cuda_malloc_time);
    printf("2.2 cuFile句柄注册: %.3f ms\n", timing.handle_register_time);
    printf("2.3 cuFile buffer注册: %.3f ms\n", timing.buffer_register_time);
    printf("2. 内存分配+注册总计: %.3f ms\n", timing.total_malloc_time);

    // 3. Data transfer: cuFileRead from file offset 0 into buffer offset 0.
    t1 = get_time_ms();
    ssize_t bytes_read = cuFileRead(cf_handle, gpu_buf, test_size, 0, 0);
    cudaDeviceSynchronize();
    t2 = get_time_ms();
    timing.cufileRead_time = t2 - t1;
    timing.total_data_transfer_time = timing.cufileRead_time;
    printf("3. 数据传输(cuFileRead): %.3f ms\n", timing.total_data_transfer_time);
    
    if (bytes_read != (ssize_t)test_size) {
        printf("读取失败: %zd bytes\n", bytes_read);
    }

    // 4. GPU touch: one access per 4KB page of the transferred data.
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)gpu_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_cudaMem_time = t_gpu_access2 - t_gpu_access1;
    timing.total_touch_time = timing.gpu_touch_cudaMem_time;
    printf("4. GPU实际访问cuFile数据: %.3f ms\n", timing.gpu_touch_cudaMem_time);

    // 5. Cleanup: deregister buffer and handle before freeing/closing.
    t1 = get_time_ms();
    cuFileBufDeregister(gpu_buf);
    cuFileHandleDeregister(cf_handle);
    cudaFree(gpu_buf_raw);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输（cuFile read）带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("GPU访问（cuFile buffer）带宽: %.2f GB/s\n", touch_bandwidth);
    
    return timing;
}


// mmap path (cold): map the file read-only, then cudaMemcpy directly from
// the mapping to device memory — page faults during the copy pull data from
// disk, so the memcpy time includes the file read. Returns per-phase
// timings (zero-filled on early failure).
DetailedTiming test_mmapFile__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：mmap + cudaMemcpy ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();
    
    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);
    
    // 2. Map the file and allocate GPU memory (timed separately).
    double t_mmap1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    double t_mmap2 = get_time_ms();
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        close(fd);
        return timing;
    }
    timing.mmap_time = t_mmap2 - t_mmap1;
    printf("2.1 内存映射(mmap): %.3f ms\n", timing.mmap_time);
    
    double t_cuda_malloc1 = get_time_ms();
    void* gpu_buf;
    cudaMalloc(&gpu_buf, test_size);
    double t_cuda_malloc2 = get_time_ms();
    timing.cuda_malloc_time = t_cuda_malloc2 - t_cuda_malloc1;
    printf("2.2 GPU内存分配(cudaMalloc): %.3f ms\n", timing.cuda_malloc_time);
    
    timing.total_malloc_time = timing.mmap_time + timing.cuda_malloc_time;
    printf("2. 内存映射+GPU分配总计: %.3f ms\n", timing.total_malloc_time);

    // 3. Data transfer: cudaMemcpy straight from the mapping (faults pages
    //    in from disk as it goes — no explicit read() or pre-warm here).
    t1 = get_time_ms();
    cudaMemcpy(gpu_buf, mmap_ptr, test_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    t2 = get_time_ms();
    timing.cudaMemcpy_time = t2 - t1;
    timing.total_data_transfer_time = timing.cudaMemcpy_time; 
    printf("3. 数据传输(mmap->GPU): %.3f ms\n", timing.total_data_transfer_time);

    // 4. GPU touch: one access per 4KB page.
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)gpu_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_cudaMem_time = t_gpu_access2 - t_gpu_access1;
    printf("4. GPU访问mmap到cuda内存的数据: %.3f ms\n", timing.gpu_touch_cudaMem_time);
    timing.total_touch_time = timing.gpu_touch_cudaMem_time;

    // 5. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(gpu_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("GPU访问带宽: %.2f GB/s\n", touch_bandwidth);
    
    return timing;
}


// mmap path (warm): like test_mmapFile__gpu, but first touches one byte per
// 4KB page of the mapping from the CPU to fault everything into the page
// cache, so the subsequent cudaMemcpy measures memory-to-device bandwidth
// rather than disk I/O. The warm-up time is counted as part of the total
// data-transfer time. Returns per-phase timings (zero-filled on failure).
DetailedTiming test_mmapFile_warm__gpu(const char* filepath, size_t test_size) {
    // Bug fix: the banner previously matched the non-warm variant exactly,
    // making the console output ambiguous.
    printf("\n=== 详细计时：mmap(预热) + cudaMemcpy ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();
    
    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);
    
    // 2. Map the file and allocate GPU memory (timed separately).
    double t_mmap1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        close(fd);
        return timing;
    }
    double t_mmap2 = get_time_ms();
    timing.mmap_time = t_mmap2 - t_mmap1;
    printf("2.1 内存映射: %.3f ms\n", timing.mmap_time);
    
    double t_cuda_malloc1 = get_time_ms();
    void* gpu_buf;
    cudaMalloc(&gpu_buf, test_size);
    double t_cuda_malloc2 = get_time_ms();
    timing.cuda_malloc_time = t_cuda_malloc2 - t_cuda_malloc1;
    printf("2.2 GPU内存分配(cudaMalloc): %.3f ms\n", timing.cuda_malloc_time);
    
    timing.total_malloc_time = timing.mmap_time + timing.cuda_malloc_time;
    printf("2. 内存映射+GPU分配总计: %.3f ms\n", timing.total_malloc_time);
    
    // Warm-up: touch one byte per 4096-byte page (the common page size) to
    // fault the whole mapping into the page cache. The volatile accumulator
    // keeps the compiler from eliding the loads.
    // TODO: the CPU may prefetch ahead of this stride — is 4096 the right
    // step to model per-page cost?
    double t_cpu_touch1 = get_time_ms();
    volatile char sum = 0;
    for (size_t i = 0; i < test_size; i += 4096) sum += ((char*)mmap_ptr)[i];
    double t_cpu_touch2 = get_time_ms();
    timing.cpu_touch_mmap_time = t_cpu_touch2 - t_cpu_touch1;
    // Bug fix: this step was mislabeled "2.1", clashing with the mmap step.
    printf("2.3 预热(访问mmap): %.3f ms\n", timing.cpu_touch_mmap_time);

    // 3. Data transfer: cudaMemcpy from the now-warm mapping; the reported
    //    total includes the warm-up cost above.
    t1 = get_time_ms();
    cudaMemcpy(gpu_buf, mmap_ptr, test_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    t2 = get_time_ms();
    timing.cudaMemcpy_time = t2 - t1;
    timing.total_data_transfer_time = timing.cpu_touch_mmap_time + timing.cudaMemcpy_time; 
    printf("3. 数据传输(预热mmap + mmap->GPU): %.3f ms\n", timing.total_data_transfer_time);

    // 4. GPU touch: one access per 4KB page.
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)gpu_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_cudaMem_time = t_gpu_access2 - t_gpu_access1;
    printf("4. GPU访问mmap到cuda内存的数据: %.3f ms\n", timing.gpu_touch_cudaMem_time);
    timing.total_touch_time = timing.gpu_touch_cudaMem_time;

    // 5. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(gpu_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("GPU访问带宽: %.2f GB/s\n", touch_bandwidth);
    
    return timing;
}


// UVM + read path: read() the file into a cudaMallocManaged buffer; the GPU
// can then use that UVM memory directly. Unlike the read+cudaMemcpy, mmap+
// cudaMemcpy, and GDS paths above, the data is not necessarily physically
// resident in GPU memory — the per-page GPU touch below may trigger UVM
// page migration.
DetailedTiming test_cpuFd_2_uvm__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：UVM + read ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();
    
    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);
    
    // 2. Managed (UVM) allocation.
    t1 = get_time_ms();
    char* uvm_buf;
    cudaMallocManaged(&uvm_buf, test_size);
    t2 = get_time_ms();
    timing.cuda_malloc_uvm_time = t2 - t1;
    timing.total_malloc_time = timing.cuda_malloc_uvm_time;

    printf("2. UVM内存分配: %.3f ms\n", timing.total_malloc_time);
    
    // 3. Data transfer: read() from disk directly into the UVM buffer.
    t1 = get_time_ms();
    ssize_t bytes_read = read(fd, uvm_buf, test_size);
    cudaDeviceSynchronize(); // ensure the UVM buffer is GPU-accessible
    t2 = get_time_ms();
    timing.cpu_file_2_uvm_time = t2 - t1;
    timing.total_data_transfer_time = timing.cpu_file_2_uvm_time;
    printf("3. 数据传输(read->UVM): %.3f ms\n", timing.total_data_transfer_time);
    
    if (bytes_read != (ssize_t)test_size) {
        printf("读取失败: %zd bytes\n", bytes_read);
    }

    // GPU touch of the freshly-read UVM buffer: measures how fast the GPU
    // can access it (may include UVM host-to-device page migration).
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)uvm_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_UVM_time = t_gpu_access2 - t_gpu_access1;
    timing.total_touch_time = timing.gpu_touch_UVM_time;
    printf("3.1 GPU访问从cpu文件read过来的UVM: %.3f ms\n", timing.gpu_touch_UVM_time);

    // 4. Cleanup.
    t1 = get_time_ms();
    cudaFree(uvm_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("4. 清理: %.3f ms\n", timing.cleanup_time);
    
    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);
    
    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("gpu读取uvm带宽: %.2f GB/s\n", touch_bandwidth);
    
    return timing;
}


// UVM CPU path: cudaMallocManaged a buffer, memcpy the mmap'd file into it,
// then touch one byte per 4KB page from the CPU. Measures how fast the CPU
// can consume file data staged into managed memory. Returns per-phase
// timings (zero-filled on early failure).
DetailedTiming test_mmapedFile_to_uvmC__cpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：mmaped文件到UVM，CPU访问 ===\n");
    DetailedTiming timing = {0};
    
    double total_start = get_time_ms();

    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Managed (UVM) allocation.
    t1 = get_time_ms();
    char* uvm_buf;
    cudaMallocManaged(&uvm_buf, test_size);
    t2 = get_time_ms();
    timing.cuda_malloc_uvm_time = t2 - t1;
    timing.total_malloc_time = timing.cuda_malloc_uvm_time;
    printf("2. UVM内存分配: %.3f ms\n", timing.total_malloc_time);

    // 3. mmap the file, then memcpy the mapping into the UVM buffer.
    t1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        // Bug fix: release the UVM buffer and fd instead of leaking both
        // on this error path.
        cudaFree(uvm_buf);
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.mmap_time = t2 - t1;
    printf("3.1 内存映射(mmap): %.3f ms\n", timing.mmap_time);

    t1 = get_time_ms();
    memcpy(uvm_buf, mmap_ptr, test_size);
    cudaDeviceSynchronize();
    t2 = get_time_ms();
    timing.mmap2uvm_time = t2 - t1;
    timing.total_data_transfer_time = timing.mmap2uvm_time;
    printf("3. 数据传输(mmap->UVM): %.3f ms\n", timing.total_data_transfer_time);

    // 4. CPU touch: one byte per 4KB page; the volatile accumulator keeps
    //    the compiler from eliding the loads.
    t1 = get_time_ms();
    volatile char sum = 0;
    for (size_t i = 0; i < test_size; i += 4096) {
        sum += uvm_buf[i];
    }
    t2 = get_time_ms();
    timing.cpu_touch_uvm_time = t2 - t1;
    timing.total_touch_time = timing.cpu_touch_uvm_time;
    printf("4. CPU访问UVM buffer触发数据加载: %.3f ms\n", timing.total_touch_time);

    // 5. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(uvm_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);

    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("CPU读取UVM速度: %.2f GB/s\n", touch_bandwidth);

    return timing;
}


// Detailed timing: mmap the file, memcpy its contents into a UVM buffer,
// then have the GPU touch the buffer.
// Unlike read+cudaMemcpy, mmap+cudaMemcpy and GDS, the copied data is not
// guaranteed to be physically resident in GPU memory after the copy; the GPU
// touch may trigger on-demand UVM page migration.
DetailedTiming test_mmapedFile_2_uvmC__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：mmaped文件到UVM，gpu访问 ===\n");
    DetailedTiming timing = {0};

    double total_start = get_time_ms();

    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Allocate the UVM (managed) buffer.
    t1 = get_time_ms();
    char* uvm_buf = NULL;
    cudaError_t cuda_rc = cudaMallocManaged(&uvm_buf, test_size);
    if (cuda_rc != cudaSuccess) {
        // Fix: allocation failure was previously ignored and would crash in memcpy.
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(cuda_rc));
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.cuda_malloc_uvm_time = t2 - t1;
    timing.total_malloc_time = timing.cuda_malloc_uvm_time;
    printf("2. UVM内存分配: %.3f ms\n", timing.total_malloc_time);

    // 3. mmap the file, then copy its contents into the UVM buffer.
    t1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        cudaFree(uvm_buf);  // Fix: UVM buffer and fd previously leaked on this path.
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.mmap_time = t2 - t1;
    printf("3.1 内存映射(mmap): %.3f ms\n", timing.mmap_time);

    t1 = get_time_ms();
    // NOTE: this DOES copy every byte (the old comment claiming "no copy, only
    // mapping" was wrong); page faults on mmap_ptr pull file data in as it runs.
    memcpy(uvm_buf, mmap_ptr, test_size);
    cudaDeviceSynchronize(); // ensure UVM is consistent before the GPU touches it
    t2 = get_time_ms();
    timing.mmap2uvm_time = t2 - t1;
    timing.total_data_transfer_time = timing.mmap2uvm_time;
    printf("3. 数据传输(mmap->UVM): %.3f ms\n", timing.total_data_transfer_time);

    // Measure how fast the GPU can read the populated UVM buffer; one thread
    // touches one 4 KiB page (may involve page migration, not just a read).
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)uvm_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_UVM_time = t_gpu_access2 - t_gpu_access1;
    timing.total_touch_time = timing.gpu_touch_UVM_time;
    printf("3.1 GPU访问mmap拷贝过来的UVM: %.3f ms\n", timing.gpu_touch_UVM_time);

    // 4. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(uvm_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("4. 清理: %.3f ms\n", timing.cleanup_time);

    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);

    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("gpu读取uvm带宽: %.2f GB/s\n", touch_bandwidth);

    return timing;
}



// Detailed timing: mmap the file, cudaMemcpy its contents into a UVM buffer,
// then touch the buffer from the CPU.
// NOTE(review): despite the "uvmG" name, the prefetch-to-GPU step is commented
// out below, so this currently behaves like the "uvmC" variant except that
// cudaMemcpy (not memcpy) performs the copy — confirm intent with the author.
DetailedTiming test_mmapedFile_to_uvmG__cpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：mmaped文件到UVM，CPU访问 ===\n");
    DetailedTiming timing = {0};

    double total_start = get_time_ms();

    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Allocate the UVM (managed) buffer.
    t1 = get_time_ms();
    char* uvm_buf = NULL;
    cudaError_t cuda_rc = cudaMallocManaged(&uvm_buf, test_size);
    if (cuda_rc != cudaSuccess) {
        // Fix: allocation failure was previously ignored.
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(cuda_rc));
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.cuda_malloc_uvm_time = t2 - t1;
    timing.total_malloc_time = timing.cuda_malloc_uvm_time;
    printf("2. UVM内存分配: %.3f ms\n", timing.total_malloc_time);

    // 3. mmap the file, then copy into the UVM buffer with cudaMemcpy.
    t1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        cudaFree(uvm_buf);  // Fix: UVM buffer and fd previously leaked on this path.
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.mmap_time = t2 - t1;
    printf("3.1 内存映射(mmap): %.3f ms\n", timing.mmap_time);

    t1 = get_time_ms();

    // Deliberately disabled GPU prefetch (kept for experimentation):
    // int dev;
    // cudaGetDevice(&dev);
    // cudaMemPrefetchAsync(uvm_buf, test_size, dev, 0);
    // cudaDeviceSynchronize();

    cudaMemcpy(uvm_buf, mmap_ptr, test_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    t2 = get_time_ms();
    timing.mmap2uvm_time = t2 - t1;
    timing.total_data_transfer_time = timing.mmap2uvm_time;
    printf("3. 数据传输(mmap->UVM): %.3f ms\n", timing.total_data_transfer_time);

    // 4. CPU touches the UVM buffer page-by-page, forcing the data to be
    //    actually resident on the host side.
    t1 = get_time_ms();
    volatile char sum = 0;
    for (size_t i = 0; i < test_size; i += 4096) {
        sum += uvm_buf[i];
    }
    t2 = get_time_ms();
    timing.cpu_touch_uvm_time = t2 - t1;
    timing.total_touch_time = timing.cpu_touch_uvm_time;
    printf("4. CPU访问UVM buffer触发数据加载: %.3f ms\n", timing.total_touch_time);

    // 5. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(uvm_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("5. 清理: %.3f ms\n", timing.cleanup_time);

    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("CPU读取UVM速度: %.2f GB/s\n", touch_bandwidth);

    return timing;
}


// Detailed timing: mmap the file, prefetch the UVM buffer to the GPU, copy the
// file contents in with cudaMemcpy, then have the GPU touch the buffer.
// The prefetch moves the (still uninitialized) UVM pages to device memory so
// the subsequent H2D cudaMemcpy lands on GPU-resident pages.
DetailedTiming test_mmapedFile_2_uvmG__gpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：mmaped文件到UVM，gpu访问 ===\n");
    DetailedTiming timing = {0};

    double total_start = get_time_ms();

    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;
    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);

    // 2. Allocate the UVM (managed) buffer.
    t1 = get_time_ms();
    char* uvm_buf = NULL;
    cudaError_t cuda_rc = cudaMallocManaged(&uvm_buf, test_size);
    if (cuda_rc != cudaSuccess) {
        // Fix: allocation failure was previously ignored.
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(cuda_rc));
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.cuda_malloc_uvm_time = t2 - t1;
    timing.total_malloc_time = timing.cuda_malloc_uvm_time;
    printf("2. UVM内存分配: %.3f ms\n", timing.total_malloc_time);

    // 3. mmap the file, prefetch UVM to the GPU, then copy the data in.
    t1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        cudaFree(uvm_buf);  // Fix: UVM buffer and fd previously leaked on this path.
        close(fd);
        return timing;
    }
    t2 = get_time_ms();
    timing.mmap_time = t2 - t1;
    printf("3.1 内存映射(mmap): %.3f ms\n", timing.mmap_time);

    t1 = get_time_ms();

    int dev;
    cudaGetDevice(&dev);
    cudaMemPrefetchAsync(uvm_buf, test_size, dev, 0);
    cudaDeviceSynchronize();

    // NOTE: this DOES copy every byte from the mapping into the UVM buffer
    // (the old comment claiming "no copy, only mapping" was wrong); reading
    // mmap_ptr page-faults the file data in as the copy proceeds.
    cudaMemcpy(uvm_buf, mmap_ptr, test_size, cudaMemcpyHostToDevice);

    cudaDeviceSynchronize(); // ensure the copy finished before the GPU touch
    t2 = get_time_ms();
    timing.mmap2uvm_time = t2 - t1;
    timing.total_data_transfer_time = timing.mmap2uvm_time;
    printf("3. 数据传输(mmap->UVM): %.3f ms\n", timing.total_data_transfer_time);

    // Measure how fast the GPU reads the populated UVM buffer; one thread
    // touches one 4 KiB page.
    double t_gpu_access1 = get_time_ms();
    size_t page_size = 4096;
    size_t num_pages = (test_size + page_size - 1) / page_size;
    touch_gpu_buffer_page<<<(num_pages+255)/256, 256>>>((char*)uvm_buf, test_size, page_size);
    cudaDeviceSynchronize();
    double t_gpu_access2 = get_time_ms();
    timing.gpu_touch_UVM_time = t_gpu_access2 - t_gpu_access1;
    timing.total_touch_time = timing.gpu_touch_UVM_time;
    printf("3.1 GPU访问mmap拷贝过来的UVM: %.3f ms\n", timing.gpu_touch_UVM_time);

    // 4. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    cudaFree(uvm_buf);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;
    printf("4. 清理: %.3f ms\n", timing.cleanup_time);

    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);

    double bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_data_transfer_time / 1000.0);
    printf("数据传输带宽: %.2f GB/s\n", bandwidth);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("gpu读取uvm带宽: %.2f GB/s\n", touch_bandwidth);

    return timing;
}


// Detailed timing: CPU reads an mmap'd file directly (page-granular touch,
// accumulating one byte per page so the reads cannot be optimized away).
DetailedTiming test_mmapedFile__cpu(const char* filepath, size_t test_size) {
    printf("\n=== 详细计时：cpu读取（算上加一计算）mmap文件 ===\n");
    DetailedTiming timing = {0};

    double total_start = get_time_ms();

    // 1. Open the file.
    double t1 = get_time_ms();
    int fd = open(filepath, O_RDONLY);
    if (fd < 0) {
        perror("文件打开失败");
        return timing;
    }
    double t2 = get_time_ms();
    timing.file_open_time = t2 - t1;

    // 2. Create the mmap mapping.
    t1 = get_time_ms();
    void* mmap_ptr = mmap(NULL, test_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mmap_ptr == MAP_FAILED) {
        perror("mmap 失败");
        close(fd);  // Fix: fd previously leaked on this path.
        return timing;
    }
    t2 = get_time_ms();
    timing.mmap_time = t2 - t1;
    timing.total_malloc_time = timing.mmap_time;

    // 3. Trigger the actual data load by touching one byte per 4 KiB page.
    t1 = get_time_ms();
    volatile char sum = 0;
    char* data = (char*)mmap_ptr;
    for (size_t i = 0; i < test_size; i += 4096) {
        sum += data[i];
    }
    t2 = get_time_ms();
    timing.cpu_touch_sysBuf_time = t2 - t1;
    timing.total_touch_time = timing.cpu_touch_sysBuf_time;

    printf("1. 文件打开: %.3f ms\n", timing.file_open_time);
    printf("2. mmap映射: %.3f ms\n", timing.total_malloc_time);
    printf("3. cpu访问mmap内存触发数据加载: %.3f ms\n", timing.total_touch_time);

    // 4. Cleanup.
    t1 = get_time_ms();
    munmap(mmap_ptr, test_size);
    close(fd);
    t2 = get_time_ms();
    timing.cleanup_time = t2 - t1;

    timing.total_all = get_time_ms() - total_start;
    printf("总时间: %.3f ms\n", timing.total_all);

    double touch_bandwidth = (test_size / (1024.0*1024.0*1024.0)) / (timing.total_touch_time / 1000.0);
    printf("CPU读取mmap带宽: %.2f GB/s\n", touch_bandwidth);

    return timing;
}

// Print one aligned summary row for a method: open / alloc / transfer /
// cleanup / total, all in milliseconds.
void print_timing_summary(const char* method, DetailedTiming timing) {
    const double open_ms    = timing.file_open_time;
    const double alloc_ms   = timing.total_malloc_time;
    const double xfer_ms    = timing.total_data_transfer_time;
    const double cleanup_ms = timing.cleanup_time;
    const double total_ms   = timing.total_all;
    printf("%-17s %6.2f   %6.2f   %6.2f   %6.2f  %6.2f ms\n",
           method, open_ms, alloc_ms, xfer_ms, cleanup_ms, total_ms);
}

// Print pure data-transfer bandwidth for a method, plus the speedup relative
// to base_bw (the baseline method's transfer bandwidth).
void print_data_transfer_bw(const char* method, DetailedTiming timing, size_t test_size, double base_bw) {
    const double gib     = test_size / (1024.0*1024.0*1024.0);
    const double seconds = timing.total_data_transfer_time / 1000.0;
    const double bw      = gib / seconds;
    printf("%-18s 数据传输: %.2f GB/s (%.2fx)\n", method, bw, bw / base_bw);
}

// Print end-to-end bandwidth (based on total elapsed time) for a method, plus
// the speedup relative to base_bw (the baseline method's end-to-end bandwidth).
void print_total_bw(const char* method, DetailedTiming timing, size_t test_size, double base_bw) {
    const double gib     = test_size / (1024.0*1024.0*1024.0);
    const double seconds = timing.total_all / 1000.0;
    const double bw      = gib / seconds;
    printf("%-18s 端到端: %.2f GB/s (%.2fx)\n", method, bw, bw / base_bw);
}


// Comprehensive comparison: runs every read-path variant against the same
// file, then prints a timing table, transfer/end-to-end bandwidth summaries
// (normalized to the traditional__gpu baseline), and appends all results to
// timing_results.csv. Before each test, caches are dropped (clear_caches)
// and cudaFree(0) warms up / initializes the CUDA context so cold-start cost
// is not attributed to the first measured phase.
void comprehensive_comparison(const char* filepath, size_t test_size) {
    printf("\n========================================\n");
    printf("=== 综合对比分析 (%.2f MB) ===\n", test_size / (1024.0 * 1024.0));
    printf("========================================\n");
    
    DetailedTiming traditional__gpu, traditional__CPU, cuFile_read__gpu;
    DetailedTiming mmapFile__gpu, mmapFile_warm__gpu;
    DetailedTiming mmapedFile__cpu;
    DetailedTiming cpuFd_2_uvm__gpu;

    DetailedTiming mmapedFile_2_uvmC__gpu, mmapedFile_2_uvmG__gpu;
    DetailedTiming mmapedFile_to_uvmC__cpu, mmapedFile_to_uvmG__cpu;
    // Test 1: traditional read() + cudaMemcpy, GPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    traditional__gpu = test_traditional__gpu(filepath, test_size);
    
    // Test 1.5: traditional read(), CPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    traditional__CPU = test_traditional__cpu(filepath, test_size); // CPU-access variant of the traditional path

    // Test 2: cuFile (GDS) read, GPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    cuFile_read__gpu = test_cuFile_read__gpu(filepath, test_size);
    
    // Test 3: mmap + copy to device, GPU access (cold, then warm)
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapFile__gpu = test_mmapFile__gpu(filepath, test_size);

    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapFile_warm__gpu = test_mmapFile_warm__gpu(filepath, test_size);

    // Test 4: read() into a UVM buffer, GPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    cpuFd_2_uvm__gpu = test_cpuFd_2_uvm__gpu(filepath, test_size);

    // Test 5: mmap -> UVM (CPU-side memcpy), CPU then GPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapedFile_to_uvmC__cpu = test_mmapedFile_to_uvmC__cpu(filepath, test_size);

    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapedFile_2_uvmC__gpu = test_mmapedFile_2_uvmC__gpu(filepath, test_size);

    // Test 6: mmap -> UVM (cudaMemcpy / prefetch variants), CPU then GPU access
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    // with compute (actually reads the data)
    mmapedFile_to_uvmG__cpu = test_mmapedFile_to_uvmG__cpu(filepath, test_size);

    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapedFile_2_uvmG__gpu = test_mmapedFile_2_uvmG__gpu(filepath, test_size);

    // Test 7: plain mmap, CPU access only
    clear_caches();
    cudaFree(0); // warm-up: initialize the CUDA context
    mmapedFile__cpu = test_mmapedFile__cpu(filepath, test_size);

    // Summary table
    printf("\n========================================\n");
    printf("=== 性能对比总结 ===\n");
    printf("========================================\n");
    printf("方法              文件打开  内存分配  数据传输  清理     总时间\n");
    printf("---------------------------------------------------------------\n");
    print_timing_summary("traditional__CPU", traditional__CPU);
    print_timing_summary("traditional__gpu", traditional__gpu);

    print_timing_summary("cuFile_read__gpu", cuFile_read__gpu);

    print_timing_summary("mmapedFile__cpu", mmapedFile__cpu);
    print_timing_summary("mmapFile__gpu", mmapFile__gpu);
    print_timing_summary("mmapedFile_warm__gpu", mmapFile_warm__gpu);

    print_timing_summary("cpuFd_2_uvm__gpu", cpuFd_2_uvm__gpu);

    print_timing_summary("mmapedFile_to_uvmC__cpu", mmapedFile_to_uvmC__cpu);
    print_timing_summary("mmapedFile_2_uvmC__gpu", mmapedFile_2_uvmC__gpu);

    print_timing_summary("mmapedFile_to_uvmG__cpu", mmapedFile_to_uvmG__cpu);
    print_timing_summary("mmapedFile_2_uvmG__gpu", mmapedFile_2_uvmG__gpu);


    printf("---------------------------------------------------------------\n");

    // Bandwidth comparisons, normalized to the traditional__gpu baseline.
    double base_transfer_bw = (test_size / (1024.0*1024.0*1024.0)) / (traditional__gpu.total_data_transfer_time / 1000.0);
    double base_total_bw = (test_size / (1024.0*1024.0*1024.0)) / (traditional__gpu.total_all / 1000.0);
    
    // Pure data-transfer bandwidth
    printf("\n=== 纯数据传输性能对比 ===\n");
    print_data_transfer_bw("traditional__gpu", traditional__gpu, test_size, base_transfer_bw);
    print_data_transfer_bw("traditional__CPU", traditional__CPU, test_size, base_transfer_bw);

    print_data_transfer_bw("cuFile_read__gpu", cuFile_read__gpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapFile__gpu", mmapFile__gpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapFile_warm__gpu", mmapFile_warm__gpu, test_size, base_transfer_bw);

    print_data_transfer_bw("cpuFd_2_uvm__gpu", cpuFd_2_uvm__gpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapedFile_2_uvmC__gpu", mmapedFile_2_uvmC__gpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapedFile_2_uvmG__gpu", mmapedFile_2_uvmG__gpu, test_size, base_transfer_bw);

    print_data_transfer_bw("mmapedFile__cpu", mmapedFile__cpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapedFile_to_uvmC__cpu", mmapedFile_to_uvmC__cpu, test_size, base_transfer_bw);
    print_data_transfer_bw("mmapedFile_to_uvmG__cpu", mmapedFile_to_uvmG__cpu, test_size, base_transfer_bw);

    // End-to-end bandwidth
    printf("\n=== 端到端性能对比 ===\n");
    print_total_bw("traditional__gpu", traditional__gpu, test_size, base_total_bw);
    print_total_bw("traditional__CPU", traditional__CPU, test_size, base_total_bw);

    print_total_bw("cuFile_read__gpu", cuFile_read__gpu, test_size, base_total_bw);
    print_total_bw("mmapFile__gpu", mmapFile__gpu, test_size, base_total_bw);
    print_total_bw("mmapedFile_warm__gpu", mmapFile_warm__gpu, test_size, base_total_bw);
    print_total_bw("cpuFd_2_uvm__gpu", cpuFd_2_uvm__gpu, test_size, base_total_bw);
    print_total_bw("mmapedFile_2_uvmC__gpu", mmapedFile_2_uvmC__gpu, test_size, base_total_bw);
    print_total_bw("mmapedFile_2_uvmG__gpu", mmapedFile_2_uvmG__gpu, test_size, base_total_bw);

    print_total_bw("mmapedFile__cpu", mmapedFile__cpu, test_size, base_total_bw);
    print_total_bw("mmapedFile_to_uvmC__cpu", mmapedFile_to_uvmC__cpu, test_size, base_total_bw);
    print_total_bw("mmapedFile_to_uvmG__cpu", mmapedFile_to_uvmG__cpu, test_size, base_total_bw);


    // Persist all results to CSV (appended; schema defined by save_timing_to_csv).
    const char* csv_path = "timing_results.csv";
    save_timing_to_csv(csv_path, "traditional__CPU", traditional__CPU, test_size);
    save_timing_to_csv(csv_path, "traditional__gpu", traditional__gpu, test_size);

    save_timing_to_csv(csv_path, "cuFile_read__gpu", cuFile_read__gpu, test_size);

    save_timing_to_csv(csv_path, "mmapedFile__cpu", mmapedFile__cpu, test_size);    
    save_timing_to_csv(csv_path, "mmapFile__gpu", mmapFile__gpu, test_size);
    save_timing_to_csv(csv_path, "mmapedFile_warm__gpu", mmapFile_warm__gpu, test_size);

    save_timing_to_csv(csv_path, "cpuFd_2_uvm__gpu", cpuFd_2_uvm__gpu, test_size);

    save_timing_to_csv(csv_path, "mmapedFile_to_uvmC__cpu", mmapedFile_to_uvmC__cpu, test_size);
    save_timing_to_csv(csv_path, "mmapedFile_2_uvmC__gpu", mmapedFile_2_uvmC__gpu, test_size);

    save_timing_to_csv(csv_path, "mmapedFile_to_uvmG__cpu", mmapedFile_to_uvmG__cpu, test_size);
    save_timing_to_csv(csv_path, "mmapedFile_2_uvmG__gpu", mmapedFile_2_uvmG__gpu, test_size);

}

// Entry point: parse <test_path> [size_MB], detect the storage device, create
// the test file, initialize the GDS driver, run the comparison suite against
// the file, then shut down GDS and remove the file.
int main(int argc, char** argv) {
    printf("======================================\n");
    printf("分阶段详细计时的NVMe GDS性能测试\n");
    printf("======================================\n");
    
    if (argc < 2) {
        printf("用法: %s <测试路径> [文件大小MB]\n", argv[0]);
        printf("示例: %s /mnt/nvme_test 200\n", argv[0]);
        return 1;
    }
    
    const char* test_path = argv[1];
    float file_size_mb = (argc > 2) ? atof(argv[2]) : 100.0f;
    
    printf("测试路径: %s\n", test_path);
    printf("测试文件大小: %.1f MB\n", file_size_mb);
    printf("======================================\n");
    
    // 1. Detect the storage device backing the test path.
    detect_storage_type(test_path);
    
    // 2. Create the test file of the requested size.
    char test_file[512];
    snprintf(test_file, sizeof(test_file), "%s/nvme_gds_detailed_test.bin", test_path);
    
    if (!create_nvme_optimized_file(test_file, file_size_mb)) {
        return 1;
    }
    
    // 3. Initialize the GDS driver.
    printf("\n初始化GDS驱动...\n");
    CUfileError_t status = cuFileDriverOpen();
    if (status.err != CU_FILE_SUCCESS) {
        printf("✗ GDS驱动初始化失败: %d\n", status.err);
        return 1;
    }
    printf("✓ GDS驱动初始化成功\n");
    // A successful driver open does not prove hardware GDS acceleration is in
    // effect (newer cuFile versions expose driver properties to query this);
    // for now we rely on the NVMe check plus observed bandwidth. Reference
    // numbers for a 64MB read:
    //  - ordinary disk: ~0.05 GB/s for the other methods, 0.10 GB/s for
    //    cuFile_read__gpu (end-to-end equals pure transfer);
    //  - NVMe: ~1 GB/s for traditional and UVM CPU reads; ~1.5 GB/s for GDS
    //    and mmap; GDS end-to-end drops to ~0.8 GB/s, the rest stay similar.

    // 4. Run the comprehensive comparison.
    // Fix: test_size was hard-coded to 512MB (while its comment claimed 64MB),
    // ignoring file_size_mb — with the default 100MB file the tests mapped and
    // read far past EOF. Derive the test size from the created file instead.
    size_t test_size = (size_t)(file_size_mb * 1024.0f * 1024.0f);
    comprehensive_comparison(test_file, test_size);
    
    // 5. Shut down GDS.
    cuFileDriverClose();
    
    printf("\n======================================\n");
    printf("详细计时测试完成\n");
    printf("======================================\n");
    
    // Remove the test file.
    unlink(test_file);
    
    return 0;
}