#include <cuda_runtime.h>
#include <cuda_fp16.h>  // 支持__half类型
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <signal.h>     // 用于信号处理（Ctrl+C）

// Abort with file/line context when a CUDA runtime call fails.
// The macro-local is named err_ (not err) so the macro argument can
// itself mention a variable called `err` without being captured by the
// freshly-declared, not-yet-initialized local; `call` is parenthesized
// so comma/operator expressions expand safely.
#define CUDA_CHECK(call) \
    do { \
        cudaError_t err_ = (call); \
        if (err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA错误: %s at %s:%d\n", \
                    cudaGetErrorString(err_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)

// Constant-memory operand tables read by the benchmark kernels.
// Each table holds 1024 entries (8 KB for the double table, well under
// the 64 KB __constant__ limit); all lanes of a warp read the same
// index per loop step, which is the broadcast-friendly access pattern.
// NOTE(review): nothing in view writes these with cudaMemcpyToSymbol,
// so the kernels appear to rely on implicit zero-initialization of
// module-scope device data — confirm that is intended.
__constant__ float constFloatData[1024];
__constant__ double constDoubleData[1024];
__constant__ char constInt8Data[1024];
__constant__ __half constHalfData[1024];

// File-scope handles to live GPU resources so the SIGINT handler
// (cleanup) can release them when the user presses Ctrl+C.
static float* d_fp32_data = NULL;    // device buffer for the FP32 test
static double* d_fp64_data = NULL;   // device buffer for the FP64 test
static char* d_int8_data = NULL;     // device buffer for the INT8 test
static __half* d_fp16_data = NULL;   // device buffer for the FP16 test
static cudaEvent_t start_event, stop_event;  // shared GPU timing events
static int resource_allocated = 0;  // nonzero once events/buffers exist (gates cleanup)
// Cleared by cleanup() to break the benchmark timing loops.
// NOTE(review): for strict signal-handler correctness this should be
// `volatile sig_atomic_t`; `volatile int` works on common platforms.
static volatile int running = 1;

// SIGINT (Ctrl+C) handler: stop the benchmark loops, release GPU
// resources, and exit the process.
// NOTE(review): printf/exit and the CUDA runtime are not
// async-signal-safe; this is tolerable for an interactive benchmark
// that terminates immediately, but a production tool should only set
// `running = 0` here and do the teardown from main.
void cleanup(int signum) {
    (void)signum;  // single-signal handler; the number is not needed
    running = 0;   // break the benchmark timing loops
    printf("\n接收到Ctrl+C信号，正在释放GPU资源...\n");
    if (resource_allocated) {
        // Best-effort teardown: deliberately NOT wrapped in CUDA_CHECK,
        // which would exit() on the first failing call and skip freeing
        // the remaining resources. We are exiting anyway.
        if (d_fp32_data) { cudaFree(d_fp32_data); d_fp32_data = NULL; }
        if (d_fp64_data) { cudaFree(d_fp64_data); d_fp64_data = NULL; }
        if (d_int8_data) { cudaFree(d_int8_data); d_int8_data = NULL; }
        if (d_fp16_data) { cudaFree(d_fp16_data); d_fp16_data = NULL; }
        cudaEventDestroy(start_event);
        cudaEventDestroy(stop_event);
        cudaDeviceReset();
        resource_allocated = 0;
    }
    printf("GPU资源已释放，程序退出。\n");
    exit(0);
}

// Print the main hardware properties of the CUDA device `deviceID`.
// Aborts (via CUDA_CHECK) if the device query fails.
void printDeviceInfo(int deviceID) {
    cudaDeviceProp prop;
    CUDA_CHECK(cudaGetDeviceProperties(&prop, deviceID));

    printf("=== 设备信息 ===\n");
    printf("设备名称: %s\n", prop.name);
    // totalGlobalMem and sharedMemPerBlock are size_t: print with %zu.
    // %ld only matches by accident on LP64 and is wrong on LLP64/Windows.
    printf("总全局内存: %zu MB\n", prop.totalGlobalMem / (1024 * 1024));
    printf("共享内存 per block: %zu KB\n", prop.sharedMemPerBlock / 1024);
    printf("寄存器 per block: %d\n", prop.regsPerBlock);
    printf("线程块中的最大线程数: %d\n", prop.maxThreadsPerBlock);
    printf("多处理器数量: %d\n", prop.multiProcessorCount);
    printf("时钟频率: %d MHz\n", prop.clockRate / 1000);
    printf("计算能力: %d.%d\n", prop.major, prop.minor);
    printf("全局内存总线宽度: %d bits\n", prop.memoryBusWidth);
    printf("L2缓存大小: %d KB\n", prop.l2CacheSize / 1024);
    printf("每个多处理器的最大线程数: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("最大网格维度: [%d, %d, %d]\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("最大块维度: [%d, %d, %d]\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("内存时钟频率: %d MHz\n", prop.memoryClockRate / 1000);
    printf("设备是否支持并发内核执行: %d\n", prop.concurrentKernels);
    printf("设备是否支持异步内存拷贝: %d\n", prop.asyncEngineCount > 0);
    printf("设备是否是集成GPU: %d\n", prop.integrated);
    printf("设备是否支持主机内存映射: %d\n", prop.canMapHostMemory);
    printf("ECC支持: %d\n", prop.ECCEnabled);
    printf("===============\n");
}

// FP32 benchmark kernel: 10000 dependent single-precision adds per
// element, operands streamed from constant memory (same index across
// the warp each step, so reads broadcast).
// NOTE(review): there is no `idx < n` guard — the host must size the
// buffer to at least gridDim.x * blockDim.x elements.
__global__ void fp32Kernel(float* data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    float acc = data[tid];
    int step = 0;
    while (step < 10000) {
        acc += constFloatData[step % 1024];
        ++step;
    }
    data[tid] = acc;
}

// FP64 benchmark kernel: 10000 dependent double-precision adds per
// element, operands streamed from constant memory.
// NOTE(review): there is no `idx < n` guard — the host must size the
// buffer to at least gridDim.x * blockDim.x elements.
__global__ void fp64Kernel(double* data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    double acc = data[tid];
    int step = 0;
    while (step < 10000) {
        acc += constDoubleData[step % 1024];
        ++step;
    }
    data[tid] = acc;
}

// INT8 benchmark kernel: 10000 dependent 8-bit adds per element,
// operands streamed from constant memory. The accumulator is a plain
// char, so it wraps on overflow — fine for a throughput benchmark.
// NOTE(review): there is no `idx < n` guard — the host must size the
// buffer to at least gridDim.x * blockDim.x elements.
__global__ void int8Kernel(char* data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    char acc = data[tid];
    int step = 0;
    while (step < 10000) {
        acc += constInt8Data[step % 1024];
        ++step;
    }
    data[tid] = acc;
}

// FP16 benchmark kernel: 10000 dependent half-precision adds per
// element via __hadd, operands streamed from constant memory.
// NOTE(review): __hadd requires native FP16 arithmetic (SM 5.3+), and
// there is no `idx < n` guard — the host must size the buffer to at
// least gridDim.x * blockDim.x elements.
__global__ void fp16Kernel(__half* data) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    __half acc = data[tid];
    int step = 0;
    while (step < 10000) {
        acc = __hadd(acc, constHalfData[step % 1024]);
        ++step;
    }
    data[tid] = acc;
}

// Benchmark FP32 add throughput for roughly testTimeSeconds seconds
// using blocks of threadsPerBlock threads.
// Returns the achieved TFLOPS (1 add per element per inner-loop step).
double testFP32Performance(int threadsPerBlock, int testTimeSeconds) {
    const int size = 1024 * 1024 * 10; // 10M elements

    dim3 blockDim(threadsPerBlock);
    dim3 gridDim((size + blockDim.x - 1) / blockDim.x);

    // The kernel has no bounds guard, so allocate to full grid coverage:
    // with a ceil-divided grid, the tail block would otherwise write out
    // of bounds whenever threadsPerBlock does not divide size evenly.
    size_t paddedCount = (size_t)gridDim.x * blockDim.x;
    CUDA_CHECK(cudaMalloc(&d_fp32_data, paddedCount * sizeof(float)));
    // Start from zeros rather than uninitialized memory so the run is
    // deterministic (no stray NaN/Inf patterns in the accumulators).
    CUDA_CHECK(cudaMemset(d_fp32_data, 0, paddedCount * sizeof(float)));

    // Create the shared timing events once; cleanup() destroys them on
    // Ctrl+C. resource_allocated gates both creation and destruction,
    // so repeated test calls do not leak event handles.
    if (!resource_allocated) {
        CUDA_CHECK(cudaEventCreate(&start_event));
        CUDA_CHECK(cudaEventCreate(&stop_event));
        resource_allocated = 1;
    }

    // Warm-up launch so the timed region does not pay one-time costs.
    fp32Kernel<<<gridDim, blockDim>>>(d_fp32_data);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaEventRecord(start_event));
    int iterations = 0;
    clock_t startTime = clock();
    // Launches are asynchronous: clock() only bounds how long we keep
    // enqueueing work; actual GPU time is measured with the events.
    while (((clock() - startTime) / CLOCKS_PER_SEC < testTimeSeconds) && running) {
        fp32Kernel<<<gridDim, blockDim>>>(d_fp32_data);
        iterations++;
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop_event));
    CUDA_CHECK(cudaEventSynchronize(stop_event));

    float milliseconds = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start_event, stop_event));

    // 1 FLOP (add) per element per inner-loop step, per iteration.
    long long operations = (long long)size * 10000 * iterations;
    double seconds = milliseconds / 1000.0;
    // Guard against zero elapsed time (e.g. interrupted before any
    // timed iteration) to avoid a division by zero.
    double tflops = (seconds > 0.0) ? (operations / seconds / 1e12) : 0.0;

    CUDA_CHECK(cudaFree(d_fp32_data));
    d_fp32_data = NULL;
    return tflops;
}

// Benchmark FP64 add throughput for roughly testTimeSeconds seconds
// using blocks of threadsPerBlock threads.
// Returns the achieved TFLOPS (1 add per element per inner-loop step).
double testFP64Performance(int threadsPerBlock, int testTimeSeconds) {
    const int size = 1024 * 1024 * 10; // 10M elements

    dim3 blockDim(threadsPerBlock);
    dim3 gridDim((size + blockDim.x - 1) / blockDim.x);

    // Pad the allocation to full grid coverage: the kernel has no bounds
    // guard, so the tail block would otherwise write out of bounds when
    // threadsPerBlock does not divide size evenly.
    size_t paddedCount = (size_t)gridDim.x * blockDim.x;
    CUDA_CHECK(cudaMalloc(&d_fp64_data, paddedCount * sizeof(double)));
    CUDA_CHECK(cudaMemset(d_fp64_data, 0, paddedCount * sizeof(double)));

    // The original only created the timing events in the FP32 test, so
    // this function crashed if called first. Create them here too,
    // guarded so repeated calls do not leak handles; cleanup() destroys
    // them on Ctrl+C.
    if (!resource_allocated) {
        CUDA_CHECK(cudaEventCreate(&start_event));
        CUDA_CHECK(cudaEventCreate(&stop_event));
        resource_allocated = 1;
    }

    // Warm-up launch so the timed region does not pay one-time costs.
    fp64Kernel<<<gridDim, blockDim>>>(d_fp64_data);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaEventRecord(start_event));
    int iterations = 0;
    clock_t startTime = clock();
    // clock() only bounds how long we keep enqueueing asynchronous
    // launches; actual GPU time is measured with the events.
    while (((clock() - startTime) / CLOCKS_PER_SEC < testTimeSeconds) && running) {
        fp64Kernel<<<gridDim, blockDim>>>(d_fp64_data);
        iterations++;
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop_event));
    CUDA_CHECK(cudaEventSynchronize(stop_event));

    float milliseconds = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start_event, stop_event));

    // 1 FLOP (add) per element per inner-loop step, per iteration.
    long long operations = (long long)size * 10000 * iterations;
    double seconds = milliseconds / 1000.0;
    // Avoid division by zero if interrupted before any timed iteration.
    double tflops = (seconds > 0.0) ? (operations / seconds / 1e12) : 0.0;

    CUDA_CHECK(cudaFree(d_fp64_data));
    d_fp64_data = NULL;
    return tflops;
}

// Benchmark INT8 add throughput for roughly testTimeSeconds seconds
// using blocks of threadsPerBlock threads.
// Returns achieved tera-operations/second (integer adds, not FLOPS).
double testInt8Performance(int threadsPerBlock, int testTimeSeconds) {
    const int size = 1024 * 1024 * 10; // 10M elements

    dim3 blockDim(threadsPerBlock);
    dim3 gridDim((size + blockDim.x - 1) / blockDim.x);

    // Pad the allocation to full grid coverage: the kernel has no bounds
    // guard, so the tail block would otherwise write out of bounds when
    // threadsPerBlock does not divide size evenly.
    size_t paddedCount = (size_t)gridDim.x * blockDim.x;
    CUDA_CHECK(cudaMalloc(&d_int8_data, paddedCount * sizeof(char)));
    CUDA_CHECK(cudaMemset(d_int8_data, 0, paddedCount * sizeof(char)));

    // The original only created the timing events in the FP32 test, so
    // this function crashed if called first. Create them here too,
    // guarded so repeated calls do not leak handles; cleanup() destroys
    // them on Ctrl+C.
    if (!resource_allocated) {
        CUDA_CHECK(cudaEventCreate(&start_event));
        CUDA_CHECK(cudaEventCreate(&stop_event));
        resource_allocated = 1;
    }

    // Warm-up launch so the timed region does not pay one-time costs.
    int8Kernel<<<gridDim, blockDim>>>(d_int8_data);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaEventRecord(start_event));
    int iterations = 0;
    clock_t startTime = clock();
    // clock() only bounds how long we keep enqueueing asynchronous
    // launches; actual GPU time is measured with the events.
    while (((clock() - startTime) / CLOCKS_PER_SEC < testTimeSeconds) && running) {
        int8Kernel<<<gridDim, blockDim>>>(d_int8_data);
        iterations++;
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop_event));
    CUDA_CHECK(cudaEventSynchronize(stop_event));

    float milliseconds = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start_event, stop_event));

    // 1 integer add per element per inner-loop step, per iteration.
    long long operations = (long long)size * 10000 * iterations;
    double seconds = milliseconds / 1000.0;
    // Avoid division by zero if interrupted before any timed iteration.
    double tops = (seconds > 0.0) ? (operations / seconds / 1e12) : 0.0;

    CUDA_CHECK(cudaFree(d_int8_data));
    d_int8_data = NULL;
    return tops;
}

// Benchmark FP16 add throughput for roughly testTimeSeconds seconds
// using blocks of threadsPerBlock threads.
// Returns the achieved TFLOPS (1 half add per element per inner step).
double testFP16Performance(int threadsPerBlock, int testTimeSeconds) {
    const int size = 1024 * 1024 * 10; // 10M elements

    dim3 blockDim(threadsPerBlock);
    dim3 gridDim((size + blockDim.x - 1) / blockDim.x);

    // Pad the allocation to full grid coverage: the kernel has no bounds
    // guard, so the tail block would otherwise write out of bounds when
    // threadsPerBlock does not divide size evenly.
    size_t paddedCount = (size_t)gridDim.x * blockDim.x;
    CUDA_CHECK(cudaMalloc(&d_fp16_data, paddedCount * sizeof(__half)));
    CUDA_CHECK(cudaMemset(d_fp16_data, 0, paddedCount * sizeof(__half)));

    // The original only created the timing events in the FP32 test, so
    // this function crashed if called first. Create them here too,
    // guarded so repeated calls do not leak handles; cleanup() destroys
    // them on Ctrl+C.
    if (!resource_allocated) {
        CUDA_CHECK(cudaEventCreate(&start_event));
        CUDA_CHECK(cudaEventCreate(&stop_event));
        resource_allocated = 1;
    }

    // Warm-up launch so the timed region does not pay one-time costs.
    fp16Kernel<<<gridDim, blockDim>>>(d_fp16_data);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaEventRecord(start_event));
    int iterations = 0;
    clock_t startTime = clock();
    // clock() only bounds how long we keep enqueueing asynchronous
    // launches; actual GPU time is measured with the events.
    while (((clock() - startTime) / CLOCKS_PER_SEC < testTimeSeconds) && running) {
        fp16Kernel<<<gridDim, blockDim>>>(d_fp16_data);
        iterations++;
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop_event));
    CUDA_CHECK(cudaEventSynchronize(stop_event));

    float milliseconds = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start_event, stop_event));

    // 1 half-precision add per element per inner-loop step.
    long long operations = (long long)size * 10000 * iterations;
    double seconds = milliseconds / 1000.0;
    // Avoid division by zero if interrupted before any timed iteration.
    double tflops = (seconds > 0.0) ? (operations / seconds / 1e12) : 0.0;

    CUDA_CHECK(cudaFree(d_fp16_data));
    d_fp16_data = NULL;
    return tflops;
}

// Entry point: print device info, then run the four throughput tests.
// Usage: prog [threadsPerBlock] [secondsPerTest]
int main(int argc, char** argv) {
    // Register the Ctrl+C handler so an interrupted run still releases
    // its GPU resources (cleanup() was previously defined but never
    // installed, and the benchmark functions were never invoked).
    signal(SIGINT, cleanup);

    // Select and describe the device.
    int deviceID = 0;
    CUDA_CHECK(cudaSetDevice(deviceID));
    printDeviceInfo(deviceID);

    // Optional arguments with safe defaults. threadsPerBlock is forced
    // to a power of two <= 1024 so that it always divides the
    // 10M-element (2^21 * 5) benchmark buffers: the kernels have no
    // bounds guard, so an uneven split would run past the allocation.
    int threadsPerBlock = (argc > 1) ? atoi(argv[1]) : 256;
    int testTimeSeconds = (argc > 2) ? atoi(argv[2]) : 5;
    if (threadsPerBlock <= 0 || threadsPerBlock > 1024 ||
        (threadsPerBlock & (threadsPerBlock - 1)) != 0) {
        threadsPerBlock = 256;
    }
    if (testTimeSeconds <= 0) testTimeSeconds = 5;

    // Fill the constant-memory operand tables with a non-zero value;
    // they were never written from the host before, so the kernels were
    // summing implicitly zeroed constants.
    static float h_f[1024];
    static double h_d[1024];
    static char h_i8[1024];
    static __half h_h[1024];
    for (int i = 0; i < 1024; i++) {
        h_f[i] = 1.0f;
        h_d[i] = 1.0;
        h_i8[i] = 1;
        h_h[i] = __float2half(1.0f);
    }
    CUDA_CHECK(cudaMemcpyToSymbol(constFloatData, h_f, sizeof(h_f)));
    CUDA_CHECK(cudaMemcpyToSymbol(constDoubleData, h_d, sizeof(h_d)));
    CUDA_CHECK(cudaMemcpyToSymbol(constInt8Data, h_i8, sizeof(h_i8)));
    CUDA_CHECK(cudaMemcpyToSymbol(constHalfData, h_h, sizeof(h_h)));

    // Run the benchmarks. FP32 goes first: it is the test that creates
    // the shared timing events used by the others.
    printf("FP32: %.4f TFLOPS\n", testFP32Performance(threadsPerBlock, testTimeSeconds));
    printf("FP64: %.4f TFLOPS\n", testFP64Performance(threadsPerBlock, testTimeSeconds));
    printf("INT8: %.4f TOPS\n", testInt8Performance(threadsPerBlock, testTimeSeconds));
    printf("FP16: %.4f TFLOPS\n", testFP16Performance(threadsPerBlock, testTimeSeconds));

    // Final teardown of the shared timing events created by the tests.
    if (resource_allocated) {
        CUDA_CHECK(cudaEventDestroy(start_event));
        CUDA_CHECK(cudaEventDestroy(stop_event));
        resource_allocated = 0;
    }
    return 0;
}