#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <time.h>
#include "prof.h"
#include <pthread.h>
#include <unistd.h>

// Error-handling macro: checks a CUDA runtime call's return value.
// On failure, prints file/line, the numeric error code, the human-readable
// message, and the stringified call, then terminates the process.
#define CUDA_CHECK(call) \
    do { \
        cudaError_t err = call; \
        if (err != cudaSuccess) { \
            fprintf(stderr, "CUDA错误 %s:%d 错误码=%d(%s) \"%s\"\n", \
                    __FILE__, __LINE__, err, cudaGetErrorString(err), #call); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)

// Error-handling macro: checks a cuBLAS call's cublasStatus_t return value.
// On failure, prints file/line and the numeric status code, then terminates.
// (cuBLAS has no cudaGetErrorString equivalent in this API version, so only
// the raw status number is reported.)
#define CUBLAS_CHECK(call) \
    do { \
        cublasStatus_t status = call; \
        if (status != CUBLAS_STATUS_SUCCESS) { \
            fprintf(stderr, "cuBLAS错误 %s:%d 错误码=%d\n", \
                    __FILE__, __LINE__, status); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)

// Generate a uniformly distributed random float in [0.0, 1.0].
// Uses RAND_MAX from <stdlib.h>. The previous code redefined RAND_MAX to
// 2147483647, which is an illegal redefinition of a standard macro wherever
// the platform's value differs (and would then also make rand()/RAND_MAX
// never reach 1.0). Relying on the stdlib definition is both portable and
// produces the intended [0, 1] range on every platform.
float rand_float() {
    return (float)rand() / (float)RAND_MAX;
}

// CPU im2col for a batch of real-valued images.
// Expands each (channels x height x width) image into a
// (channels*ksize*ksize) x (height_col*width_col) matrix so the convolution
// can be performed as a single matrix multiplication. No padding is applied;
// the in-range test can only fail for degenerate parameters and then writes
// zero, matching zero-padding semantics.
void im2col_cpu_real_batch(const float* data_im, int batch_size, int channels, int height, int width,
                           int ksize, int stride, float* data_col) {
    const int out_h = (height - ksize) / stride + 1;   // output rows
    const int out_w = (width - ksize) / stride + 1;    // output cols
    const int col_rows = channels * ksize * ksize;     // rows of the expanded matrix

    for (int b = 0; b < batch_size; ++b) {
        const float* im = data_im + b * channels * height * width;
        float* col = data_col + b * col_rows * out_h * out_w;
        for (int row = 0; row < col_rows; ++row) {
            // Decode this expanded row into (channel, kernel-y, kernel-x).
            const int kw = row % ksize;
            const int kh = (row / ksize) % ksize;
            const int ch = row / (ksize * ksize);
            const float* im_ch = im + ch * height * width;
            float* col_row = col + row * out_h * out_w;
            for (int y = 0; y < out_h; ++y) {
                const int src_y = y * stride + kh;
                for (int x = 0; x < out_w; ++x) {
                    const int src_x = x * stride + kw;
                    const int in_range = src_y >= 0 && src_y < height &&
                                         src_x >= 0 && src_x < width;
                    col_row[y * out_w + x] = in_range ? im_ch[src_y * width + src_x] : 0.0f;
                }
            }
        }
    }
}

// Batched real-valued convolution benchmark.
// Runs the im2col transform on the CPU, uploads the data, and computes the
// convolution as a cuBLAS batched GEMM: one [output_size x K] * [K x 1]
// product per batch element, where K = channels * ksize * ksize.
// Each of the n iterations times three phases in profiling sections:
//   section 1 = host-to-device copies, section 2 = GEMM, section 3 = D2H copy.
// Parameters:
//   batch_size        - images per batched cuBLAS call
//   height, width     - input image dimensions
//   channels          - input channels
//   ksize             - square kernel edge length
//   stride            - convolution stride (no padding)
//   n                 - number of timed iterations
void test_real_conv_batch(int batch_size, int height, int width, int channels, int ksize, int stride, int n) {
    int height_out = (height - ksize) / stride + 1;       // output height
    int width_out = (width - ksize) / stride + 1;         // output width
    int kernel_size = ksize * ksize;                      // elements per kernel plane
    int output_size = height_out * width_out;             // pixels per output map

    // Allocate host buffers (NOTE(review): malloc results are unchecked).
    float* h_im = (float*)malloc(batch_size * channels * height * width * sizeof(float)); // input images
    float* h_kernel = (float*)malloc(channels * kernel_size * sizeof(float));            // convolution kernel
    float* h_col = (float*)malloc(batch_size * channels * kernel_size * output_size * sizeof(float)); // im2col result
    float* h_out = (float*)malloc(batch_size * output_size * sizeof(float));             // output

    // Fill input and kernel with random values in [0, 1].
    srand(time(NULL));
    for (int i = 0; i < batch_size * channels * height * width; ++i) {
        h_im[i] = rand_float();
    }
    for (int i = 0; i < channels * kernel_size; ++i) {
        h_kernel[i] = rand_float();
    }

    // im2col is performed once on the host; only its result feeds the GEMM.
    im2col_cpu_real_batch(h_im, batch_size, channels, height, width, ksize, stride, h_col);

    // Allocate device buffers.
    float *d_im, *d_kernel, *d_col, *d_out;
    CUDA_CHECK(cudaMalloc(&d_im, batch_size * channels * height * width * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_kernel, channels * kernel_size * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_col, batch_size * channels * kernel_size * output_size * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_out, batch_size * output_size * sizeof(float)));

    // Warm-up upload outside the timed loop.
    // NOTE(review): d_im is uploaded here and in the loop but is never read
    // by the GEMM (the im2col was done on the host); its copy only adds to
    // the measured H2D traffic — presumably intentional for the benchmark.
    CUDA_CHECK(cudaMemcpy(d_im, h_im, batch_size * channels * height * width * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_kernel, h_kernel, channels * kernel_size * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_col, h_col, batch_size * channels * kernel_size * output_size * sizeof(float), cudaMemcpyHostToDevice));

    // Create the cuBLAS handle (reused for every iteration).
    cublasHandle_t handle;
    CUBLAS_CHECK(cublasCreate(&handle));

    // Build per-batch device-pointer arrays for cublasSgemmBatched.
    float** h_col_array = (float**)malloc(batch_size * sizeof(float*));   // pointers to each batch's im2col data
    float** h_out_array = (float**)malloc(batch_size * sizeof(float*));   // pointers to each batch's output
    for (int b = 0; b < batch_size; b++) {
        h_col_array[b] = d_col + b * channels * kernel_size * output_size;
        h_out_array[b] = d_out + b * output_size;
    }
    float **d_col_array, **d_out_array;
    CUDA_CHECK(cudaMalloc(&d_col_array, batch_size * sizeof(float*)));
    CUDA_CHECK(cudaMalloc(&d_out_array, batch_size * sizeof(float*)));
    CUDA_CHECK(cudaMemcpy(d_col_array, h_col_array, batch_size * sizeof(float*), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_out_array, h_out_array, batch_size * sizeof(float*), cudaMemcpyHostToDevice));

    // Kernel pointer array: every batch element shares the same kernel.
    float** h_kernel_array = (float**)malloc(batch_size * sizeof(float*));
    for (int b = 0; b < batch_size; b++) {
        h_kernel_array[b] = d_kernel;
    }
    float** d_kernel_array;
    CUDA_CHECK(cudaMalloc(&d_kernel_array, batch_size * sizeof(float*)));
    CUDA_CHECK(cudaMemcpy(d_kernel_array, h_kernel_array, batch_size * sizeof(float*), cudaMemcpyHostToDevice));

    // Timed loop: run the full copy/compute/copy-back pipeline n times.
    for (int i = 0; i < n; ++i) {
        // Section 1: host-to-device copies (re-uploaded every iteration so
        // steady-state transfer time is measured — prof.h section semantics
        // assumed; defined externally).
        profEnterSect(1);
        CUDA_CHECK(cudaMemcpy(d_im, h_im, batch_size * channels * height * width * sizeof(float), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_kernel, h_kernel, channels * kernel_size * sizeof(float), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_col, h_col, batch_size * channels * kernel_size * output_size * sizeof(float), cudaMemcpyHostToDevice));
        profLeaveSect(1);

        // GEMM coefficients: C = 1.0 * A * B + 0.0 * C.
        float alpha = 1.0f;
        float beta = 0.0f;

        // Section 2: batched matrix multiply (the convolution itself).
        // cuBLAS is column-major: the row-major im2col buffer (K rows of
        // output_size values) is read as an output_size x K column-major
        // matrix with lda = output_size, so each C = col^T * kernel yields
        // output_size x 1, i.e. one value per output pixel.
        profEnterSect(2);
        CUBLAS_CHECK(cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                        output_size, 1, channels * kernel_size,
                                        &alpha,
                                        (const float**)d_col_array, output_size,
                                        (const float**)d_kernel_array, channels * kernel_size,
                                        &beta,
                                        d_out_array, output_size,
                                        batch_size));
        // Synchronize so section 2 measures GEMM completion, not just launch.
        CUDA_CHECK(cudaDeviceSynchronize());
        profLeaveSect(2);

        // Section 3: copy the result back to the host.
        profEnterSect(3);
        CUDA_CHECK(cudaMemcpy(h_out, d_out, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost));
        profLeaveSect(3);
    }

    // Release all device and host resources.
    CUDA_CHECK(cudaFree(d_im));
    CUDA_CHECK(cudaFree(d_kernel));
    CUDA_CHECK(cudaFree(d_col));
    CUDA_CHECK(cudaFree(d_out));
    CUDA_CHECK(cudaFree(d_col_array));
    CUDA_CHECK(cudaFree(d_out_array));
    CUDA_CHECK(cudaFree(d_kernel_array));
    CUBLAS_CHECK(cublasDestroy(handle));
    free(h_im);
    free(h_kernel);
    free(h_col);
    free(h_out);
    free(h_col_array);
    free(h_out_array);
    free(h_kernel_array);
}

// Entry point: parses the iteration count from argv, configures the
// profiler sections, and runs the batched real-convolution benchmark.
int main(int argc, char** argv) {
    // Fixed workload configuration.
    const int batch_size = 4;   // images per batch
    const int height = 1024;    // input height
    const int width = 64;       // input width
    const int channels = 1;     // input channels
    const int ksize = 3;        // 3x3 kernel
    const int stride = 1;       // convolution stride

    // The single required argument is the number of benchmark iterations.
    if (argc != 2) {
        fprintf(stderr, "用法: %s <运行次数>\n", argv[0]);
        return 1;
    }
    const int n = atoi(argv[1]);
    if (n <= 0) {
        fprintf(stderr, "运行次数必须为正整数。\n");
        return 1;
    }

    // Initialize profiling and name the timed sections; section 0 wraps
    // the whole benchmark.
    profInit();
    profEnterSect(0);
    profSetSectName(1, "conv htod_time");         // host-to-device copy time
    profSetSectName(2, "real_conv 实数卷积时间"); // convolution compute time
    profSetSectName(3, "conv dtoh_time");         // device-to-host copy time

    // Run the batched real-convolution benchmark n times.
    test_real_conv_batch(batch_size, height, width, channels, ksize, stride, n);

    profLeaveSect(0);
    profPrintInfo();

    return 0;
}