#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>

// Single-precision complex number (layout-compatible with cuComplex / float2:
// two consecutive floats, real first).
typedef struct {
    float x; // real part
    float y; // imaginary part
} Complex;

// Complex addition: returns the component-wise sum a + b.
__device__ Complex complexAdd(Complex a, Complex b) {
    Complex sum = {a.x + b.x, a.y + b.y};
    return sum;
}

// Complex multiplication: (a.x + i*a.y) * (b.x + i*b.y).
__device__ Complex complexMul(Complex a, Complex b) {
    const float re = a.x * b.x - a.y * b.y; // real part
    const float im = a.x * b.y + a.y * b.x; // imaginary part
    Complex prod = {re, im};
    return prod;
}

// Abort with a diagnostic if a CUDA runtime call failed.
// The argument is captured into a local exactly once, so passing a call
// expression (e.g. CUDA_CHECK(cudaMalloc(...))) cannot re-execute the call
// on the error path — the original expanded `err` twice (once in the
// comparison, once in cudaGetErrorString), double-evaluating side effects.
#define CUDA_CHECK(err) do { \
    cudaError_t err_ = (err); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA错误: %s 在 %s:%d\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
        exit(EXIT_FAILURE); \
    } \
} while(0)

// Problem-size constants
#define N 1536          // number of matrix pairs (512 * 3)
#define ROWS 50         // matrix row count
#define COLS 1604       // matrix column count
#define BLOCK_SIZE 256  // threads per block

// Batched complex matrix multiply: C[i] = A[i] * B[i] for i in [0, numMatrices).
// Each A[i] is ROWS x COLS row-major, each B[i] is COLS x ROWS row-major, and
// each product C[i] is ROWS x ROWS row-major, stored back to back per matrix.
// Launch: 1D grid with one thread per output element
// (numMatrices * ROWS * ROWS threads total); any block size works.
// Inputs are marked const __restrict__ so the compiler may route the loads
// through the read-only data cache (A/B/C must not alias).
__global__ void matrixMultiplyKernel(const Complex* __restrict__ A,
                                     const Complex* __restrict__ B,
                                     Complex* __restrict__ C,
                                     int numMatrices) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int elementsPerMatrix = ROWS * ROWS; // 50 * 50
    const int totalElements = numMatrices * elementsPerMatrix;

    // Guard the grid tail: totalElements is rarely a multiple of the block size.
    if (tid >= totalElements) return;

    // Decompose the flat thread id into (matrix, row, col) of one output element.
    const int matrixIdx = tid / elementsPerMatrix;
    const int localIdx = tid % elementsPerMatrix;
    const int row = localIdx / ROWS;
    const int col = localIdx % ROWS;

    // Dot product of row `row` of A[matrixIdx] with column `col` of B[matrixIdx].
    // B is accessed as k*ROWS + col, so adjacent threads (adjacent col) read
    // adjacent addresses — coalesced within a warp.
    Complex sum = {0.0f, 0.0f};
    for (int k = 0; k < COLS; ++k) { // COLS = 1604
        const Complex a = A[matrixIdx * ROWS * COLS + row * COLS + k];
        const Complex b = B[matrixIdx * COLS * ROWS + k * ROWS + col];
        sum = complexAdd(sum, complexMul(a, b));
    }
    C[matrixIdx * ROWS * ROWS + row * ROWS + col] = sum;
}

// Batched matrix inversion via Gauss-Jordan elimination with partial pivoting:
// D[i] = inverse(C[i]) for each ROWS x ROWS complex matrix.
// Launch contract: one block per matrix (gridDim.x >= numMatrices); any
// blockDim.x works — loads/stores are strided over the block, and the
// elimination step uses only the first ROWS threads.
// Shared memory: the augmented matrix [C | I] is a static array of
// ROWS * 2*ROWS Complex = 50 * 100 * 8 = 40000 bytes, within the default
// 48 KB/block limit.
// NOTE(review): a singular (or numerically rank-deficient) C[i] yields a
// zero pivot, so pivot_mag == 0 and the divisions below produce Inf/NaN;
// no singularity check is performed — confirm inputs are invertible.
__global__ void inverseKernel(Complex* C, Complex* D, int numMatrices) {
    int i = blockIdx.x; // index of the matrix this block inverts
    if (i >= numMatrices) return;

    // Augmented matrix [C | I] in shared memory: columns [0, ROWS) hold C,
    // columns [ROWS, 2*ROWS) start as the identity and end as C^-1.
    __shared__ Complex aug[ROWS][2 * ROWS];
    int tid = threadIdx.x;

    // Cooperative load, strided so any block size covers all ROWS*ROWS elements.
    for (int t = tid; t < ROWS * ROWS; t += blockDim.x) {
        int row = t / ROWS;
        int col = t % ROWS;
        aug[row][col] = C[i * ROWS * ROWS + row * ROWS + col]; // left half: C
        aug[row][ROWS + col] = (row == col) ? Complex{1.0f, 0.0f} : Complex{0.0f, 0.0f}; // right half: identity
    }
    __syncthreads(); // whole augmented matrix populated before elimination

    // Gauss-Jordan elimination, one pivot column per iteration.
    for (int k = 0; k < ROWS; k++) {
        // Pivot selection, row swap, and pivot-row normalization are serial
        // in thread 0; the other threads wait at the barrier below.
        if (tid == 0) {
            // Partial pivoting: pick row m >= k with the largest |aug[m][k]|.
            float max_val = 0.0f;
            int max_idx = k;
            for (int m = k; m < ROWS; m++) {
                float mag = sqrtf(aug[m][k].x * aug[m][k].x + aug[m][k].y * aug[m][k].y);
                if (mag > max_val) {
                    max_val = mag;
                    max_idx = m;
                }
            }
            // Swap the chosen pivot row into position k.
            if (max_idx != k) {
                for (int j = 0; j < 2 * ROWS; j++) {
                    Complex temp = aug[k][j];
                    aug[k][j] = aug[max_idx][j];
                    aug[max_idx][j] = temp;
                }
            }
            // Scale row k by 1/pivot = conj(pivot) / |pivot|^2 so aug[k][k] == 1.
            Complex pivot = aug[k][k];
            float pivot_mag = sqrtf(pivot.x * pivot.x + pivot.y * pivot.y);
            Complex pivot_inv = {pivot.x / pivot_mag / pivot_mag, -pivot.y / pivot_mag / pivot_mag};
            for (int j = 0; j < 2 * ROWS; j++) {
                aug[k][j] = complexMul(aug[k][j], pivot_inv);
            }
        }
        __syncthreads(); // pivot row finalized before anyone reads it

        // Parallel elimination: each of the first ROWS threads owns one row
        // and subtracts factor * (pivot row) to zero out column k.
        if (tid < ROWS && tid != k) {
            Complex factor = aug[tid][k];
            for (int j = 0; j < 2 * ROWS; j++) {
                Complex prod = complexMul(factor, aug[k][j]);
                aug[tid][j].x -= prod.x;
                aug[tid][j].y -= prod.y;
            }
        }
        __syncthreads(); // all rows updated before the next pivot column
    }

    // The right half of the augmented matrix now holds C^-1; write it to D.
    for (int t = tid; t < ROWS * ROWS; t += blockDim.x) {
        int row = t / ROWS;
        int col = t % ROWS;
        D[i * ROWS * ROWS + row * ROWS + col] = aug[row][ROWS + col];
    }
}

// Host driver: uploads the batched inputs, runs the multiply kernel
// (C = A * B) then the inversion kernel (D = C^-1), downloads the result,
// and repeats NUM_RUNS times. Each stage (H2D copy, multiply, inversion,
// D2H copy, total) is timed with CUDA events; per-run and aggregate
// (average/max/min) timings are printed. Returns 0 on success.
int main() {
    const int NUM_RUNS = 1000; // benchmark iterations (was hard-coded in several places)

    // Buffer sizes in bytes. The leading cast keeps the element-count
    // product in size_t arithmetic, immune to 32-bit overflow if the
    // constants grow.
    size_t size_A = (size_t)N * ROWS * COLS * sizeof(Complex); // inputs A (ROWS x COLS each)
    size_t size_B = (size_t)N * COLS * ROWS * sizeof(Complex); // inputs B (COLS x ROWS each)
    size_t size_C = (size_t)N * ROWS * ROWS * sizeof(Complex); // products C = A * B
    size_t size_D = (size_t)N * ROWS * ROWS * sizeof(Complex); // outputs D = C^-1

    // Host allocations (~1 GB each for A and B); fail fast on OOM instead
    // of writing through NULL in the init loops below.
    Complex *h_A = (Complex*)malloc(size_A);
    Complex *h_B = (Complex*)malloc(size_B);
    Complex *h_D = (Complex*)malloc(size_D);
    if (h_A == NULL || h_B == NULL || h_D == NULL) {
        fprintf(stderr, "host malloc failed (A: %zu, B: %zu, D: %zu bytes)\n",
                size_A, size_B, size_D);
        return EXIT_FAILURE;
    }

    // Initialize inputs with uniform random values in [0, 1]
    // (placeholder — replace with real data as needed).
    for (size_t i = 0; i < (size_t)N * ROWS * COLS; i++) {
        h_A[i].x = rand() / (float)RAND_MAX;
        h_A[i].y = rand() / (float)RAND_MAX;
    }
    for (size_t i = 0; i < (size_t)N * COLS * ROWS; i++) {
        h_B[i].x = rand() / (float)RAND_MAX;
        h_B[i].y = rand() / (float)RAND_MAX;
    }

    // Device buffers.
    Complex *d_A, *d_B, *d_C, *d_D;
    CUDA_CHECK(cudaMalloc(&d_A, size_A));
    CUDA_CHECK(cudaMalloc(&d_B, size_B));
    CUDA_CHECK(cudaMalloc(&d_C, size_C));
    CUDA_CHECK(cudaMalloc(&d_D, size_D));

    // Events bracketing each timed stage. All work is issued to the default
    // stream, so every event is stream-ordered with the work it brackets.
    cudaEvent_t start_total, stop_total, start_h2d, stop_h2d, start_mul, stop_mul, start_inv, stop_inv, start_d2h, stop_d2h;
    CUDA_CHECK(cudaEventCreate(&start_total));
    CUDA_CHECK(cudaEventCreate(&stop_total));
    CUDA_CHECK(cudaEventCreate(&start_h2d));
    CUDA_CHECK(cudaEventCreate(&stop_h2d));
    CUDA_CHECK(cudaEventCreate(&start_mul));
    CUDA_CHECK(cudaEventCreate(&stop_mul));
    CUDA_CHECK(cudaEventCreate(&start_inv));
    CUDA_CHECK(cudaEventCreate(&stop_inv));
    CUDA_CHECK(cudaEventCreate(&start_d2h));
    CUDA_CHECK(cudaEventCreate(&stop_d2h));

    // Per-run timing samples (ms).
    float times_h2d[NUM_RUNS], times_mul[NUM_RUNS], times_inv[NUM_RUNS], times_d2h[NUM_RUNS], times_total[NUM_RUNS];

    // Multiply launch config: one thread per output element.
    const int elementsPerMatrix = ROWS * ROWS;
    const int totalThreads = N * elementsPerMatrix; // 1536 * 50 * 50 = 3,840,000
    const int threadsPerBlock = BLOCK_SIZE;
    const int blocks = (totalThreads + threadsPerBlock - 1) / threadsPerBlock; // ceil-div

    for (int run = 0; run < NUM_RUNS; run++) {
        CUDA_CHECK(cudaEventRecord(start_total, 0));

        // Host-to-device copies.
        CUDA_CHECK(cudaEventRecord(start_h2d, 0));
        CUDA_CHECK(cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaEventRecord(stop_h2d, 0));

        // Batched multiply: C = A * B.
        CUDA_CHECK(cudaEventRecord(start_mul, 0));
        matrixMultiplyKernel<<<blocks, threadsPerBlock>>>(d_A, d_B, d_C, N);
        CUDA_CHECK(cudaGetLastError()); // catch launch-config errors immediately
        CUDA_CHECK(cudaEventRecord(stop_mul, 0));

        // Batched inversion: D = C^-1, one block per matrix.
        CUDA_CHECK(cudaEventRecord(start_inv, 0));
        inverseKernel<<<N, BLOCK_SIZE>>>(d_C, d_D, N);
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaEventRecord(stop_inv, 0));

        // Device-to-host copy of the result.
        CUDA_CHECK(cudaEventRecord(start_d2h, 0));
        CUDA_CHECK(cudaMemcpy(h_D, d_D, size_D, cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaEventRecord(stop_d2h, 0));

        CUDA_CHECK(cudaEventRecord(stop_total, 0));
        // Block until everything this run queued has finished, so the
        // elapsed-time queries below are valid.
        CUDA_CHECK(cudaEventSynchronize(stop_total));

        float t_h2d, t_mul, t_inv, t_d2h, t_total;
        CUDA_CHECK(cudaEventElapsedTime(&t_h2d, start_h2d, stop_h2d));
        CUDA_CHECK(cudaEventElapsedTime(&t_mul, start_mul, stop_mul));
        CUDA_CHECK(cudaEventElapsedTime(&t_inv, start_inv, stop_inv));
        CUDA_CHECK(cudaEventElapsedTime(&t_d2h, start_d2h, stop_d2h));
        CUDA_CHECK(cudaEventElapsedTime(&t_total, start_total, stop_total));

        times_h2d[run] = t_h2d;
        times_mul[run] = t_mul;
        times_inv[run] = t_inv;
        times_d2h[run] = t_d2h;
        times_total[run] = t_total;

        // Live per-run report.
        printf("运行 %d: 拷贝H2D: %.3f ms, 乘法: %.3f ms, 求逆: %.3f ms, 拷贝D2H: %.3f ms, 总时间: %.3f ms\n",
               run, t_h2d, t_mul, t_inv, t_d2h, t_total);
    }

    // Aggregate statistics over all runs.
    float avg_h2d = 0, avg_mul = 0, avg_inv = 0, avg_d2h = 0, avg_total = 0;
    float max_h2d = 0, max_mul = 0, max_inv = 0, max_d2h = 0, max_total = 0;
    float min_h2d = FLT_MAX, min_mul = FLT_MAX, min_inv = FLT_MAX, min_d2h = FLT_MAX, min_total = FLT_MAX;

    for (int i = 0; i < NUM_RUNS; i++) {
        avg_h2d += times_h2d[i];
        avg_mul += times_mul[i];
        avg_inv += times_inv[i];
        avg_d2h += times_d2h[i];
        avg_total += times_total[i];

        max_h2d = fmaxf(max_h2d, times_h2d[i]);
        max_mul = fmaxf(max_mul, times_mul[i]);
        max_inv = fmaxf(max_inv, times_inv[i]);
        max_d2h = fmaxf(max_d2h, times_d2h[i]);
        max_total = fmaxf(max_total, times_total[i]);

        min_h2d = fminf(min_h2d, times_h2d[i]);
        min_mul = fminf(min_mul, times_mul[i]);
        min_inv = fminf(min_inv, times_inv[i]);
        min_d2h = fminf(min_d2h, times_d2h[i]);
        min_total = fminf(min_total, times_total[i]);
    }

    avg_h2d /= NUM_RUNS;
    avg_mul /= NUM_RUNS;
    avg_inv /= NUM_RUNS;
    avg_d2h /= NUM_RUNS;
    avg_total /= NUM_RUNS;

    // Summary report. The header takes the run count from NUM_RUNS so the
    // message stays correct if the constant changes.
    printf("\n统计结果（基于%d次运行）：\n", NUM_RUNS);
    printf("拷贝H2D - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_h2d, max_h2d, min_h2d);
    printf("矩阵乘法 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_mul, max_mul, min_mul);
    printf("矩阵求逆 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_inv, max_inv, min_inv);
    printf("拷贝D2H - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_d2h, max_d2h, min_d2h);
    printf("总时间 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_total, max_total, min_total);

    // Release host and device memory.
    free(h_A);
    free(h_B);
    free(h_D);
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));
    CUDA_CHECK(cudaFree(d_D));

    // Destroy timing events.
    CUDA_CHECK(cudaEventDestroy(start_total));
    CUDA_CHECK(cudaEventDestroy(stop_total));
    CUDA_CHECK(cudaEventDestroy(start_h2d));
    CUDA_CHECK(cudaEventDestroy(stop_h2d));
    CUDA_CHECK(cudaEventDestroy(start_mul));
    CUDA_CHECK(cudaEventDestroy(stop_mul));
    CUDA_CHECK(cudaEventDestroy(start_inv));
    CUDA_CHECK(cudaEventDestroy(stop_inv));
    CUDA_CHECK(cudaEventDestroy(start_d2h));
    CUDA_CHECK(cudaEventDestroy(stop_d2h));

    return 0;
}