#include <cuda_runtime.h>
#include <cuComplex.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>

// 定义常量
#define N 1536          // 矩阵对数量 (512 * 3)
#define ROWS 50         // 矩阵行数
#define COLS 1604       // 矩阵列数
#define TILE_WIDTH 32   // 矩阵乘法分块宽度
#define BLOCK_SIZE 256  // 求逆内核的线程块大小

// 检查 CUDA 错误
// Check a CUDA runtime call and abort with file/line context on failure.
// Fix: the previous version expanded its argument twice (once in the
// comparison, once inside cudaGetErrorString), so on the error path a live
// call such as CUDA_CHECK(cudaMalloc(...)) was re-executed. Capture the
// result exactly once in a local variable.
#define CUDA_CHECK(err) do { \
    cudaError_t err_ = (err); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA错误: %s 在 %s:%d\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
        exit(EXIT_FAILURE); \
    } \
} while(0)

// 矩阵乘法内核：计算 [50x1604] * [1604x50]，避免 for 循环
// Batched complex matrix multiply: C[i] = A[i] * B[i] for i in [0, N),
// where A[i] is ROWS x COLS and B[i] is COLS x ROWS, so C[i] is ROWS x ROWS.
//
// Expected launch: grid = (N, ceil(ROWS/TILE_WIDTH), ceil(ROWS/TILE_WIDTH)),
// block = (TILE_WIDTH, TILE_WIDTH). Static shared memory:
// 2 * TILE_WIDTH * TILE_WIDTH * sizeof(cuComplex).
//
// Fixes over the previous version:
//  1. __syncthreads() was inside the divergent `if (row < ROWS && col < ROWS)`
//     guard; with ROWS=50 and TILE_WIDTH=32 the edge blocks contain threads
//     that skipped the barrier (undefined behavior / hang). Barriers are now
//     reached unconditionally by all threads; only loads and the final store
//     are bounds-checked.
//  2. The B tile was loaded as B_shared[local_k][threadIdx.x] with
//     local_k == threadIdx.x (blockDim.x == TILE_WIDTH), so only the diagonal
//     B_shared[tx][tx] was ever written and the inner product read
//     uninitialized shared memory. The k-dimension of the B tile is now
//     indexed by threadIdx.y, filling the whole tile.
__global__ void matmul_kernel(const cuComplex* A, const cuComplex* B, cuComplex* C) {
    int i = blockIdx.x;                                  // batch index (which matrix pair)
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;     // row of C[i]
    int col = blockIdx.z * TILE_WIDTH + threadIdx.x;     // column of C[i]

    __shared__ cuComplex A_shared[TILE_WIDTH][TILE_WIDTH];
    __shared__ cuComplex B_shared[TILE_WIDTH][TILE_WIDTH];

    // Per-batch base pointers (size_t to avoid any 32-bit offset overflow).
    const cuComplex* A_mat = A + (size_t)i * ROWS * COLS;
    const cuComplex* B_mat = B + (size_t)i * COLS * ROWS;

    cuComplex sum = make_cuComplex(0.0f, 0.0f);          // running dot product
    int num_tiles = (COLS + TILE_WIDTH - 1) / TILE_WIDTH;

    for (int t = 0; t < num_tiles; t++) {
        int kA = t * TILE_WIDTH + threadIdx.x;           // column of A this thread loads
        int kB = t * TILE_WIDTH + threadIdx.y;           // row of B this thread loads

        // Cooperative tile loads; out-of-range entries are zero-padded so the
        // fixed-length inner loop below needs no extra bounds checks.
        A_shared[threadIdx.y][threadIdx.x] = (row < ROWS && kA < COLS)
            ? A_mat[row * COLS + kA]
            : make_cuComplex(0.0f, 0.0f);
        B_shared[threadIdx.y][threadIdx.x] = (kB < COLS && col < ROWS)
            ? B_mat[kB * ROWS + col]
            : make_cuComplex(0.0f, 0.0f);
        __syncthreads();  // all loads complete before any thread reads the tiles

        #pragma unroll
        for (int m = 0; m < TILE_WIDTH; m++) {
            sum = cuCaddf(sum, cuCmulf(A_shared[threadIdx.y][m], B_shared[m][threadIdx.x]));
        }
        __syncthreads();  // all reads complete before the next tile overwrites shared memory
    }

    // Store the result; only in-range threads write.
    if (row < ROWS && col < ROWS) {
        C[(size_t)i * ROWS * ROWS + row * ROWS + col] = sum;
    }
}

// 矩阵求逆内核：对 50x50 矩阵求逆，使用 Gaussian-Jordan 消元
// Batched matrix inversion: D[i] = inv(C[i]) for i in [0, N), each ROWS x ROWS,
// via Gauss-Jordan elimination with partial (max-magnitude) pivoting on an
// augmented matrix [C | I] held entirely in shared memory.
//
// Expected launch: grid = (N, 1, 1), block = (BLOCK_SIZE, 1, 1), one block per
// matrix. Static shared memory: ROWS * 2*ROWS * sizeof(cuComplex)
// (50 * 100 * 8 = 40000 bytes, within the default 48 KB limit).
//
// NOTE(review): a structurally singular matrix leaves pivot == 0 and cuCdivf
// then produces Inf/NaN that propagate into D — there is no singularity check.
// Confirm upstream that C[i] (here A*B products of random data) is always
// well-conditioned, or add a pivot-magnitude threshold.
__global__ void inverse_kernel(const cuComplex* C, cuComplex* D) {
    int i = blockIdx.x; // batch index: the i-th matrix
    if (i >= N) return;

    // Augmented matrix [C | I] in shared memory; columns [0, ROWS) hold the
    // working copy of C, columns [ROWS, 2*ROWS) start as the identity and end
    // as the inverse.
    __shared__ cuComplex aug[ROWS][2 * ROWS];
    int tid = threadIdx.x;

    // Cooperative load: each thread strides over the ROWS*ROWS elements.
    for (int t = tid; t < ROWS * ROWS; t += blockDim.x) {
        int row = t / ROWS;
        int col = t % ROWS;
        aug[row][col] = C[i * ROWS * ROWS + row * ROWS + col]; // left half: C
        aug[row][ROWS + col] = (row == col) ? make_cuComplex(1.0f, 0.0f) : make_cuComplex(0.0f, 0.0f); // right half: I
    }
    __syncthreads(); // load must complete before elimination reads aug

    // Gauss-Jordan elimination, one pivot column k at a time.
    for (int k = 0; k < ROWS; k++) {
        // Pivot selection, row swap, and row normalization are inherently
        // sequential, so thread 0 does them alone; the barrier below
        // publishes the normalized pivot row to the rest of the block.
        if (tid == 0) {
            // Partial pivoting: pick the row (>= k) with the largest |aug[m][k]|.
            float max_val = 0.0f;
            int max_idx = k;
            for (int m = k; m < ROWS; m++) {
                float mag = cuCabsf(aug[m][k]);
                if (mag > max_val) {
                    max_val = mag;
                    max_idx = m;
                }
            }
            // Swap the pivot row into position k (full augmented width).
            if (max_idx != k) {
                for (int j = 0; j < 2 * ROWS; j++) {
                    cuComplex temp = aug[k][j];
                    aug[k][j] = aug[max_idx][j];
                    aug[max_idx][j] = temp;
                }
            }
            // Scale row k so that aug[k][k] == 1 (complex division; see
            // singularity note in the header if pivot is 0).
            cuComplex pivot = aug[k][k];
            for (int j = 0; j < 2 * ROWS; j++) {
                aug[k][j] = cuCdivf(aug[k][j], pivot);
            }
        }
        __syncthreads(); // normalized pivot row visible to all threads

        // Parallel elimination: thread tid clears column k of row tid
        // (one thread per row; threads >= ROWS idle but still hit the barrier).
        if (tid < ROWS && tid != k) {
            cuComplex factor = aug[tid][k];
            for (int j = 0; j < 2 * ROWS; j++) {
                aug[tid][j] = cuCsubf(aug[tid][j], cuCmulf(factor, aug[k][j]));
            }
        }
        __syncthreads(); // elimination done before the next pivot step reads aug
    }

    // Right half of the augmented matrix now holds inv(C[i]); copy it out.
    for (int t = tid; t < ROWS * ROWS; t += blockDim.x) {
        int row = t / ROWS;
        int col = t % ROWS;
        D[i * ROWS * ROWS + row * ROWS + col] = aug[row][ROWS + col];
    }
}

// 主函数
// Driver: allocates N pairs of (ROWS x COLS, COLS x ROWS) complex matrices,
// then 1000 times performs H2D copy -> batched multiply -> batched inverse ->
// D2H copy, timing each stage with CUDA events and reporting per-run and
// aggregate (avg/max/min) statistics.
//
// Fixes over the previous version: the three host malloc results (~1 GB,
// ~1 GB, ~19 MB) were never checked before use; the launch configuration is
// now computed once instead of being rebuilt on every iteration.
int main() {
    // Buffer sizes in bytes.
    size_t size_A = N * ROWS * COLS * sizeof(cuComplex); // input matrices A
    size_t size_B = N * COLS * ROWS * sizeof(cuComplex); // input matrices B
    size_t size_C = N * ROWS * ROWS * sizeof(cuComplex); // intermediate products C = A*B
    size_t size_D = N * ROWS * ROWS * sizeof(cuComplex); // outputs D = inv(C)

    // Host allocations — roughly 2 GB total, so failure is plausible and must
    // be checked before the init loops dereference the pointers.
    cuComplex *h_A = (cuComplex*)malloc(size_A);
    cuComplex *h_B = (cuComplex*)malloc(size_B);
    cuComplex *h_D = (cuComplex*)malloc(size_D);
    if (h_A == NULL || h_B == NULL || h_D == NULL) {
        fprintf(stderr, "host malloc failed (A=%zu B=%zu D=%zu bytes)\n", size_A, size_B, size_D);
        free(h_A);
        free(h_B);
        free(h_D);
        return EXIT_FAILURE;
    }

    // Initialize inputs with uniform [0,1) random values (placeholder data;
    // replace with real data as needed). rand() is unseeded, so runs are
    // reproducible.
    for (size_t i = 0; i < N * ROWS * COLS; i++) {
        h_A[i] = make_cuComplex(rand() / (float)RAND_MAX, rand() / (float)RAND_MAX);
    }
    for (size_t i = 0; i < N * COLS * ROWS; i++) {
        h_B[i] = make_cuComplex(rand() / (float)RAND_MAX, rand() / (float)RAND_MAX);
    }

    // Device allocations.
    cuComplex *d_A, *d_B, *d_C, *d_D;
    CUDA_CHECK(cudaMalloc(&d_A, size_A));
    CUDA_CHECK(cudaMalloc(&d_B, size_B));
    CUDA_CHECK(cudaMalloc(&d_C, size_C));
    CUDA_CHECK(cudaMalloc(&d_D, size_D));

    // CUDA events for per-stage timing.
    cudaEvent_t start_total, stop_total, start_h2d, stop_h2d, start_mul, stop_mul, start_inv, stop_inv, start_d2h, stop_d2h;
    CUDA_CHECK(cudaEventCreate(&start_total));
    CUDA_CHECK(cudaEventCreate(&stop_total));
    CUDA_CHECK(cudaEventCreate(&start_h2d));
    CUDA_CHECK(cudaEventCreate(&stop_h2d));
    CUDA_CHECK(cudaEventCreate(&start_mul));
    CUDA_CHECK(cudaEventCreate(&stop_mul));
    CUDA_CHECK(cudaEventCreate(&start_inv));
    CUDA_CHECK(cudaEventCreate(&stop_inv));
    CUDA_CHECK(cudaEventCreate(&start_d2h));
    CUDA_CHECK(cudaEventCreate(&stop_d2h));

    // Per-run timings (ms), used for the summary statistics below.
    float times_h2d[1000], times_mul[1000], times_inv[1000], times_d2h[1000], times_total[1000];

    // Launch configuration is loop-invariant — compute it once.
    // matmul: one grid.x slot per matrix pair; grid.y/grid.z tile the 50x50 output.
    dim3 grid_mul(N, (ROWS + TILE_WIDTH - 1) / TILE_WIDTH, (ROWS + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 block_mul(TILE_WIDTH, TILE_WIDTH);

    // Benchmark loop: 1000 timed pipeline executions.
    for (int run = 0; run < 1000; run++) {
        CUDA_CHECK(cudaEventRecord(start_total, 0));

        // Host-to-device copy (synchronous memcpy; events bracket it in stream 0).
        CUDA_CHECK(cudaEventRecord(start_h2d, 0));
        CUDA_CHECK(cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaEventRecord(stop_h2d, 0));

        // Batched matrix multiply: C = A * B.
        CUDA_CHECK(cudaEventRecord(start_mul, 0));
        matmul_kernel<<<grid_mul, block_mul>>>(d_A, d_B, d_C);
        CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors
        CUDA_CHECK(cudaEventRecord(stop_mul, 0));

        // Batched inversion: D = inv(C), one block per matrix.
        CUDA_CHECK(cudaEventRecord(start_inv, 0));
        inverse_kernel<<<N, BLOCK_SIZE>>>(d_C, d_D);
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaEventRecord(stop_inv, 0));

        // Device-to-host copy of the results.
        CUDA_CHECK(cudaEventRecord(start_d2h, 0));
        CUDA_CHECK(cudaMemcpy(h_D, d_D, size_D, cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaEventRecord(stop_d2h, 0));

        CUDA_CHECK(cudaEventRecord(stop_total, 0));
        // Block until the whole pipeline finishes so all event timestamps are valid.
        CUDA_CHECK(cudaEventSynchronize(stop_total));

        // Stage timings for this run (milliseconds).
        float t_h2d, t_mul, t_inv, t_d2h, t_total;
        CUDA_CHECK(cudaEventElapsedTime(&t_h2d, start_h2d, stop_h2d));
        CUDA_CHECK(cudaEventElapsedTime(&t_mul, start_mul, stop_mul));
        CUDA_CHECK(cudaEventElapsedTime(&t_inv, start_inv, stop_inv));
        CUDA_CHECK(cudaEventElapsedTime(&t_d2h, start_d2h, stop_d2h));
        CUDA_CHECK(cudaEventElapsedTime(&t_total, start_total, stop_total));

        times_h2d[run] = t_h2d;
        times_mul[run] = t_mul;
        times_inv[run] = t_inv;
        times_d2h[run] = t_d2h;
        times_total[run] = t_total;

        // Live per-run report.
        printf("运行 %d: 拷贝H2D: %.3f ms, 乘法: %.3f ms, 求逆: %.3f ms, 拷贝D2H: %.3f ms, 总时间: %.3f ms\n",
               run, t_h2d, t_mul, t_inv, t_d2h, t_total);
    }

    // Aggregate statistics over all runs.
    float avg_h2d = 0, avg_mul = 0, avg_inv = 0, avg_d2h = 0, avg_total = 0;
    float max_h2d = 0, max_mul = 0, max_inv = 0, max_d2h = 0, max_total = 0;
    float min_h2d = FLT_MAX, min_mul = FLT_MAX, min_inv = FLT_MAX, min_d2h = FLT_MAX, min_total = FLT_MAX;

    for (int i = 0; i < 1000; i++) {
        avg_h2d += times_h2d[i];
        avg_mul += times_mul[i];
        avg_inv += times_inv[i];
        avg_d2h += times_d2h[i];
        avg_total += times_total[i];

        max_h2d = fmaxf(max_h2d, times_h2d[i]);
        max_mul = fmaxf(max_mul, times_mul[i]);
        max_inv = fmaxf(max_inv, times_inv[i]);
        max_d2h = fmaxf(max_d2h, times_d2h[i]);
        max_total = fmaxf(max_total, times_total[i]);

        min_h2d = fminf(min_h2d, times_h2d[i]);
        min_mul = fminf(min_mul, times_mul[i]);
        min_inv = fminf(min_inv, times_inv[i]);
        min_d2h = fminf(min_d2h, times_d2h[i]);
        min_total = fminf(min_total, times_total[i]);
    }

    avg_h2d /= 1000;
    avg_mul /= 1000;
    avg_inv /= 1000;
    avg_d2h /= 1000;
    avg_total /= 1000;

    // Summary report.
    printf("\n统计结果（基于1000次运行）：\n");
    printf("拷贝H2D - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_h2d, max_h2d, min_h2d);
    printf("矩阵乘法 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_mul, max_mul, min_mul);
    printf("矩阵求逆 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_inv, max_inv, min_inv);
    printf("拷贝D2H - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_d2h, max_d2h, min_d2h);
    printf("总时间 - 平均: %.3f ms, 最大: %.3f ms, 最小: %.3f ms\n", avg_total, max_total, min_total);

    // Release host and device memory.
    free(h_A);
    free(h_B);
    free(h_D);
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));
    CUDA_CHECK(cudaFree(d_D));

    // Destroy timing events.
    CUDA_CHECK(cudaEventDestroy(start_total));
    CUDA_CHECK(cudaEventDestroy(stop_total));
    CUDA_CHECK(cudaEventDestroy(start_h2d));
    CUDA_CHECK(cudaEventDestroy(stop_h2d));
    CUDA_CHECK(cudaEventDestroy(start_mul));
    CUDA_CHECK(cudaEventDestroy(stop_mul));
    CUDA_CHECK(cudaEventDestroy(start_inv));
    CUDA_CHECK(cudaEventDestroy(stop_inv));
    CUDA_CHECK(cudaEventDestroy(start_d2h));
    CUDA_CHECK(cudaEventDestroy(stop_d2h));

    return 0;
}