#include <cuda.h>
#include <cuda_fp16.h>
#include <mma.h>

#include <cstdlib>
#include <iostream>
using namespace nvcuda;

#define TILE_M 16
#define TILE_N 16
#define TILE_K 16

// Kernel: transpose a tensor from NCHW to NHWC layout.
// Launch: grid.z = batch index; (grid.x, grid.y) tile the (W, H) plane with
// blockDim (x=W, y=H). Each in-bounds thread copies all C channel values for
// its single (h, w) pixel. Reads are coalesced over w; writes stride by C.
__global__ void nchw_to_nhwc(const float* input, float* output, int N, int C, int H, int W) {
    const int n   = blockIdx.z;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;  // h
    const int col = blockIdx.x * blockDim.x + threadIdx.x;  // w

    // Guard the ragged edge of the spatial tiling.
    if (row >= H || col >= W) return;

    const float* src = input  + ((size_t)n * C * H + row) * W + col;   // &input[n][0][h][w]
    float*       dst = output + (((size_t)n * H + row) * W + col) * C; // &output[n][h][w][0]
    const int channel_stride = H * W;  // distance between channels in NCHW

    for (int c = 0; c < C; ++c)
        dst[c] = src[(size_t)c * channel_stride];
}

// Kernel: Tensor Core (WMMA) tiled GEMM, C = A * B accumulated in fp32.
//   A: [M x K] half, row-major.   B: [K x N] half, row-major.
//   C: [M x N] float, row-major.
// One warp computes one TILE_M x TILE_N (16x16) tile of C: warps along grid.x
// cover rows of C, blocks along grid.y cover columns. blockDim.x must be a
// multiple of 32. Partial tiles are NOT handled — M, N, K must be multiples
// of 16 (whole out-of-range tiles are skipped by the guard below).
// Requires SM70+ (WMMA).
__global__ void wmma_conv_kernel(const half* a, const half* b, float* c,
                                 int M, int N, int K) {
    int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / 32;
    int warpN = (blockIdx.y * blockDim.y + threadIdx.y);

    // Skip warps mapped entirely past the matrix edge.
    if (warpM * TILE_M >= M || warpN * TILE_N >= N) return;

    wmma::fragment<wmma::matrix_a, TILE_M, TILE_N, TILE_K, half, wmma::row_major> frag_a;
    // BUG FIX: B is stored row-major ([K x N], row stride N) and addressed
    // below as b + i*N + col with ldm = N. The fragment was previously
    // declared col_major, which made load_matrix_sync read the transpose of
    // the intended B tile. Declare it row_major to match the storage.
    wmma::fragment<wmma::matrix_b, TILE_M, TILE_N, TILE_K, half, wmma::row_major> frag_b;
    wmma::fragment<wmma::accumulator, TILE_M, TILE_N, TILE_K, float> frag_c;

    wmma::fill_fragment(frag_c, 0.0f);

    // March along the shared K dimension one TILE_K-wide slab at a time.
    for (int i = 0; i < K; i += TILE_K) {
        const half* tile_a = a + warpM * TILE_M * K + i;  // &A[warpM*16][i]
        const half* tile_b = b + i * N + warpN * TILE_N;  // &B[i][warpN*16]

        wmma::load_matrix_sync(frag_a, tile_a, K);  // ldm = row stride of A
        wmma::load_matrix_sync(frag_b, tile_b, N);  // ldm = row stride of B

        wmma::mma_sync(frag_c, frag_a, frag_b, frag_c);
    }

    float* tile_c = c + warpM * TILE_M * N + warpN * TILE_N;
    wmma::store_matrix_sync(tile_c, frag_c, N, wmma::mem_row_major);
}

// Host driver: runs a 3x3, stride-1, pad-1 convolution (N=64 images, C=256
// input channels, 14x14 spatial, K=256 output channels) as an explicit
// im2col + WMMA GEMM:
//   C[K x N*OH*OW] = A[K x C*3*3] * B[C*3*3 x N*OH*OW]
// A is the filter bank (one KCRS-flattened filter per row); B is the im2col
// patch matrix built from the NHWC-transposed input. All matrices row-major.
void run_tensorcore_conv() {
    const int N = 64, C = 256, H = 14, W = 14, K = 256;
    const int OH = 14, OW = 14;  // stride 1, pad 1 keeps the spatial size

    const int A_rows   = K;            // GEMM M: output channels
    const int B_cols   = N * OH * OW;  // GEMM N: output pixels
    const int shared_K = C * 3 * 3;    // GEMM K: receptive-field size
    // WMMA tiles are 16x16x16; all three dims above are multiples of 16 here.

    // Minimal error handling: abort on the first failing CUDA call.
    auto check = [](cudaError_t err) {
        if (err != cudaSuccess) {
            std::cerr << "CUDA error: " << cudaGetErrorString(err) << "\n";
            std::abort();
        }
    };

    size_t input_bytes  = (size_t)N * C * H * W * sizeof(float);
    size_t weight_bytes = (size_t)K * C * 3 * 3 * sizeof(float);

    float *input_nchw, *input_nhwc, *weights;
    check(cudaMallocManaged(&input_nchw, input_bytes));
    check(cudaMallocManaged(&input_nhwc, input_bytes));
    check(cudaMallocManaged(&weights, weight_bytes));

    // Random initialization in [0, 1).
    for (int i = 0; i < N * C * H * W; ++i) input_nchw[i] = static_cast<float>(rand()) / RAND_MAX;
    for (int i = 0; i < K * C * 3 * 3; ++i) weights[i] = static_cast<float>(rand()) / RAND_MAX;

    // Transpose the input to NHWC so im2col can read channels contiguously.
    dim3 block(16, 16);
    dim3 grid((W + 15) / 16, (H + 15) / 16, N);
    nchw_to_nhwc<<<grid, block>>>(input_nchw, input_nhwc, N, C, H, W);
    check(cudaGetLastError());
    check(cudaDeviceSynchronize());

    // Convert both GEMM operands to half for the tensor cores.
    half *A_half, *B_half;
    check(cudaMallocManaged(&A_half, (size_t)A_rows * shared_K * sizeof(half)));
    check(cudaMallocManaged(&B_half, (size_t)shared_K * B_cols * sizeof(half)));

    // A: the filters are already [K x C*3*3] row-major (KCRS); copy per element.
    for (int i = 0; i < A_rows * shared_K; ++i) A_half[i] = __float2half(weights[i]);

    // B: im2col patch matrix, [shared_K x B_cols] row-major.
    // BUG FIX: the previous code copied input_nhwc[i] for all
    // i < shared_K * B_cols, reading roughly 9x past the end of the input
    // buffer (which holds only N*C*H*W floats) — the im2col step was missing.
    // Build the patch matrix properly: row (c*9 + kh*3 + kw) matches A's
    // KCRS column order, and out-of-image taps (pad = 1) are zero-filled.
    for (int n = 0; n < N; ++n) {
        for (int oh = 0; oh < OH; ++oh) {
            for (int ow = 0; ow < OW; ++ow) {
                int col = (n * OH + oh) * OW + ow;
                for (int kh = 0; kh < 3; ++kh) {
                    for (int kw = 0; kw < 3; ++kw) {
                        int ih = oh + kh - 1;  // pad = 1
                        int iw = ow + kw - 1;
                        bool inside = (ih >= 0 && ih < H && iw >= 0 && iw < W);
                        const float* px = inside
                            ? input_nhwc + ((size_t)(n * H + ih) * W + iw) * C
                            : nullptr;
                        for (int c = 0; c < C; ++c) {
                            int row = c * 9 + kh * 3 + kw;
                            B_half[(size_t)row * B_cols + col] =
                                __float2half(inside ? px[c] : 0.0f);
                        }
                    }
                }
            }
        }
    }

    // GEMM output: [A_rows x B_cols] float, row-major. (The old unused
    // full-size float `output` buffer has been removed.)
    float* output_wmma;
    check(cudaMallocManaged(&output_wmma, (size_t)A_rows * B_cols * sizeof(float)));

    // Launch the Tensor Core kernel: one warp per 16x16 output tile;
    // grid.x covers rows of C, grid.y covers columns.
    dim3 blockTC(32, 1);
    dim3 gridTC((A_rows + TILE_M - 1) / TILE_M, (B_cols + TILE_N - 1) / TILE_N);
    wmma_conv_kernel<<<gridTC, blockTC>>>(A_half, B_half, output_wmma, A_rows, B_cols, shared_K);
    check(cudaGetLastError());
    check(cudaDeviceSynchronize());

    std::cout << "WMMA convolution done.\n";

    cudaFree(input_nchw);
    cudaFree(input_nhwc);
    cudaFree(weights);
    cudaFree(A_half);
    cudaFree(B_half);
    cudaFree(output_wmma);
}

// Program entry point: run the tensor-core convolution demo once.
int main() {
    run_tensorcore_conv();
    return 0;
}
