#include "utils.cuh"
#include "kernel_v2_naive_wmma.cuh"
#include "kernel_v3_pipeline_wmma.cuh"

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include <string>
#include <vector>

// Benchmark driver: compares two custom FP16 WMMA GEMM kernels (V2 naive,
// V3 4-stage cp.async pipeline) against a cuBLAS baseline.
//
// Usage: ./gemm [M N K]   (defaults to 5376 x 5376 x 2048; dimensions are
// rounded up to kernel tile multiples: 128 for M/N, 32 for K).
//
// Layout convention: A is row-major (M x K), B is stored as B[k*N + n]
// (K-major, matching what the kernels expect), C is row-major (M x N).
// Accumulation is FP32; inputs/outputs are FP16.
int main(int argc, char** argv) {
    // Matrix dimensions (from paper: 5376x5376x2048)
    int M = 5376;
    int N = 5376;
    int K = 2048;
    
    if (argc >= 4) {
        M = atoi(argv[1]);
        N = atoi(argv[2]);
        K = atoi(argv[3]);
    }
    
    // Round up to multiples of 128 for M and N, 32 for K
    // (block tile is 128x128x32, so the kernels require aligned dimensions).
    M = ((M + 127) / 128) * 128;
    N = ((N + 127) / 128) * 128;
    K = ((K + 31) / 32) * 32;
    
    printf("=================================================================\n");
    printf("CUDA GEMM Kernel Optimization Tutorial\n");
    printf("Following: 如何用CUDA写有CuBLAS 90%%性能的GEMM Kernel\n");
    printf("=================================================================\n");
    printf("Matrix dimensions: M=%d, N=%d, K=%d\n", M, N, K);
    printf("Precision: FP16 input/output, FP32 accumulation\n");
    printf("Target GPU: 5060Ti\n");
    
    // Check GPU compute capability
    int device;
    cudaDeviceProp prop;
    CHECK_CUDA(cudaGetDevice(&device));
    CHECK_CUDA(cudaGetDeviceProperties(&prop, device));
    printf("GPU: %s (Compute Capability: %d.%d)\n", prop.name, prop.major, prop.minor);
    printf("=================================================================\n\n");
    
    // Allocate host memory.
    // Widen to size_t BEFORE multiplying: M*K as 32-bit int overflows for
    // large user-supplied dimensions (e.g. M=K=46341 already exceeds INT_MAX).
    size_t size_A = (size_t)M * K * sizeof(half);
    size_t size_B = (size_t)K * N * sizeof(half);
    size_t size_C = (size_t)M * N * sizeof(half);
    
    half *h_A = (half*)malloc(size_A);
    half *h_B_row = (half*)malloc(size_B);
    half *h_B = (half*)malloc(size_B);      // Col-major B
    half *h_C = (half*)malloc(size_C);
    half *h_C_ref = (half*)malloc(size_C);
    half *h_C_test = (half*)malloc(size_C);
    
    // Fail loudly on allocation failure instead of writing through NULL
    // inside init_matrix_fp16 below.
    if (!h_A || !h_B_row || !h_B || !h_C || !h_C_ref || !h_C_test) {
        fprintf(stderr, "Host memory allocation failed (requested %zu + %zu + %zu bytes x2)\n",
                size_A, size_B, size_C);
        return 1;
    }
    
    // Initialize matrices with random FP16 values.
    srand(time(NULL));
    init_matrix_fp16(h_A, M * K);
    init_matrix_fp16(h_B_row, K * N);
    
    // Convert B from row-major to col-major
    transpose_matrix(h_B_row, h_B, K, N);
    
    // Initialize C to zero (size_t index to match the widened element count).
    for (size_t i = 0; i < (size_t)M * N; i++) {
        h_C[i] = __float2half(0.0f);
    }
    
    // Debug: Print first few values of input matrices
    printf("[DEBUG HOST] First few values of matrix A (row-major %dx%d):\n", M, K);
    for (int i = 0; i < 4 && i < M; ++i) {
        for (int j = 0; j < 4 && j < K; ++j) {
            printf("  A[%d][%d] = %.3f\n", i, j, __half2float(h_A[i * K + j]));
        }
    }
    printf("[DEBUG HOST] First few values of matrix B (col-major, K=%d, N=%d):\n", K, N);
    printf("  (B is stored as B[k*N+n], where k is K dimension, n is N dimension)\n");
    for (int k = 0; k < 4 && k < K; ++k) {
        for (int n = 0; n < 4 && n < N; ++n) {
            printf("  B[k=%d][n=%d] = %.3f (idx=%d)\n", k, n, __half2float(h_B[k * N + n]), k * N + n);
        }
    }
    
    // Allocate device memory
    half *d_A, *d_B, *d_C, *d_C_ref;
    CHECK_CUDA(cudaMalloc(&d_A, size_A));
    CHECK_CUDA(cudaMalloc(&d_B, size_B));
    CHECK_CUDA(cudaMalloc(&d_C, size_C));
    CHECK_CUDA(cudaMalloc(&d_C_ref, size_C));
    
    // Copy data to device
    CHECK_CUDA(cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(d_C, h_C, size_C, cudaMemcpyHostToDevice));
    
    // Setup cuBLAS (handle reused for the baseline benchmark, destroyed at exit).
    cublasHandle_t handle;
    CHECK_CUBLAS(cublasCreate(&handle));
    
    // =======================================================================
    // Version 1: cuBLAS Baseline
    // =======================================================================
    printf("Version 1: cuBLAS Baseline\n");
    printf("-----------------------------------------------------------------\n");
    printf("Configuration:\n");
    printf("  - FP32 accumulator, Tensor Core enabled\n");
    printf("  - Matrix layout: A row-major, B col-major\n");
    printf("  - alpha=1.0, beta=0.0\n\n");
    
    BenchmarkResult baseline_result = benchmark_cublas(handle, d_A, d_B, d_C_ref, M, N, K);
    
    print_benchmark_header();
    print_benchmark_result("cuBLAS (Baseline)", baseline_result);
    printf("\n");
    
    // Copy reference result back; used to verify the custom kernels below.
    CHECK_CUDA(cudaMemcpy(h_C_ref, d_C_ref, size_C, cudaMemcpyDeviceToHost));
    
    // Debug: Print first few reference results
    printf("[DEBUG HOST] First few values of cuBLAS result C (row-major %dx%d):\n", M, N);
    for (int i = 0; i < 4 && i < M; ++i) {
        for (int j = 0; j < 4 && j < N; ++j) {
            printf("  C_ref[%d][%d] = %.3f (idx=%d)\n", i, j, __half2float(h_C_ref[i * N + j]), i * N + j);
        }
    }
    
    // Manual verification of C[0][0]: dot product of A row 0 and B column 0,
    // accumulated on the host in FP32, as a sanity check on layout assumptions.
    float manual_c00 = 0.0f;
    for (int k = 0; k < K; ++k) {
        float a_val = __half2float(h_A[0 * K + k]);  // A[0][k] row-major
        float b_val = __half2float(h_B[k * N + 0]);  // B[k][0] col-major: B[k*N+n]
        manual_c00 += a_val * b_val;
    }
    printf("[DEBUG HOST] Manual calculation: C[0][0] = %.3f (sum of %d products)\n", manual_c00, K);
    printf("[DEBUG HOST] cuBLAS C[0][0] = %.3f, difference = %.3f\n", 
           __half2float(h_C_ref[0]), manual_c00 - __half2float(h_C_ref[0]));
    
    float cublas_tflops = baseline_result.tflops;
    
    // =======================================================================
    // Version 2: Naive WMMA Kernel
    // =======================================================================
    printf("\nVersion 2: Naive WMMA Kernel\n");
    printf("-----------------------------------------------------------------\n");
    printf("Configuration:\n");
    printf("  - Block tile: 128x128x32\n");
    printf("  - Warp tile: 64x64x16\n");
    printf("  - WMMA tile: 16x16x16\n");
    printf("  - Threads: [32, 2, 2] = 128\n");
    printf("  - Shared memory: direct mapping, no pipeline\n");
    
    printf("[DEBUG] Getting smem size...\n");
    int smem_v2 = v2_naive_wmma::get_smem_size();
    printf("[DEBUG] Shared memory size: %.2f KB\n\n", smem_v2 / 1024.0f);
    
    // Reset C matrix
    printf("[DEBUG] Resetting C matrix...\n");
    CHECK_CUDA(cudaMemcpy(d_C, h_C, size_C, cudaMemcpyHostToDevice));
    
    // Launch configuration
    printf("[DEBUG] Getting grid dimensions for M=%d, N=%d...\n", M, N);
    dim3 grid_v2 = v2_naive_wmma::get_grid_dim(M, N);
    printf("[DEBUG] Grid: (%d, %d, %d)\n", grid_v2.x, grid_v2.y, grid_v2.z);
    
    printf("[DEBUG] Getting block dimensions...\n");
    dim3 block_v2 = v2_naive_wmma::get_block_dim();
    printf("[DEBUG] Block: (%d, %d, %d)\n", block_v2.x, block_v2.y, block_v2.z);
    printf("[DEBUG] Shared memory: %d bytes\n", smem_v2);
    
    // Check GPU limits: the default per-block dynamic shared memory limit is
    // 48 KB; anything above requires an explicit opt-in via cudaFuncSetAttribute.
    printf("[DEBUG] Checking GPU shared memory limits...\n");
    int max_shared_mem;
    CHECK_CUDA(cudaDeviceGetAttribute(&max_shared_mem, cudaDevAttrMaxSharedMemoryPerBlock, device));
    printf("[DEBUG] Max shared memory per block: %d bytes (%.2f KB)\n", max_shared_mem, max_shared_mem / 1024.0f);
    
    if (smem_v2 > max_shared_mem) {
        printf("[WARNING] Requested shared memory (%d bytes) exceeds default limit (%d bytes)\n", smem_v2, max_shared_mem);
        printf("[DEBUG] Attempting to set larger shared memory carveout...\n");
        
        // Inspect the kernel's current attributes before raising the limit.
        cudaFuncAttributes attr;
        CHECK_CUDA(cudaFuncGetAttributes(&attr, v2_naive_wmma_matmul));
        printf("[DEBUG] Kernel static shared memory: %d bytes\n", (int)attr.sharedSizeBytes);
        printf("[DEBUG] Kernel max dynamic shared memory: %d bytes\n", (int)attr.maxDynamicSharedSizeBytes);
        
        // Opt in to the larger dynamic shared memory size for this kernel.
        CHECK_CUDA(cudaFuncSetAttribute(v2_naive_wmma_matmul, 
                                        cudaFuncAttributeMaxDynamicSharedMemorySize, 
                                        smem_v2));
        printf("[DEBUG] Successfully increased shared memory limit\n");
    }
    
    cudaEvent_t start, stop;
    CHECK_CUDA(cudaEventCreate(&start));
    CHECK_CUDA(cudaEventCreate(&stop));
    
    // Warmup: absorb one-time JIT/caching costs; check each launch for
    // configuration errors before timing.
    printf("[DEBUG] Starting warmup...\n");
    for (int i = 0; i < 10; i++) {
        v2_naive_wmma_matmul<<<grid_v2, block_v2, smem_v2>>>(d_A, d_B, d_C, M, N, K);
        CHECK_CUDA(cudaGetLastError());
    }
    CHECK_CUDA(cudaDeviceSynchronize());
    
    // Benchmark iterations: 100 launches bracketed by CUDA events; the event
    // pair measures GPU time, so no host sync is needed between launches.
    printf("[DEBUG] Running benchmark...\n");
    CHECK_CUDA(cudaEventRecord(start));
    for (int i = 0; i < 100; i++) {
        v2_naive_wmma_matmul<<<grid_v2, block_v2, smem_v2>>>(d_A, d_B, d_C, M, N, K);
    }
    CHECK_CUDA(cudaEventRecord(stop));
    CHECK_CUDA(cudaEventSynchronize(stop));
    CHECK_CUDA(cudaGetLastError());
    
    float v2_ms = 0;
    CHECK_CUDA(cudaEventElapsedTime(&v2_ms, start, stop));
    v2_ms /= 100;  // average per-launch time
    
    // GEMM does 2*M*N*K flops (one multiply + one add per inner-product term).
    // 2.0 * M promotes to double before the remaining multiplies, so this does
    // not overflow even though M*N*K exceeds INT_MAX.
    double flops = 2.0 * M * N * K;
    BenchmarkResult v2_result;
    v2_result.time_ms = v2_ms;
    v2_result.tflops = flops / (v2_ms * 1e-3) / 1e12;
    v2_result.percentage_of_cublas = v2_result.tflops / cublas_tflops * 100.0f;
    
    print_benchmark_result("V2: Naive WMMA", v2_result);
    
    // Verify V2 output against the cuBLAS reference.
    CHECK_CUDA(cudaMemcpy(h_C_test, d_C, size_C, cudaMemcpyDeviceToHost));
    
    // Debug: sample positions chosen to hit different 128x128 block tiles,
    // so a per-block indexing bug in the kernel shows up immediately.
    printf("[DEBUG HOST] V2 results from different blocks:\n");
    int check_positions[][2] = {{0,0}, {0,128}, {128,0}, {128,128}, {0,1}, {1,0}};
    for (int p = 0; p < 6; ++p) {
        int i = check_positions[p][0];
        int j = check_positions[p][1];
        if (i < M && j < N) {
            float v2_val = __half2float(h_C_test[i * N + j]);
            float ref_val = __half2float(h_C_ref[i * N + j]);
            printf("  V2[%d][%d] = %.3f, ref = %.3f, diff = %.3f (block %d,%d)\n", 
                   i, j, v2_val, ref_val, v2_val - ref_val, j/128, i/128);
        }
    }
    
    verify_results(h_C_ref, h_C_test, M, N, "V2");
    printf("\n");
    
    // =======================================================================
    // Version 3: 4-Stage Pipeline WMMA Kernel
    // =======================================================================
    printf("\nVersion 3: 4-Stage Pipeline WMMA Kernel\n");
    printf("-----------------------------------------------------------------\n");
    printf("Configuration:\n");
    printf("  - Same tile sizes as V2\n");
    printf("  - Uses cp.async for async global->shared transfer\n");
    printf("  - 4-stage software pipeline (prologue + main loop + epilogue)\n");
    
    printf("[DEBUG] Getting V3 smem size...\n");
    int smem_v3 = v3_pipeline_wmma::get_smem_size();
    printf("[DEBUG] Shared memory size: %.2f KB (4x buffers)\n\n", smem_v3 / 1024.0f);
    
    // Reset C matrix
    printf("[DEBUG] Resetting C matrix for V3...\n");
    CHECK_CUDA(cudaMemcpy(d_C, h_C, size_C, cudaMemcpyHostToDevice));
    
    // Launch configuration
    printf("[DEBUG] Getting V3 grid dimensions...\n");
    dim3 grid_v3 = v3_pipeline_wmma::get_grid_dim(M, N);
    printf("[DEBUG] Grid: (%d, %d, %d)\n", grid_v3.x, grid_v3.y, grid_v3.z);
    
    printf("[DEBUG] Getting V3 block dimensions...\n");
    dim3 block_v3 = v3_pipeline_wmma::get_block_dim();
    printf("[DEBUG] Block: (%d, %d, %d)\n", block_v3.x, block_v3.y, block_v3.z);
    printf("[DEBUG] Shared memory: %d bytes\n", smem_v3);
    
    // Configure shared memory for V3 (same opt-in as V2; the 4-stage buffers
    // make smem_v3 roughly 4x the V2 requirement).
    if (smem_v3 > max_shared_mem) {
        printf("[WARNING] V3 requested shared memory (%d bytes) exceeds default limit (%d bytes)\n", smem_v3, max_shared_mem);
        printf("[DEBUG] Attempting to set larger shared memory for V3...\n");
        
        CHECK_CUDA(cudaFuncSetAttribute(v3_pipeline_wmma::matmul, 
                                        cudaFuncAttributeMaxDynamicSharedMemorySize, 
                                        smem_v3));
        printf("[DEBUG] Successfully increased V3 shared memory limit\n");
    }
    
    // Warmup
    printf("[DEBUG] Starting V3 warmup...\n");
    for (int i = 0; i < 10; i++) {
        v3_pipeline_wmma::matmul<<<grid_v3, block_v3, smem_v3>>>(d_A, d_B, d_C, M, N, K);
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            printf("[ERROR] V3 Kernel launch failed at warmup iteration %d: %s\n", i, cudaGetErrorString(err));
            CHECK_CUDA(err);
        }
    }
    CHECK_CUDA(cudaDeviceSynchronize());
    printf("[DEBUG] V3 warmup complete\n");
    
    // Benchmark (200 iterations; the faster kernel needs more samples for a
    // stable average).
    CHECK_CUDA(cudaEventRecord(start));
    for (int i = 0; i < 200; i++) {
        v3_pipeline_wmma::matmul<<<grid_v3, block_v3, smem_v3>>>(d_A, d_B, d_C, M, N, K);
    }
    CHECK_CUDA(cudaEventRecord(stop));
    CHECK_CUDA(cudaEventSynchronize(stop));
    CHECK_CUDA(cudaGetLastError());
    
    float v3_ms = 0;
    CHECK_CUDA(cudaEventElapsedTime(&v3_ms, start, stop));
    v3_ms /= 200;  // average per-launch time
    
    BenchmarkResult v3_result;
    v3_result.time_ms = v3_ms;
    v3_result.tflops = flops / (v3_ms * 1e-3) / 1e12;
    v3_result.percentage_of_cublas = v3_result.tflops / cublas_tflops * 100.0f;
    
    print_benchmark_result("V3: 4-Stage Pipeline WMMA", v3_result);
    
    // Verify V3 output against the cuBLAS reference.
    CHECK_CUDA(cudaMemcpy(h_C_test, d_C, size_C, cudaMemcpyDeviceToHost));
    verify_results(h_C_ref, h_C_test, M, N, "V3");
    printf("\n");
    
    // =======================================================================
    // Performance Summary
    // =======================================================================
    print_benchmark_footer();
    printf("\n");
    printf("Performance Summary:\n");
    printf("=================================================================\n");
    printf("%-30s: %.3f TFLOPS (baseline)\n", "cuBLAS", cublas_tflops);
    printf("%-30s: %.3f TFLOPS (%.2f%% of cuBLAS)\n", "V2: Naive WMMA", 
           v2_result.tflops, v2_result.percentage_of_cublas);
    printf("%-30s: %.3f TFLOPS (%.2f%% of cuBLAS)\n", "V3: 4-Stage Pipeline WMMA",
           v3_result.tflops, v3_result.percentage_of_cublas);
    printf("=================================================================\n");
    printf("Speedup (V3 vs V2): %.2fx\n", v2_result.time_ms / v3_result.time_ms);
    printf("=================================================================\n");
    
    // Cleanup
    CHECK_CUDA(cudaEventDestroy(start));
    CHECK_CUDA(cudaEventDestroy(stop));
    CHECK_CUBLAS(cublasDestroy(handle));
    CHECK_CUDA(cudaFree(d_A));
    CHECK_CUDA(cudaFree(d_B));
    CHECK_CUDA(cudaFree(d_C));
    CHECK_CUDA(cudaFree(d_C_ref));
    
    free(h_A);
    free(h_B_row);
    free(h_B);
    free(h_C);
    free(h_C_ref);
    free(h_C_test);
    
    return 0;
}
