#pragma once

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// =============================================================================
// Error Checking Macros
// =============================================================================

// Abort with file/line and a human-readable message if a CUDA runtime call
// fails. Usage: CHECK_CUDA(cudaMalloc(&p, bytes));
// Note: kernel launches themselves return no status — pair a launch with
// CHECK_CUDA(cudaGetLastError()) to catch launch-configuration errors.
#define CHECK_CUDA(call) \
    do { \
        cudaError_t status = call; \
        if (status != cudaSuccess) { \
            fprintf(stderr, "CUDA error at %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(status)); \
            exit(EXIT_FAILURE); \
        } \
    } while(0)

// Abort with file/line if a cuBLAS call does not return
// CUBLAS_STATUS_SUCCESS. Only the numeric status code is printed (the raw
// cublasStatus_t enum value).
#define CHECK_CUBLAS(call) \
    do { \
        cublasStatus_t status = call; \
        if (status != CUBLAS_STATUS_SUCCESS) { \
            fprintf(stderr, "cuBLAS error at %s:%d: %d\n", __FILE__, __LINE__, status); \
            exit(EXIT_FAILURE); \
        } \
    } while(0)

// =============================================================================
// cuBLAS Baseline Wrapper
// =============================================================================

// Thin wrapper over cublasGemmEx: C = alpha * op(A) * op(B) + beta * C.
// Inputs and output are FP16 (CUDA_R_16F); accumulation is FP32
// (CUBLAS_COMPUTE_32F), with Tensor Cores requested via
// CUBLAS_GEMM_DEFAULT_TENSOR_OP.
//
// NOTE: cuBLAS uses column-major storage. This wrapper forwards operands
// verbatim; callers holding row-major data must swap/transpose operands
// themselves (see benchmark_cublas for the row-major-A / col-major-B case).
inline cublasStatus_t
cublas_gemm_baseline(cublasHandle_t handle,
                     cublasOperation_t transA, cublasOperation_t transB,
                     int m, int n, int k,
                     const float* alpha,
                     const half* A, int ldA,
                     const half* B, int ldB,
                     const float* beta,
                     half* C, int ldC)
{
    // `half` (cuda_fp16.h) is the same type as `__half`, and alpha/beta
    // already have the exact parameter types cublasGemmEx expects, so the
    // previous reinterpret_casts were no-ops and have been removed.
    return cublasGemmEx(handle, transA, transB,
                        m, n, k,
                        alpha,
                        A, CUDA_R_16F, ldA,
                        B, CUDA_R_16F, ldB,
                        beta,
                        C, CUDA_R_16F, ldC,
                        CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}

// =============================================================================
// Matrix Utilities
// =============================================================================

// Fill h_data[0..size) with uniform random values in [0, 1] converted to
// FP16. Uses rand(); call srand() beforehand for a reproducible sequence.
// `inline` because this header may be included by multiple translation units.
inline void init_matrix_fp16(half* h_data, int size) {
    for (int i = 0; i < size; i++) {
        h_data[i] = __float2half((float)rand() / RAND_MAX);
    }
}

// Copy `src` (row-major, rows x cols) into `dst` in column-major order.
// Preconditions: src and dst point to rows*cols elements and do not alias.
// `inline` because this header may be included by multiple translation units.
inline void transpose_matrix(const half* src, half* dst, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            dst[j * rows + i] = src[i * cols + j];
        }
    }
}

// Compare a kernel's FP16 output against a reference (typically cuBLAS) and
// print PASSED/FAILED plus an error-distribution summary on stdout.
// C_ref / C_test are host pointers to M*N row-major FP16 elements.
// With verbose=true, the first 10 out-of-tolerance elements are printed.
//
// ============================================================================
// Tolerance rationale:
// ============================================================================
// 1. FP16 precision: 10 mantissa bits => relative precision ~2^-10 ~ 0.001.
// 2. Each FP16 -> FP32 conversion introduces error ~0.001 * value.
// 3. Accumulating K terms in FP32 grows error like sqrt(K) * eps,
//    e.g. sqrt(512) * 0.001 ~ 0.023.
// 4. The final FP32 -> FP16 store adds another ~0.001 * value.
// 5. Different accumulation orders (cuBLAS vs. our kernels) can add a
//    further 1-2% discrepancy.
//
// Theoretical relative error: ~2-3%.
// Observed in practice: ~5-10% (more complex rounding/accumulation patterns).
//
// Therefore:
// - Relative tolerance: 5% (normal) up to 10% (edge cases).
// - Absolute tolerance: 5-10, given typical output magnitudes of 100-150.
// ============================================================================
inline void verify_results(const half* C_ref, const half* C_test, int M, int N, 
                   const char* kernel_name, bool verbose = false) {
    const float abs_tolerance = 5.0f;    // absolute error tolerance
    const float rel_tolerance = 0.05f;   // 5% relative tolerance (expected normal range)
    
    int errors = 0;
    float max_error = 0.0f;
    float max_rel_error = 0.0f;
    
    // Counters for the error-distribution histogram.
    int errors_1pct = 0;   // relative error > 1%
    int errors_2pct = 0;   // relative error > 2%
    int errors_5pct = 0;   // relative error > 5%
    int errors_10pct = 0;  // relative error > 10%
    float sum_rel_error = 0.0f;
    
    for (int i = 0; i < M * N; i++) {
        float ref = __half2float(C_ref[i]);
        float test = __half2float(C_test[i]);
        float abs_error = fabsf(ref - test);
        // Near-zero references get rel_error = 0 to avoid division blow-up;
        // such elements are judged by the absolute tolerance alone.
        float rel_error = (fabsf(ref) > 1e-5f) ? (abs_error / fabsf(ref)) : 0.0f;
        
        max_error = fmaxf(max_error, abs_error);
        max_rel_error = fmaxf(max_rel_error, rel_error);
        sum_rel_error += rel_error;
        
        // Tally error-magnitude buckets (buckets are cumulative, not disjoint).
        if (rel_error > 0.01f) errors_1pct++;
        if (rel_error > 0.02f) errors_2pct++;
        if (rel_error > 0.05f) errors_5pct++;
        if (rel_error > 0.10f) errors_10pct++;
        
        // An element fails only when BOTH tolerances are exceeded.
        if (abs_error > abs_tolerance && rel_error > rel_tolerance) {
            errors++;
            if (verbose && errors <= 10) {
                printf("  Mismatch at %d: ref=%.6f, test=%.6f, abs_err=%.6f, rel_err=%.4f%%\n", 
                       i, ref, test, abs_error, rel_error * 100.0f);
            }
        }
    }
    
    float avg_rel_error = sum_rel_error / (M * N);
    
    printf("  Verification: %s\n", (errors == 0) ? "PASSED" : "FAILED");
    printf("    Max absolute error: %.6f\n", max_error);
    printf("    Max relative error: %.4f%%\n", max_rel_error * 100.0f);
    printf("    Avg relative error: %.4f%%\n", avg_rel_error * 100.0f);
    printf("    Error distribution:\n");
    printf("      >1%%:  %d/%d (%.2f%%)\n", errors_1pct, M*N, 100.0f*errors_1pct/(M*N));
    printf("      >2%%:  %d/%d (%.2f%%)\n", errors_2pct, M*N, 100.0f*errors_2pct/(M*N));
    printf("      >5%%:  %d/%d (%.2f%%)\n", errors_5pct, M*N, 100.0f*errors_5pct/(M*N));
    printf("      >10%%: %d/%d (%.2f%%)\n", errors_10pct, M*N, 100.0f*errors_10pct/(M*N));
    printf("    Failed (abs>%.1f AND rel>%.1f%%): %d/%d (%.2f%%)\n",
           abs_tolerance, rel_tolerance * 100.0f, errors, M*N, 100.0f*errors/(M*N));
}

// =============================================================================
// Benchmark Function
// =============================================================================

// Timing/throughput summary for one benchmarked GEMM configuration.
struct BenchmarkResult {
    float time_ms;                // average time per launch, in milliseconds
    float tflops;                 // achieved throughput, in TFLOP/s (2*M*N*K FLOPs per launch)
    float percentage_of_cublas;   // tflops as a percentage of the cuBLAS baseline; 0 if no baseline given
};

// Time a GEMM kernel with CUDA events and report average time and throughput.
//
// The kernel is launched `warmup` times (untimed), then `repeat` times
// between two events recorded on the default stream; the elapsed time is
// averaged per launch. Assumes the kernel has the signature
// (half* A, half* B, half* C, int M, int N, int K) and that one launch
// performs a full M x N x K GEMM (2*M*N*K FLOPs).
//
// `smem_size` is forwarded as the dynamic shared-memory size of each launch.
// `kernel_name` is currently unused here (callers print names themselves);
// it is kept for interface compatibility.
// `cublas_tflops` > 0 enables the vs-cuBLAS percentage; otherwise it is 0.
template<typename KernelFunc>
BenchmarkResult benchmark_kernel(
    KernelFunc kernel_func,
    half* d_A, half* d_B, half* d_C,
    int M, int N, int K,
    dim3 grid, dim3 block, int smem_size,
    const char* kernel_name,
    float cublas_tflops = 0.0f,
    int warmup = 10, 
    int repeat = 200)
{
    cudaEvent_t start, stop;
    CHECK_CUDA(cudaEventCreate(&start));
    CHECK_CUDA(cudaEventCreate(&stop));
    
    // Warmup: absorbs one-time costs and surfaces kernel errors before timing.
    for (int i = 0; i < warmup; i++) {
        kernel_func<<<grid, block, smem_size>>>(d_A, d_B, d_C, M, N, K);
    }
    CHECK_CUDA(cudaDeviceSynchronize());
    CHECK_CUDA(cudaGetLastError());
    
    // Timed region: both events and all launches are on the default stream,
    // so the elapsed time covers exactly the `repeat` launches.
    CHECK_CUDA(cudaEventRecord(start));
    for (int i = 0; i < repeat; i++) {
        kernel_func<<<grid, block, smem_size>>>(d_A, d_B, d_C, M, N, K);
    }
    CHECK_CUDA(cudaEventRecord(stop));
    CHECK_CUDA(cudaEventSynchronize(stop));
    // Catch launch-configuration errors from the timed loop; without this
    // they would surface only at some later, unrelated CUDA call.
    CHECK_CUDA(cudaGetLastError());
    
    float ms = 0;
    CHECK_CUDA(cudaEventElapsedTime(&ms, start, stop));
    ms /= repeat;  // average per launch
    
    // 2*M*N*K: one multiply + one add per inner-product term.
    // Computed in double (left-to-right promotion) to avoid int overflow.
    double flops = 2.0 * M * N * K;
    double tflops = flops / (ms * 1e-3) / 1e12;
    
    BenchmarkResult result;
    result.time_ms = ms;
    result.tflops = (float)tflops;
    result.percentage_of_cublas = (cublas_tflops > 0) ? (float)(tflops / cublas_tflops * 100.0f) : 0.0f;
    
    CHECK_CUDA(cudaEventDestroy(start));
    CHECK_CUDA(cudaEventDestroy(stop));
    
    return result;
}

// Time the cuBLAS baseline GEMM and report average time and throughput.
// percentage_of_cublas is 100 by definition (this IS the baseline).
//
// Layout convention: d_A is row-major (M x K), d_B is col-major (K x N),
// d_C is row-major (M x N). cuBLAS is column-major, so we compute
//   C_cm(N x M) = op(d_B) * op(d_A)
// where:
//   - d_B (col-major K x N, ld=K) is passed with CUBLAS_OP_T -> (N x K);
//   - d_A's row-major (M x K) layout already reads as col-major (K x M)
//     with ld=K, so it is passed with CUBLAS_OP_N;
//   - the col-major (N x M) result with ld=N is exactly row-major C (M x N).
// `inline` because this header may be included by multiple translation units.
inline BenchmarkResult benchmark_cublas(
    cublasHandle_t handle,
    half* d_A, half* d_B, half* d_C,
    int M, int N, int K,
    int warmup = 10,
    int repeat = 200)
{
    cudaEvent_t start, stop;
    CHECK_CUDA(cudaEventCreate(&start));
    CHECK_CUDA(cudaEventCreate(&stop));
    
    float alpha = 1.0f;
    float beta = 0.0f;
    
    // Warmup: lets cuBLAS pick/cache its internal algorithm before timing.
    for (int i = 0; i < warmup; i++) {
        CHECK_CUBLAS(cublas_gemm_baseline(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                                         N, M, K, &alpha,
                                         d_B, K,  // col-major K x N, transposed to N x K
                                         d_A, K,  // row-major M x K == col-major K x M, untransposed
                                         &beta,
                                         d_C, N));
    }
    CHECK_CUDA(cudaDeviceSynchronize());
    
    // Timed region (default stream, matching the events).
    CHECK_CUDA(cudaEventRecord(start));
    for (int i = 0; i < repeat; i++) {
        CHECK_CUBLAS(cublas_gemm_baseline(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                                         N, M, K, &alpha,
                                         d_B, K,
                                         d_A, K,
                                         &beta,
                                         d_C, N));
    }
    CHECK_CUDA(cudaEventRecord(stop));
    CHECK_CUDA(cudaEventSynchronize(stop));
    
    float ms = 0;
    CHECK_CUDA(cudaEventElapsedTime(&ms, start, stop));
    ms /= repeat;  // average per call
    
    // 2*M*N*K FLOPs per GEMM; double arithmetic avoids int overflow.
    double flops = 2.0 * M * N * K;
    double tflops = flops / (ms * 1e-3) / 1e12;
    
    BenchmarkResult result;
    result.time_ms = ms;
    result.tflops = (float)tflops;
    result.percentage_of_cublas = 100.0f;
    
    CHECK_CUDA(cudaEventDestroy(start));
    CHECK_CUDA(cudaEventDestroy(stop));
    
    return result;
}

// =============================================================================
// Print Utilities
// =============================================================================

// Print the column headers for the benchmark results table.
// `inline` because this header may be included by multiple translation units.
inline void print_benchmark_header() {
    printf("\n");
    printf("================================================================================\n");
    printf("%-40s %10s %12s %15s\n", "Kernel", "Time(ms)", "TFLOPS", "vs cuBLAS(%)");
    printf("--------------------------------------------------------------------------------\n");
}

// Print one row of the benchmark table (columns match print_benchmark_header).
// `inline` because this header may be included by multiple translation units.
inline void print_benchmark_result(const char* name, const BenchmarkResult& result) {
    printf("%-40s %10.6f %12.3f %15.2f\n", 
           name, result.time_ms, result.tflops, result.percentage_of_cublas);
}

// Print the closing rule of the benchmark table.
// `inline` because this header may be included by multiple translation units.
inline void print_benchmark_footer() {
    printf("================================================================================\n");
}
