#ifndef PROFILE_UTILS_CUH
#define PROFILE_UTILS_CUH

// Utility functions for CUDA profiling: device info, kernel timing,
// result validation, and cuBLAS-based GEMM comparison.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <random>
#include <utility>

#include <cublas_v2.h> // 调用cublas库，对比手写kernel和他的性能。
#include <cuda_runtime.h>

#include "cuda_gemm_utils.cuh"

// Print the name, global-memory size (GiB), and theoretical peak memory
// bandwidth of the currently active CUDA device.
// Marked inline: this header may be included from several translation
// units; a non-inline definition would violate the ODR at link time.
inline void print_device_info()
{
    int device_id{0};
    CHECK_CUDA_ERROR(cudaGetDevice(&device_id));
    cudaDeviceProp device_prop;
    CHECK_CUDA_ERROR(cudaGetDeviceProperties(&device_prop, device_id));
    std::cout << "Device Name: " << device_prop.name << std::endl;
    // totalGlobalMem is in bytes; (1 << 30) converts bytes -> GiB.
    float const memory_size{static_cast<float>(device_prop.totalGlobalMem) /
                            (1 << 30)};
    std::cout << "Memory Size: " << memory_size << " GB" << std::endl;
    // Peak BW = 2 (DDR: two transfers per clock) * memory clock (kHz)
    //           * bus width in bytes, scaled by 1e6 -> GB/s.
    float const peak_bandwidth{
        static_cast<float>(2.0f * device_prop.memoryClockRate *
                           (device_prop.memoryBusWidth / 8) / 1.0e6)};
    std::cout << "Peak Bandwidth: " << peak_bandwidth << " GB/s" << std::endl;
    std::cout << std::endl;
}

// Effective memory bandwidth (GB/s) of one GEMM call, assuming each matrix
// is touched exactly once: read A (m*k floats), read B (k*n), write C (m*n).
// `latency` is in milliseconds.
// Marked inline so this header can be included from multiple translation
// units without ODR violations. Intermediate math is done in double to
// avoid precision loss for large matrix sizes before narrowing to float.
inline float compute_effective_bandwidth(size_t m, size_t n, size_t k,
                                         float latency)
{
    double const num_bytes{static_cast<double>(m * k + k * n + m * n) *
                           sizeof(float)};
    double const seconds{static_cast<double>(latency) * 1e-3};
    return static_cast<float>(num_bytes / seconds / 1e9);
}

// Effective throughput (TFLOPS) of one GEMM call: 2*m*n*k floating-point
// operations (one multiply + one add per inner-product term) divided by the
// latency. `latency` is in milliseconds.
// Marked inline so this header can be included from multiple translation
// units without ODR violations. The leading 2.0 promotes the whole product
// to double, so m*k*n cannot overflow size_t arithmetic.
inline float compute_effective_tflops(size_t m, size_t n, size_t k,
                                      float latency)
{
    double const flops{2.0 * m * k * n};
    double const seconds{static_cast<double>(latency) * 1e-3};
    return static_cast<float>(flops / seconds / 1e12);
}

// Measure the average latency (milliseconds) of `bound_function` on `stream`.
// The function is launched `num_warmups` times first (results discarded, to
// exclude one-time costs such as JIT and cache warm-up), then timed over
// `num_repeats` back-to-back launches using CUDA events recorded on the same
// stream; the total event time is divided by `num_repeats`.
// Marked inline: non-inline definitions in a header violate the ODR when the
// header is included from more than one translation unit.
inline float measure_performance(std::function<void(cudaStream_t)> bound_function,
                                 cudaStream_t stream, size_t num_repeats = 100,
                                 size_t num_warmups = 100)
{
    cudaEvent_t start, stop;
    float time{0.0f}; // filled in by cudaEventElapsedTime

    CHECK_CUDA_ERROR(cudaEventCreate(&start));
    CHECK_CUDA_ERROR(cudaEventCreate(&stop));

    for (size_t i{0}; i < num_warmups; ++i)
    {
        bound_function(stream);
    }

    // Make sure all warm-up work has drained before timing starts.
    CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));

    CHECK_CUDA_ERROR(cudaEventRecord(start, stream));
    for (size_t i{0}; i < num_repeats; ++i)
    {
        bound_function(stream);
    }
    CHECK_CUDA_ERROR(cudaEventRecord(stop, stream));
    CHECK_CUDA_ERROR(cudaEventSynchronize(stop));
    CHECK_LAST_CUDA_ERROR();
    CHECK_CUDA_ERROR(cudaEventElapsedTime(&time, start, stop));
    CHECK_CUDA_ERROR(cudaEventDestroy(start));
    CHECK_CUDA_ERROR(cudaEventDestroy(stop));

    float const latency{time / num_repeats};

    return latency;
}

// Check a cuBLAS status code; on failure, print the call site, the failing
// expression, and the status string, then terminate the process.
// NOTE(review): the names spell "CUBLASS"/"cublass" with a doubled 's';
// kept as-is because existing callers use these spellings.
#define CHECK_CUBLASS_ERROR(val) check_cublass((val), #val, __FILE__, __LINE__)
inline void check_cublass(cublasStatus_t err, const char* const func,
                          const char* const file, const int line)
{
    if (err != CUBLAS_STATUS_SUCCESS)
    {
        std::cerr << "cuBLAS Error at: " << file << ":" << line << std::endl;
        // `func` is the stringified expression from the macro; it was
        // previously accepted but never printed.
        std::cerr << cublasGetStatusString(err) << " " << func << std::endl;
        std::exit(EXIT_FAILURE);
    }
}

// Compute C = alpha * A * B + beta * C with cublasSgemm on `stream`.
// cuBLAS treats all matrices as column-major with lda = m, ldb = k, ldc = m.
// NOTE(review): confirm this layout convention matches the one the custom
// kernels under test use — the validation in profile_gemm only compares the
// two GPU results against each other, so a shared layout mismatch would
// go unnoticed.
// If `handle` is null (the default, preserving the original call signature),
// a temporary cuBLAS handle is created and destroyed, which is expensive;
// pass a reusable handle for repeated or timed calls.
inline void launch_gemm_cublas(size_t m, size_t n, size_t k, float* A, float* B,
                               float* C, float alpha, float beta,
                               cudaStream_t stream,
                               cublasHandle_t handle = nullptr)
{
    bool const owns_handle{handle == nullptr};
    if (owns_handle)
    {
        CHECK_CUBLASS_ERROR(cublasCreate(&handle));
    }
    CHECK_CUBLASS_ERROR(cublasSetStream(handle, stream));
    CHECK_CUBLASS_ERROR(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                                    &alpha, A, m, B, k, &beta, C, m));
    if (owns_handle)
    {
        CHECK_CUBLASS_ERROR(cublasDestroy(handle));
    }
}

// Element-wise comparison of two m x n row-major matrices.
// Returns true iff every element satisfies
//   |C - C_ref| <= max(abs_tol, |C_ref| * rel_tol);
// otherwise prints the first offending element and returns false.
// Comparison is done in double to avoid float rounding in the tolerance math.
// Marked inline to keep this header safe to include from multiple TUs.
inline bool all_close(float* C, float* C_ref, size_t m, size_t n, float abs_tol,
                      double rel_tol)
{
    for (size_t i{0U}; i < m; ++i)
    {
        for (size_t j{0U}; j < n; ++j)
        {
            double const C_val{static_cast<double>(C[i * n + j])};
            double const C_ref_val{static_cast<double>(C_ref[i * n + j])};
            double const diff_val{std::abs(C_val - C_ref_val)};
            double const rel_threshold{std::abs(C_ref_val) * rel_tol};
            if (diff_val >
                std::max(static_cast<double>(abs_tol), rel_threshold))
            {
                std::cout << "C[" << i << ", " << j << "] = " << C_val
                          << " C_ref[" << i << ", " << j << "] = " << C_ref_val
                          << " Abs Diff: " << diff_val
                          << " Abs Diff Threshold: "
                          << static_cast<double>(abs_tol)
                          << " Rel->Abs Diff Threshold: " << rel_threshold
                          << std::endl;
                return false; // report only the first mismatch
            }
        }
    }
    return true;
}

// Fill an m x n row-major matrix with uniformly distributed random integers
// in [0, 5], stored as floats. Small integer values keep GEMM results exactly
// representable, so GPU/reference outputs can be compared tightly.
// The same `seed` always produces the same matrix (deterministic engine).
// Marked inline to keep this header safe to include from multiple TUs.
inline void random_initialize_matrix(float* A, size_t m, size_t n,
                                     unsigned int seed = 0U)
{
    std::default_random_engine eng(seed);
    // Integer values make result verification exact; a real-valued
    // distribution is kept below for experiments.
    std::uniform_int_distribution<int> dis(0, 5);
    // std::uniform_real_distribution<float> dis(-1.0f, 1.0f);
    auto const rand = [&dis, &eng]() { return dis(eng); };
    for (size_t i{0U}; i < m; ++i)
    {
        for (size_t j{0U}; j < n; ++j)
        {
            A[i * n + j] = static_cast<float>(rand());
        }
    }
}

// Print latency (ms), effective bandwidth (GB/s), and effective TFLOPS for a
// GEMM of the given dimensions. `latency` is in milliseconds.
// Marked inline to keep this header safe to include from multiple TUs.
inline void print_performance_result(size_t m, size_t n, size_t k,
                                     float latency)
{
    float const effective_bandwidth{
        compute_effective_bandwidth(m, n, k, latency)};
    float const effective_tflops{compute_effective_tflops(m, n, k, latency)};

    std::cout << "Latency: " << latency << " ms" << std::endl;
    std::cout << "Effective Bandwidth: " << effective_bandwidth << " GB/s"
              << std::endl;
    std::cout << "Effective TFLOPS: " << effective_tflops << " TFLOPS"
              << std::endl;
}

// End-to-end harness for one GEMM implementation:
//   1. allocate and randomly initialize A (m x k), B (k x n), C (m x n);
//   2. compute a reference C with cuBLAS and a candidate C with `gemm_kernel`;
//   3. check both results agree within (abs_tol, rel_tol);
//   4. time both implementations and print latency / bandwidth / TFLOPS.
// Returns {cuBLAS latency, custom-kernel latency} in milliseconds.
// Marked inline: non-inline definitions in a header violate the ODR when the
// header is included from more than one translation unit.
inline std::pair<float, float> profile_gemm(size_t m, size_t n, size_t k,
    std::function<void(size_t, size_t, size_t, float*, float*, float*, float, float, cudaStream_t)> gemm_kernel,
    float abs_tol, double rel_tol, size_t num_repeats = 10, size_t num_warmups = 10, unsigned int seed = 0U)
{
    float const alpha{1.0f};
    float const beta{0.0f};

    // Create CUDA stream.
    cudaStream_t stream;
    CHECK_CUDA_ERROR(cudaStreamCreate(&stream));

    // Pinned host buffers (cudaMallocHost) so async copies can run on the stream.
    float* A_host{nullptr};
    float* B_host{nullptr};
    float* C_host{nullptr};
    float* C_host_ref{nullptr};    // reference C computed by cuBLAS
    float* C_host_device{nullptr}; // C computed by the custom kernel

    CHECK_CUDA_ERROR(cudaMallocHost(&A_host, m * k * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMallocHost(&B_host, k * n * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMallocHost(&C_host, m * n * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMallocHost(&C_host_ref, m * n * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMallocHost(&C_host_device, m * n * sizeof(float)));

    // Initialize matrices A, B and C.
    random_initialize_matrix(A_host, m, k, seed);
    random_initialize_matrix(B_host, k, n, seed);
    random_initialize_matrix(C_host, m, n, seed);

    // Allocate memory on the device.
    float* A_device{nullptr};
    float* B_device{nullptr};
    float* C_device{nullptr};

    CHECK_CUDA_ERROR(cudaMalloc(&A_device, m * k * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMalloc(&B_device, k * n * sizeof(float)));
    CHECK_CUDA_ERROR(cudaMalloc(&C_device, m * n * sizeof(float)));

    // Copy data from host to device.
    CHECK_CUDA_ERROR(cudaMemcpyAsync(A_device, A_host, m * k * sizeof(float),
                                     cudaMemcpyHostToDevice, stream));

    CHECK_CUDA_ERROR(cudaMemcpyAsync(B_device, B_host, k * n * sizeof(float),
                                     cudaMemcpyHostToDevice, stream));

    CHECK_CUDA_ERROR(cudaMemcpyAsync(C_device, C_host, m * n * sizeof(float),
                                     cudaMemcpyHostToDevice, stream));

    // Compute the reference output with cuBLAS.
    // (launch_gemm_cublas manages its own cuBLAS handle internally.)
    launch_gemm_cublas(m, n, k, A_device, B_device, C_device, alpha, beta, stream);
    CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));

    // Copy the reference result back to the host.
    CHECK_CUDA_ERROR(cudaMemcpyAsync(C_host_ref, C_device, m * n * sizeof(float),
                                     cudaMemcpyDeviceToHost, stream));
    CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));

    // Restore C on the device so the custom kernel starts from the same input.
    CHECK_CUDA_ERROR(cudaMemcpy(C_device, C_host, m * n * sizeof(float),
                                cudaMemcpyHostToDevice));

    // Launch the custom GEMM kernel under test.
    gemm_kernel(m, n, k, A_device, B_device, C_device, alpha, beta, stream);

    CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));
    CHECK_CUDA_ERROR(cudaMemcpy(C_host_device, C_device, m * n * sizeof(float),
                                cudaMemcpyDeviceToHost));

    // Verify the custom result against the cuBLAS reference.
    // NOTE(review): assert is compiled out under NDEBUG, so release builds
    // skip this validation silently.
    assert(all_close(C_host_device, C_host_ref, m, n, abs_tol, rel_tol) && "result check failed!");

    // Time both implementations. The lambdas bind the GEMM arguments so
    // measure_performance only needs a stream in and a latency out.
    float const latency_cublas{
        measure_performance([&](cudaStream_t stream) {
            launch_gemm_cublas(m, n, k, A_device, B_device, C_device, alpha, beta, stream);
        },
        stream, num_repeats, num_warmups)};

    float const latency_gemm{
        measure_performance([&](cudaStream_t stream) {
            gemm_kernel(m, n, k, A_device, B_device, C_device, alpha, beta, stream);
        },
        stream, num_repeats, num_warmups)};

    // Release resources.
    CHECK_CUDA_ERROR(cudaFreeHost(A_host));
    CHECK_CUDA_ERROR(cudaFreeHost(B_host));
    CHECK_CUDA_ERROR(cudaFreeHost(C_host));
    CHECK_CUDA_ERROR(cudaFreeHost(C_host_ref));
    CHECK_CUDA_ERROR(cudaFreeHost(C_host_device));

    CHECK_CUDA_ERROR(cudaFree(A_device));
    CHECK_CUDA_ERROR(cudaFree(B_device));
    CHECK_CUDA_ERROR(cudaFree(C_device));

    CHECK_CUDA_ERROR(cudaStreamDestroy(stream));

    std::cout << "cuBLAS GEMM Kernel Performance" << std::endl;
    print_performance_result(m, n, k, latency_cublas);
    std::cout << "Custom GEMM Kernel Performance" << std::endl;
    print_performance_result(m, n, k, latency_gemm);
    std::cout << "Custom GEMM VS cuBLAS GEMM Performance: "
              << latency_cublas / latency_gemm * 100.0f << "%"
              << std::endl;

    return std::pair<float, float>{latency_cublas, latency_gemm};
}

#endif // PROFILE_UTILS_CUH
