#include <chrono>
#include <hip/hip_runtime.h>
#include <hip/hiprtc.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <mutex>
#include <cstdlib>
#include <atomic>
#include <cmath>
#include <algorithm>
#include <random>

// Abort with a diagnostic if a hipRTC call fails.
// The call expression is evaluated exactly once: the original macro
// re-executed `cmd` inside hiprtcGetErrorString(cmd) on failure, which
// repeated the API call's side effects. do-while(0) makes the macro a
// single statement that is safe in unbraced if/else bodies.
#define HIPRTC_CHECK(cmd) \
do { \
    hiprtcResult _hiprtc_result = (cmd); \
    if (_hiprtc_result != HIPRTC_SUCCESS) { \
        std::cerr << "HIPRTC error: " << hiprtcGetErrorString(_hiprtc_result) \
                  << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
        abort(); \
    } \
} while (0)

// Abort with a diagnostic if a HIP runtime call fails.
// The call expression is evaluated exactly once: the original macro
// re-executed `cmd` inside hipGetErrorString(cmd) on failure, repeating
// the API call's side effects. do-while(0) makes the macro a single
// statement that is safe in unbraced if/else bodies.
#define HIP_CHECK(cmd) \
do { \
    hipError_t _hip_result = (cmd); \
    if (_hip_result != hipSuccess) { \
        std::cerr << "HIP error: " << hipGetErrorString(_hip_result) \
                  << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
        abort(); \
    } \
} while (0)

// Kernel source definitions.
// The bodies live inside raw string literals and are compiled at runtime
// by hipRTC (hiprtcCompileProgram), not by the host compiler.

// Case 0: empty kernel — measures pure compile/launch overhead.
constexpr auto small_kernel = R"(
extern "C" __global__ void emptyKernel() {}
)";

// Case 1: SAXPY-style vector add, out[i] = a * x[i] + y[i].
constexpr auto medium_kernel = R"(
extern "C" __global__ void vectorAdd(float a, float* x, float* y, float* out, size_t n) {
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) out[tid] = a * x[tid] + y[tid];
}
)";

// Case 2: naive 3D convolution with zero-padding at the volume borders;
// one thread per output voxel.
constexpr auto large_kernel = R"(
extern "C" __global__ void convolve3D(float *input, float *output, float *kernel,
                           int depth, int height, int width, int kDepth,
                           int kHeight, int kWidth) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;

  if (x >= width || y >= height || z >= depth) return;

  float value = 0;
  for (int i = -kDepth/2; i <= kDepth/2; i++) {
    for (int j = -kHeight/2; j <= kHeight/2; j++) {
      for (int k = -kWidth/2; k <= kWidth/2; k++) {
        int dz = z + i, dy = y + j, dx = x + k;
        if (dz >= 0 && dz < depth && dy >= 0 && dy < height && dx >= 0 && dx < width) {
          int input_idx = dz*(height*width) + dy*width + dx;
          int kernel_idx = (i+kDepth/2)*(kHeight*kWidth) + (j+kHeight/2)*kWidth + (k+kWidth/2);
          value += input[input_idx] * kernel[kernel_idx];
        }
      }
    }
  }
  output[z*(height*width) + y*width + x] = value;
}
)";

// Compiled-kernel cache entry: a loaded module and its entry function.
// Members default to nullptr so a default-constructed entry never holds
// indeterminate handles (the original left both pointers uninitialized).
struct KernelCache {
    hipFunction_t function = nullptr;
    hipModule_t module = nullptr;
};

// Cache of compiled kernels keyed by kernel type (0/1/2), guarded by
// cache_mutex. resource_cleaned makes cleanup_resources() idempotent
// (it runs both via atexit and via an explicit call in main).
std::unordered_map<int, KernelCache> kernel_cache;
std::mutex cache_mutex;
std::atomic<bool> resource_cleaned{false};

// Build the hipRTC compile-option list for device 0.
// Returned pointers remain valid for the program's lifetime: the
// architecture flag is backed by a function-local static and the other
// option is a string literal.
std::vector<const char*> get_compile_options() {
    std::vector<const char*> opts;
#ifdef __HIP_PLATFORM_AMD__
    // Query device properties only on the path that actually uses them
    // (the original queried on every call, including the non-AMD path
    // where the result was discarded). The static caches the first call.
    static const std::string arch = [] {
        hipDeviceProp_t props;
        HIP_CHECK(hipGetDeviceProperties(&props, 0));
        return "--gpu-architecture=" + std::string(props.gcnArchName);
    }();
    opts.push_back(arch.c_str());
#else
    opts.push_back("--fmad=false");
#endif
    return opts;
}

// 安全释放资源
void cleanup_resources() {
    if (resource_cleaned.exchange(true)) return;

    std::lock_guard<std::mutex> lock(cache_mutex);
    for (auto& [type, cache] : kernel_cache) {
        if (cache.module) {
            hipModuleUnload(cache.module);
            cache.module = nullptr;
        }
    }
    kernel_cache.clear();
}

// Compile the kernel selected by kernel_type with hipRTC, or return a
// cached copy. When use_cache is true the loaded module/function pair is
// published in the global kernel_cache and shared across calls. Aborts
// on invalid kernel_type or any compile/load failure.
KernelCache compile_kernel(int kernel_type, bool use_cache) {
    const char* kernel_code = nullptr;
    const char* kernel_name = nullptr;

    switch(kernel_type) {
        case 0:
            kernel_code = small_kernel;
            kernel_name = "emptyKernel";
            break;
        case 1:
            kernel_code = medium_kernel;
            kernel_name = "vectorAdd";
            break;
        case 2:
            kernel_code = large_kernel;
            kernel_name = "convolve3D";
            break;
        default:
            std::cerr << "Invalid kernel type" << std::endl;
            abort();
    }

    // Fast path: hand back an already-compiled kernel.
    if (use_cache) {
        std::lock_guard<std::mutex> lock(cache_mutex);
        auto it = kernel_cache.find(kernel_type);
        if (it != kernel_cache.end()) {
            return it->second;
        }
    }

    // Compile a fresh program from source.
    hiprtcProgram prog = nullptr;
    HIPRTC_CHECK(hiprtcCreateProgram(&prog, kernel_code, nullptr, 0, nullptr, nullptr));

    auto options = get_compile_options();
    auto start_compile = std::chrono::high_resolution_clock::now();
    hiprtcResult result = hiprtcCompileProgram(prog, options.size(), options.data());
    auto end_compile = std::chrono::high_resolution_clock::now();

    // On failure, dump the build log (if one exists) before aborting.
    // The original read and printed the log unconditionally, which reads
    // a possibly-empty buffer via log.data() when log_size is 0.
    if (result != HIPRTC_SUCCESS) {
        size_t log_size = 0;
        HIPRTC_CHECK(hiprtcGetProgramLogSize(prog, &log_size));
        if (log_size > 0) {
            // +1 and zero-fill so printing is safe even if the reported
            // size does not include a terminating NUL.
            std::vector<char> log(log_size + 1, '\0');
            HIPRTC_CHECK(hiprtcGetProgramLog(prog, log.data()));
            std::cerr << "Compilation failed:\n" << log.data() << std::endl;
        } else {
            std::cerr << "Compilation failed (no build log available)" << std::endl;
        }
        HIPRTC_CHECK(hiprtcDestroyProgram(&prog));
        abort();
    }

    // Report wall-clock compile time in milliseconds.
    double compile_time = std::chrono::duration_cast<std::chrono::microseconds>(
        end_compile - start_compile).count() / 1000.0;
    std::cout << "Kernel " << kernel_type << " compiled in "
              << compile_time << " ms" << std::endl;

    // Extract the generated code object.
    size_t code_size = 0;
    HIPRTC_CHECK(hiprtcGetCodeSize(prog, &code_size));
    std::vector<char> code(code_size);
    HIPRTC_CHECK(hiprtcGetCode(prog, code.data()));

    // Load the module and resolve the kernel entry point.
    KernelCache cache;
    HIP_CHECK(hipModuleLoadData(&cache.module, code.data()));
    HIP_CHECK(hipModuleGetFunction(&cache.function, cache.module, kernel_name));

    HIPRTC_CHECK(hiprtcDestroyProgram(&prog));

    // Publish to the cache. If another thread raced us and inserted
    // first, keep its entry and unload ours: the original's blind
    // assignment overwrote the existing entry and leaked its module.
    if (use_cache) {
        std::lock_guard<std::mutex> lock(cache_mutex);
        auto [it, inserted] = kernel_cache.try_emplace(kernel_type, cache);
        if (!inserted) {
            HIP_CHECK(hipModuleUnload(cache.module));
            return it->second;
        }
    }

    return cache;
}

// Host-side check of the vectorAdd kernel: every element of `out` must
// equal a * x[i] + y[i] within an absolute tolerance. Returns false (and
// logs the first offending index) on any mismatch or on size mismatch.
bool verify_vector_add(const std::vector<float>& out, float a,
                       const std::vector<float>& x,
                       const std::vector<float>& y,
                       float tolerance = 1e-6f) {
    const size_t count = out.size();
    if (count != x.size() || count != y.size()) {
        std::cerr << "Error: Vector sizes don't match for verification" << std::endl;
        return false;
    }

    for (size_t idx = 0; idx < count; ++idx) {
        const float want = a * x[idx] + y[idx];
        const float delta = std::fabs(out[idx] - want);
        if (delta > tolerance) {
            std::cerr << "Verification failed at index " << idx
                      << ": got " << out[idx] << ", expected " << want
                      << " (delta = " << delta << ")"
                      << std::endl;
            return false;
        }
    }
    return true;
}

// Host-side spot check of the 3D convolution: recompute a single
// reference voxel (the volume centre) with the same zero-padded indexing
// the kernel uses, and compare it to the device result within tolerance.
// Also rejects an output buffer of the wrong size.
bool verify_convolution(const std::vector<float>& output,
                        const std::vector<float>& input,
                        const std::vector<float>& kernel,
                        int depth, int height, int width,
                        int kDepth, int kHeight, int kWidth,
                        float tolerance = 1e-6f) {
    // Sanity check on the output buffer size.
    if (output.size() != static_cast<size_t>(depth * height * width)) {
        std::cerr << "Output size mismatch" << std::endl;
        return false;
    }

    // Centre voxel and kernel half-extents.
    const int cz = depth / 2, cy = height / 2, cx = width / 2;
    const int rz = kDepth / 2, ry = kHeight / 2, rx = kWidth / 2;

    // Recompute the convolution at the centre, skipping out-of-bounds
    // taps exactly as the device kernel does (zero padding).
    float reference = 0.0f;
    for (int oz = -rz; oz <= rz; ++oz) {
        for (int oy = -ry; oy <= ry; ++oy) {
            for (int ox = -rx; ox <= rx; ++ox) {
                const int sz = cz + oz, sy = cy + oy, sx = cx + ox;
                const bool inside = sz >= 0 && sz < depth &&
                                    sy >= 0 && sy < height &&
                                    sx >= 0 && sx < width;
                if (!inside) continue;
                const int src = sz * (height * width) + sy * width + sx;
                const int tap = (oz + rz) * (kHeight * kWidth) +
                                (oy + ry) * kWidth +
                                (ox + rx);
                reference += input[src] * kernel[tap];
            }
        }
    }

    const int probe = cz * (height * width) + cy * width + cx;
    const float actual = output[probe];

    if (std::fabs(actual - reference) > tolerance) {
        std::cerr << "Convolution verification failed at center point: "
                  << "got " << actual << ", expected " << reference
                  << " (delta = " << std::fabs(actual - reference) << ")"
                  << std::endl;
        return false;
    }

    return true;
}

// 执行核函数
void execute_kernel(int kernel_type, const KernelCache& cache) {
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);

    switch(kernel_type) {
        case 0: {  // 小型空核函数
            std::cout << "Executing empty kernel..." << std::endl;

            hipEvent_t start, stop;
            HIP_CHECK(hipEventCreate(&start));
            HIP_CHECK(hipEventCreate(&stop));

            // 执行多次以获得更稳定的时间测量
            const int iterations = 1;
            HIP_CHECK(hipEventRecord(start));
            for (int i = 0; i < iterations; ++i) {
                HIP_CHECK(hipModuleLaunchKernel(cache.function,
                    128, 1, 1,  // grid
                    256, 1, 1,  // block
                    0, nullptr, nullptr, nullptr));
            }
            HIP_CHECK(hipEventRecord(stop));
            HIP_CHECK(hipEventSynchronize(stop));

            float total_ms;
            HIP_CHECK(hipEventElapsedTime(&total_ms, start, stop));
            float avg_ms = total_ms / iterations;

            std::cout << "Empty kernel executed " << iterations << " times" << std::endl;
            std::cout << "Total time: " << total_ms << " ms" << std::endl;
            std::cout << "Average time: " << avg_ms << " ms per iteration" << std::endl;

            HIP_CHECK(hipEventDestroy(start));
            HIP_CHECK(hipEventDestroy(stop));
            break;
        }
        case 1: {  // 中型向量加法
            constexpr size_t n = 128 * 1024;  // 数据大小
            std::cout << "Executing vectorAdd kernel with " << n << " elements..." << std::endl;

            // 准备主机数据
            std::vector<float> hA(n), hB(n), hC(n);
            std::generate(hA.begin(), hA.end(), [&]() { return dist(gen); });
            std::generate(hB.begin(), hB.end(), [&]() { return dist(gen); });
            float a = 2.5f;

            // 准备设备内存
            float *dA = nullptr, *dB = nullptr, *dC = nullptr;
            HIP_CHECK(hipMalloc(&dA, n * sizeof(float)));
            HIP_CHECK(hipMalloc(&dB, n * sizeof(float)));
            HIP_CHECK(hipMalloc(&dC, n * sizeof(float)));

            // 复制数据到设备
            HIP_CHECK(hipMemcpy(dA, hA.data(), n * sizeof(float), hipMemcpyHostToDevice));
            HIP_CHECK(hipMemcpy(dB, hB.data(), n * sizeof(float), hipMemcpyHostToDevice));

            // 启动参数
            struct Args {
                float a;
                float* x;
                float* y;
                float* out;
                size_t n;
            } args{a, dA, dB, dC, n};

            size_t arg_size = sizeof(args);
            void* config[] = {
                HIP_LAUNCH_PARAM_BUFFER_POINTER, &args,
                HIP_LAUNCH_PARAM_BUFFER_SIZE, &arg_size,
                HIP_LAUNCH_PARAM_END
            };

            // 执行核函数
            hipEvent_t start, stop;
            HIP_CHECK(hipEventCreate(&start));
            HIP_CHECK(hipEventCreate(&stop));

            HIP_CHECK(hipEventRecord(start));
            HIP_CHECK(hipModuleLaunchKernel(cache.function,
                (n + 255) / 256, 1, 1,  // grid
                256, 1, 1,               // block
                0, nullptr, nullptr, config));
            HIP_CHECK(hipEventRecord(stop));
            HIP_CHECK(hipEventSynchronize(stop));

            float ms;
            HIP_CHECK(hipEventElapsedTime(&ms, start, stop));

            // 复制结果回主机
            HIP_CHECK(hipMemcpy(hC.data(), dC, n * sizeof(float), hipMemcpyDeviceToHost));

            // 验证结果
            bool success = verify_vector_add(hC, a, hA, hB);
            if (success) {
                std::cout << "VectorAdd result verification SUCCESS" << std::endl;
            } else {
                std::cerr << "VectorAdd result verification FAILED" << std::endl;
            }

            std::cout << "VectorAdd executed in " << ms << " ms" << std::endl;

            // 清理
            HIP_CHECK(hipFree(dA));
            HIP_CHECK(hipFree(dB));
            HIP_CHECK(hipFree(dC));
            HIP_CHECK(hipEventDestroy(start));
            HIP_CHECK(hipEventDestroy(stop));
            break;
        }
        case 2: {  // 大型卷积核函数
            // 使用小尺寸数据以合理时间内完成验证
            constexpr int depth = 8, height = 8, width = 8;
            constexpr int kDepth = 3, kHeight = 3, kWidth = 3;

            std::cout << "Executing 3D convolution with dimensions: "
                      << depth << "x" << height << "x" << width
                      << " and kernel size: "
                      << kDepth << "x" << kHeight << "x" << kWidth << std::endl;

            // 准备主机数据
            const size_t input_size = depth * height * width;
            const size_t kernel_size = kDepth * kHeight * kWidth;
            const size_t output_size = depth * height * width;

            std::vector<float> hInput(input_size);
            std::vector<float> hKernel(kernel_size);
            std::vector<float> hOutput(output_size, 0.0f);

            // 生成随机输入数据和固定模式的卷积核
            std::generate(hInput.begin(), hInput.end(), [&]() { return dist(gen); });

            // 创建高斯卷积核
            const float sigma = 1.0f;
            const float two_sigma_sq = 2.0f * sigma * sigma;
            float sum = 0.0f;

            for (int i = 0; i < kDepth; ++i) {
                for (int j = 0; j < kHeight; ++j) {
                    for (int k = 0; k < kWidth; ++k) {
                        float di = i - kDepth/2;
                        float dj = j - kHeight/2;
                        float dk = k - kWidth/2;
                        float value = expf(-(di*di + dj*dj + dk*dk) / two_sigma_sq);
                        hKernel[i * kHeight * kWidth + j * kWidth + k] = value;
                        sum += value;
                    }
                }
            }

            // 归一化卷积核
            for (auto& val : hKernel) {
                val /= sum;
            }

            // 准备设备内存
            float *dInput = nullptr, *dOutput = nullptr, *dKernel = nullptr;
            HIP_CHECK(hipMalloc(&dInput, input_size * sizeof(float)));
            HIP_CHECK(hipMalloc(&dOutput, output_size * sizeof(float)));
            HIP_CHECK(hipMalloc(&dKernel, kernel_size * sizeof(float)));

            // 复制数据到设备
            HIP_CHECK(hipMemcpy(dInput, hInput.data(), input_size * sizeof(float), hipMemcpyHostToDevice));
            HIP_CHECK(hipMemcpy(dKernel, hKernel.data(), kernel_size * sizeof(float), hipMemcpyHostToDevice));

            // 启动参数
            struct Args {
                float* input;
                float* output;
                float* kernel;
                int depth;
                int height;
                int width;
                int kDepth;
                int kHeight;
                int kWidth;
            } args{dInput, dOutput, dKernel, depth, height, width, kDepth, kHeight, kWidth};

            size_t arg_size = sizeof(args);
            void* config[] = {
                HIP_LAUNCH_PARAM_BUFFER_POINTER, &args,
                HIP_LAUNCH_PARAM_BUFFER_SIZE, &arg_size,
                HIP_LAUNCH_PARAM_END
            };

            // 执行核函数
            hipEvent_t start, stop;
            HIP_CHECK(hipEventCreate(&start));
            HIP_CHECK(hipEventCreate(&stop));

            constexpr int block_x = 4, block_y = 4, block_z = 4;
            dim3 grid((width + block_x - 1) / block_x,
                      (height + block_y - 1) / block_y,
                      (depth + block_z - 1) / block_z);
            dim3 block(block_x, block_y, block_z);

            HIP_CHECK(hipEventRecord(start));
            HIP_CHECK(hipModuleLaunchKernel(cache.function,
                grid.x, grid.y, grid.z,  // grid
                block.x, block.y, block.z,  // block
                0, nullptr, nullptr, config));
            HIP_CHECK(hipEventRecord(stop));
            HIP_CHECK(hipEventSynchronize(stop));

            float ms;
            HIP_CHECK(hipEventElapsedTime(&ms, start, stop));

            // 复制结果回主机
            HIP_CHECK(hipMemcpy(hOutput.data(), dOutput, output_size * sizeof(float), hipMemcpyDeviceToHost));

            // 验证结果
            bool success = verify_convolution(hOutput, hInput, hKernel,
                                             depth, height, width,
                                             kDepth, kHeight, kWidth);
            if (success) {
                std::cout << "Convolution result verification SUCCESS" << std::endl;
            } else {
                std::cerr << "Convolution result verification FAILED" << std::endl;
            }

            std::cout << "Convolution executed in " << ms << " ms" << std::endl;

            // 清理
            HIP_CHECK(hipFree(dInput));
            HIP_CHECK(hipFree(dOutput));
            HIP_CHECK(hipFree(dKernel));
            HIP_CHECK(hipEventDestroy(start));
            HIP_CHECK(hipEventDestroy(stop));
            break;
        }
    }
}

// Entry point: parse <kernel_type> <num_compiles> [use_cache], then
// repeatedly compile and execute the selected kernel, measuring compile
// time (and cache effectiveness) on each iteration.
int main(int argc, char** argv) {
    // Release cached modules even on early-exit paths.
    std::atexit(cleanup_resources);

    if (argc < 3) {
        std::cerr << "Usage: " << argv[0]
                  << " <kernel_type:0-small|1-medium|2-large> <num_compiles> [use_cache:0|1=1]"
                  << std::endl;
        return 1;
    }

    int kernel_type = std::atoi(argv[1]);
    int num_compiles = std::atoi(argv[2]);
    bool use_cache = (argc > 3) ? std::atoi(argv[3]) != 0 : true;

    // Validate arguments up front: the original let a bad kernel type
    // abort() deep inside compile_kernel and silently did nothing for a
    // non-positive iteration count.
    if (kernel_type < 0 || kernel_type > 2) {
        std::cerr << "Invalid kernel type: " << argv[1]
                  << " (expected 0, 1 or 2)" << std::endl;
        return 1;
    }
    if (num_compiles < 1) {
        std::cerr << "Invalid number of compilations: " << argv[2]
                  << " (expected a positive integer)" << std::endl;
        return 1;
    }

    std::cout << "Testing kernel type: " << kernel_type
              << ", Compilations: " << num_compiles
              << ", Cache: " << (use_cache ? "enabled" : "disabled")
              << std::endl;

    // Main compile/execute loop.
    for (int i = 0; i < num_compiles; ++i) {
        std::cout << "\nCompilation #" << (i + 1) << ":" << std::endl;
        auto cache = compile_kernel(kernel_type, use_cache);

        execute_kernel(kernel_type, cache);

        // Uncached modules are owned by this iteration; unload them here.
        if (!use_cache) {
            HIP_CHECK(hipModuleUnload(cache.module));
        }
    }

    // Idempotent: the atexit handler will see this work already done.
    cleanup_resources();

    return 0;
}