#include <gtest/gtest.h>
#include <spdlog/spdlog.h>
#include <torch/torch.h>

#include <cmath>
#include <limits>
#include <utility>
#include <vector>

#include "hbm_abs_bw.h"

/**
 * 测试hbm_abs_bw内核的正确性
 *
 * @tparam VectorSize - 向量大小 (1, 2, 或 4)
 * @tparam Unroll - 展开因子 (1, 2, 或 4)
 * @param n - 元素数量
 * @param epsilon - 允许的误差
 * @return 测试是否通过
 */
template <int VectorSize, int Unroll, bool HandleTail = false>
bool TestHbmAbsBw(size_t n, float epsilon = 1e-6) {
  spdlog::info("Testing hbm_abs_bw with VectorSize={}, Unroll={}, n={}",
               VectorSize, Unroll, n);

  // 创建输入张量，包含正数和负数
  torch::Tensor input_tensor = torch::randn(
      {static_cast<int64_t>(n)}, torch::TensorOptions().dtype(torch::kFloat32));

  // 计算PyTorch的绝对值作为参考结果
  torch::Tensor golden_output = torch::abs(input_tensor);

  // 创建输出张量
  torch::Tensor output_tensor = torch::zeros_like(input_tensor);

  // 将数据移动到GPU
  auto input_cuda = input_tensor.cuda();
  auto output_cuda = output_tensor.cuda();

  // 创建CUDA流
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

  // 调用我们的绝对值内核
  hbm::pointwise::launch_abs_kernel<VectorSize, Unroll, HandleTail>(
      static_cast<float*>(output_cuda.data_ptr()),
      static_cast<const float*>(input_cuda.data_ptr()), n, stream);

  // 同步CUDA流
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);

  // 将结果移回CPU进行比较
  auto output_cpu = output_cuda.cpu();

  // 计算最大误差
  torch::Tensor diff = torch::abs(output_cpu - golden_output);
  float max_diff = diff.max().item<float>();

  // 检查结果是否在误差范围内
  bool passed = max_diff <= epsilon;

  if (passed) {
    spdlog::info("Test passed! Maximum difference: {}", max_diff);
  } else {
    spdlog::error("Test failed! Maximum difference: {}", max_diff);

    // 找出误差最大的位置
    auto max_index = torch::argmax(diff).item<int64_t>();
    spdlog::error("Maximum error at index {}: expected {}, got {}", max_index,
                  golden_output[max_index].item<float>(),
                  output_cpu[max_index].item<float>());
  }

  return passed;
}

// Correctness over inputs of different sizes.
TEST(test_hbm_abs_bw, small_input) {
  // Small input: 100 elements, no unrolling.
  EXPECT_TRUE((TestHbmAbsBw<1, 1, true>(100)));
  EXPECT_TRUE((TestHbmAbsBw<2, 1, true>(100)));
  EXPECT_TRUE((TestHbmAbsBw<4, 1, true>(100)));
}

TEST(test_hbm_abs_bw, medium_input) {
  // Medium-sized input: 10k elements, unroll factor 2.
  EXPECT_TRUE((TestHbmAbsBw<1, 2, true>(10000)));
  EXPECT_TRUE((TestHbmAbsBw<2, 2, true>(10000)));
  EXPECT_TRUE((TestHbmAbsBw<4, 2, true>(10000)));
}

TEST(test_hbm_abs_bw, large_input) {
  // Large input: 1M elements, unroll factor 4.
  EXPECT_TRUE((TestHbmAbsBw<1, 4, true>(1000000)));
  EXPECT_TRUE((TestHbmAbsBw<2, 4, true>(1000000)));
  EXPECT_TRUE((TestHbmAbsBw<4, 4, true>(1000000)));
}

// Inputs whose size is not aligned to the vector width / unroll factor —
// exercises the HandleTail path.
TEST(test_hbm_abs_bw, unaligned_input) {
  // Sizes that are not multiples of the vector width.
  EXPECT_TRUE((TestHbmAbsBw<2, 1, true>(101)));  // not a multiple of 2
  EXPECT_TRUE((TestHbmAbsBw<4, 1, true>(103)));  // not a multiple of 4

  // Size not aligned to the unroll factor.
  EXPECT_TRUE((TestHbmAbsBw<1, 4, true>(1025)));  // not a multiple of grid size * 4
}

// Special IEEE-754 values: zeros, extremes, infinities, NaN, denormal edge.
TEST(test_hbm_abs_bw, special_cases) {
  // Build an input covering the tricky corners of float.
  torch::Tensor special_input = torch::zeros({10}, torch::kFloat32);
  special_input[0] = 0.0f;                                     // zero
  special_input[1] = -0.0f;                                    // negative zero
  special_input[2] = std::numeric_limits<float>::max();        // largest finite
  special_input[3] = -std::numeric_limits<float>::max();       // most negative finite
  special_input[4] = std::numeric_limits<float>::infinity();   // +inf
  special_input[5] = -std::numeric_limits<float>::infinity();  // -inf
  special_input[6] = std::numeric_limits<float>::quiet_NaN();  // NaN
  special_input[7] = 1.0f;                                     // positive
  special_input[8] = -1.0f;                                    // negative
  special_input[9] = std::numeric_limits<float>::min();        // smallest normal

  // PyTorch absolute value as the reference result.
  torch::Tensor golden_output = torch::abs(special_input);

  // Zero-initialized output tensor.
  torch::Tensor output_tensor = torch::zeros_like(special_input);

  // Move data to the GPU.
  auto input_cuda = special_input.cuda();
  auto output_cuda = output_tensor.cuda();

  // Non-blocking stream for the kernel launch.
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

  // Launch the abs kernel under test (scalar path, tail handling on).
  hbm::pointwise::launch_abs_kernel<1, 1, true>(
      static_cast<float*>(output_cuda.data_ptr()),
      static_cast<const float*>(input_cuda.data_ptr()), 10, stream);

  // Surface launch-config and async execution errors explicitly instead of
  // letting a failed launch pass the comparison against a stale zero buffer.
  ASSERT_EQ(cudaGetLastError(), cudaSuccess);
  ASSERT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
  cudaStreamDestroy(stream);

  // Copy the result back to the host for comparison.
  auto output_cpu = output_cuda.cpu();

  for (int i = 0; i < 10; i++) {
    float expected = golden_output[i].item<float>();
    float actual = output_cpu[i].item<float>();
    if (std::isnan(expected)) {
      // NaN never compares equal to itself: only require that the kernel
      // also produced NaN. Detect the NaN slot from the reference rather
      // than relying on a hard-coded index.
      EXPECT_TRUE(std::isnan(actual)) << "at index " << i;
    } else {
      // All other values must match PyTorch's result exactly (abs is
      // bit-exact for these inputs).
      EXPECT_FLOAT_EQ(actual, expected) << "at index " << i;
    }
  }
}

// Bandwidth benchmark across all (VectorSize, Unroll) configurations.
TEST(test_hbm_abs_bw, performance) {
  const size_t n = 10000000;  // 10 million elements

  // Random input, zeroed output; allocated and copied to the GPU once and
  // reused for every configuration.
  torch::Tensor input_tensor = torch::randn(
      {static_cast<int64_t>(n)}, torch::TensorOptions().dtype(torch::kFloat32));
  torch::Tensor output_tensor = torch::zeros_like(input_tensor);

  auto input_cuda = input_tensor.cuda();
  auto output_cuda = output_tensor.cuda();

  // Non-blocking stream for all launches.
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

  // CUDA events for device-side timing.
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  float* out = static_cast<float*>(output_cuda.data_ptr());
  const float* in = static_cast<const float*>(input_cuda.data_ptr());

  // Single dispatch point from runtime (vector_size, unroll) to the matching
  // template instantiation, shared by warmup and the timed run. (Previously
  // this 9-way chain was duplicated verbatim in both places.)
  auto launch = [&](int vector_size, int unroll) {
    switch (vector_size * 10 + unroll) {
      case 11: hbm::pointwise::launch_abs_kernel<1, 1, true>(out, in, n, stream); break;
      case 12: hbm::pointwise::launch_abs_kernel<1, 2, true>(out, in, n, stream); break;
      case 14: hbm::pointwise::launch_abs_kernel<1, 4, true>(out, in, n, stream); break;
      case 21: hbm::pointwise::launch_abs_kernel<2, 1, true>(out, in, n, stream); break;
      case 22: hbm::pointwise::launch_abs_kernel<2, 2, true>(out, in, n, stream); break;
      case 24: hbm::pointwise::launch_abs_kernel<2, 4, true>(out, in, n, stream); break;
      case 41: hbm::pointwise::launch_abs_kernel<4, 1, true>(out, in, n, stream); break;
      case 42: hbm::pointwise::launch_abs_kernel<4, 2, true>(out, in, n, stream); break;
      case 44: hbm::pointwise::launch_abs_kernel<4, 4, true>(out, in, n, stream); break;
      default:
        ADD_FAILURE() << "unsupported config: VectorSize=" << vector_size
                      << ", Unroll=" << unroll;
    }
  };

  // Every (VectorSize, Unroll) combination under test.
  const std::vector<std::pair<int, int>> configs = {
      {1, 1}, {1, 2}, {1, 4}, {2, 1}, {2, 2}, {2, 4}, {4, 1}, {4, 2}, {4, 4}};

  for (const auto& config : configs) {
    const int vector_size = config.first;
    const int unroll = config.second;

    // Warm up to amortize one-time costs before timing.
    for (int i = 0; i < 5; i++) {
      launch(vector_size, unroll);
    }
    cudaStreamSynchronize(stream);

    // Time a single launch with events recorded on the same stream.
    cudaEventRecord(start, stream);
    launch(vector_size, unroll);
    cudaEventRecord(stop, stream);
    cudaStreamSynchronize(stream);
    // A configuration that fails to launch would otherwise report a bogus
    // (near-zero) time as a stellar bandwidth number.
    ASSERT_EQ(cudaGetLastError(), cudaSuccess);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    // Effective bandwidth in GB/s: each element is read once and written
    // once. Accumulate in double to avoid float rounding on large n.
    double bytes_processed = static_cast<double>(n) * sizeof(float) * 2;
    double bandwidth = bytes_processed / (milliseconds * 1e-3) / 1e9;

    spdlog::info(
        "VectorSize={}, Unroll={}: Time={:.3f} ms, Bandwidth={:.2f} GB/s",
        vector_size, unroll, milliseconds, bandwidth);
  }

  // Clean up events and the stream.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaStreamDestroy(stream);
}

// int main(int argc, char** argv) {
//   // 初始化Google Test
//   ::testing::InitGoogleTest(&argc, argv);

//   // 设置日志级别
//   spdlog::set_level(spdlog::level::info);

//   // 运行所有测试
//   return RUN_ALL_TESTS();
// }