#pragma once
#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include <cstddef>

namespace hbm {
namespace pointwise {

// Maps a compile-time vector width to the CUDA float vector type used for
// wide (vectorized) global-memory loads/stores in abs_kernel.
// Only widths 1, 2 and 4 are specialized; any other width is a compile error.
template <int VectorSize>
struct FloatVector;

// Width 1: plain scalar float (4 bytes).
template <>
struct FloatVector<1> {
  using type = float;
};

// Width 2: float2 (8 bytes).
template <>
struct FloatVector<2> {
  using type = float2;
};

// Width 4: float4 (16 bytes).
template <>
struct FloatVector<4> {
  using type = float4;
};

// Helper function to compute absolute value for different vector types
// Absolute value of a single float; scalar path used when VectorSize == 1.
__device__ __forceinline__ float abs_scalar(float x) {
  return fabsf(x);
}

// Component-wise absolute value of a float2.
__device__ __forceinline__ float2 abs_vector(float2 v) {
  return make_float2(fabsf(v.x), fabsf(v.y));
}

// Component-wise absolute value of a float4.
__device__ __forceinline__ float4 abs_vector(float4 v) {
  return make_float4(fabsf(v.x), fabsf(v.y), fabsf(v.z), fabsf(v.w));
}

/**
 * CUDA kernel computing element-wise absolute value with configurable
 * vectorization width and unroll factor.
 *
 * Layout: 1D grid of 1D blocks; correct for any grid size because both the
 * main loop and the tail loop are grid-stride loops.
 *
 * Precondition: `input` and `output` must be aligned to
 * `VectorSize * sizeof(float)` bytes — the vector-typed loads/stores below
 * require it.
 *
 * @tparam VectorSize - Vector width (1, 2, or 4)
 * @tparam Unroll - Unroll factor (1, 2, or 4)
 * @tparam HandleTail - Whether to process trailing elements that do not fill
 *                      a whole (Unroll * blockDim.x * VectorSize)-element
 *                      chunk. When false, those elements are left untouched.
 * @param output - Output array (device memory)
 * @param input - Input array (device memory)
 * @param n - Number of elements
 */
template <int VectorSize, int Unroll, bool HandleTail = false>
__global__ void abs_kernel(float* output, const float* input, size_t n) {
  using VectorType = typename FloatVector<VectorSize>::type;

  // Number of whole vectors (n rounded down to a multiple of VectorSize).
  size_t vector_n = n / VectorSize;

  // Widen to size_t BEFORE multiplying: blockIdx.x * blockDim.x is otherwise
  // evaluated in 32-bit unsigned arithmetic and can overflow on large
  // launches.
  size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  size_t grid_size = static_cast<size_t>(blockDim.x) * gridDim.x;

  // Vectors fully covered by whole (Unroll * blockDim.x)-vector chunks.
  size_t aligned_vector_n =
      (vector_n / Unroll / blockDim.x) * Unroll * blockDim.x;

  // Main grid-stride loop over the fully aligned region. Each thread handles
  // Unroll vectors per iteration, strided by blockDim.x so adjacent threads
  // access adjacent vectors (coalesced-friendly layout).
  for (size_t i = static_cast<size_t>(blockIdx.x) * Unroll * blockDim.x +
                  threadIdx.x;
       i < aligned_vector_n; i += Unroll * grid_size) {
#pragma unroll
    for (int u = 0; u < Unroll; ++u) {
      size_t vec_idx = i + u * blockDim.x;
      size_t offset = vec_idx * VectorSize;

      // Single wide load of VectorSize floats.
      VectorType in_val = *reinterpret_cast<const VectorType*>(input + offset);

      VectorType out_val;
      if constexpr (VectorSize == 1) {
        out_val = abs_scalar(in_val);
      } else {
        out_val = abs_vector(in_val);
      }

      *reinterpret_cast<VectorType*>(output + offset) = out_val;
    }
  }

  // Tail: scalar grid-stride loop over everything past the aligned region
  // (vectors that do not fill a whole chunk, plus the n % VectorSize
  // remainder). A grid-stride loop is required here: the previous
  // single-shot `if (i < n)` form silently skipped elements whenever the
  // tail was larger than the total number of launched threads.
  if constexpr (HandleTail) {
    for (size_t i = aligned_vector_n * VectorSize + idx; i < n;
         i += grid_size) {
      output[i] = fabsf(input[i]);
    }
  }
}

/**
 * Launch the abs kernel with the specified vector width and unroll factor.
 *
 * No-op on null pointers or n == 0. Note: when HandleTail is false, elements
 * past the last whole (Unroll * block * VectorSize)-element chunk are not
 * processed by the kernel.
 *
 * @tparam VectorSize - Vector width (1, 2, or 4)
 * @tparam Unroll - Unroll factor (1, 2, or 4)
 * @tparam HandleTail - Whether the kernel also processes tail elements
 * @param output - Output array (device memory)
 * @param input - Input array (device memory)
 * @param n - Number of elements
 * @param stream - CUDA stream (legacy default stream when nullptr)
 */
template <int VectorSize, int Unroll, bool HandleTail = false>
void launch_abs_kernel(float* output, const float* input, size_t n,
                       cudaStream_t stream = nullptr) {
  // Reject unsupported instantiations at compile time instead of failing
  // deep inside FloatVector.
  static_assert(VectorSize == 1 || VectorSize == 2 || VectorSize == 4,
                "VectorSize must be 1, 2, or 4");
  static_assert(Unroll >= 1, "Unroll must be positive");

  // Nothing to do for null pointers or empty input.
  if (output == nullptr || input == nullptr || n == 0) {
    return;
  }

  // Launch configuration. The grid is capped so blocks are reused through
  // the kernel's grid-stride loop instead of launching one block per chunk.
  constexpr int kBlockSize = 256;
  // NOTE(review): 36 * 32 looks like "SM count * blocks per SM" for a
  // specific GPU — confirm against the target architecture.
  constexpr int kMaxGridSize = 36 * 32;

  int grid_size =
      std::min(kMaxGridSize,
               static_cast<int>((n / VectorSize / Unroll + kBlockSize - 1) /
                                kBlockSize));

  // Floor the grid so it is never zero (e.g. n < VectorSize * Unroll).
  // The original comment claimed "at least 1", but the floor actually used
  // is Unroll * VectorSize blocks; preserved as-is for behavior parity.
  grid_size = std::max(Unroll * VectorSize, grid_size);

  abs_kernel<VectorSize, Unroll, HandleTail>
      <<<grid_size, kBlockSize, 0, stream>>>(output, input, n);
}

}  // namespace pointwise
}  // namespace hbm