#include <torch/torch.h>

#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include <iostream>
#include <limits>
#include <stdexcept>
#include <string>
#include <type_traits>

// Element-wise addition: out[i] = A[i] + B[i] for i in [0, size).
//
// Expects a 1D launch with gridDim.x * blockDim.x >= size; threads past
// the end exit via the bounds guard, so size need not be a multiple of
// the block size. A and B are read-only (`const __restrict__` lets the
// compiler use the read-only data cache); out must not alias A or B.
template <typename T>
__global__ void cuda_add_kernel(T *__restrict__ out,
                                const T *__restrict__ A,
                                const T *__restrict__ B,
                                int size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        // Dispatch at compile time: __half lacks a portable device
        // operator+, so use the __hadd intrinsic for it; plain + for
        // ordinary arithmetic types such as float.
        if constexpr (std::is_same_v<T, __half>) {
            out[idx] = __hadd(A[idx], B[idx]);
        } else {
            out[idx] = A[idx] + B[idx];
        }
    }
}

// Element-wise addition of two CUDA tensors: returns a new tensor a + b.
//
// Supported dtypes: float16 and float32. Both inputs must be CUDA
// tensors with identical shapes and dtypes, and must be contiguous
// (the raw data_ptr is handed to the kernel, so strides are ignored).
//
// Throws std::runtime_error for unsupported dtypes or a failed kernel
// launch; TORCH_CHECK failures (c10::Error) for invalid inputs.
torch::Tensor cuda_add(torch::Tensor a, torch::Tensor b) {
    TORCH_CHECK(a.is_cuda() && b.is_cuda(), "cuda_add: inputs must be CUDA tensors");
    TORCH_CHECK(a.sizes() == b.sizes(), "cuda_add: input shapes must match");
    TORCH_CHECK(a.scalar_type() == b.scalar_type(), "cuda_add: input dtypes must match");
    TORCH_CHECK(a.is_contiguous() && b.is_contiguous(), "cuda_add: inputs must be contiguous");

    auto output = torch::empty_like(a);

    const int64_t numel = a.numel();
    // Empty tensors: return immediately — launching with gridDim.x == 0
    // would be an invalid launch configuration.
    if (numel == 0) {
        return output;
    }
    // The kernel indexes with a 32-bit int; reject sizes that would
    // silently truncate/overflow.
    TORCH_CHECK(numel <= std::numeric_limits<int>::max(),
                "cuda_add: tensor too large for 32-bit indexing");
    const int size = static_cast<int>(numel);

    dim3 threads(256);
    // Ceil-division so the final partial block covers the tail.
    dim3 blocks((size + threads.x - 1) / threads.x);

    if (a.scalar_type() == torch::kFloat16) {
        cuda_add_kernel<__half><<<blocks, threads>>>(
            reinterpret_cast<__half*>(output.data_ptr<at::Half>()),
            reinterpret_cast<__half*>(a.data_ptr<at::Half>()),
            reinterpret_cast<__half*>(b.data_ptr<at::Half>()),
            size
        );
    } else if (a.scalar_type() == torch::kFloat32) {
        cuda_add_kernel<float><<<blocks, threads>>>(
            output.data_ptr<float>(),
            a.data_ptr<float>(),
            b.data_ptr<float>(),
            size
        );
    } else {
        throw std::runtime_error("Unsupported data type for cuda_add");
    }

    // Kernel launches do not return an error directly; bad launch
    // configurations only surface through cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        throw std::runtime_error(
            std::string("cuda_add kernel launch failed: ") + cudaGetErrorString(err));
    }

    return output;
}