#include <ATen/Operators.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/all.h>
#include <torch/library.h>

#include <limits>

namespace extension_cpp
{

    // Elementwise fused multiply-add: result[i] = a[i] * b[i] + c.
    //
    // Expects a 1D launch. The grid-stride loop makes the kernel correct for
    // any grid size (including a single-block debug launch) instead of
    // silently skipping elements past gridDim.x * blockDim.x. `__restrict__`
    // asserts the buffers do not alias, enabling read-only-cache loads.
    __global__ void muladd_kernel(int numel, const float* __restrict__ a,
                                  const float* __restrict__ b, float c,
                                  float* __restrict__ result)
    {
        int stride = gridDim.x * blockDim.x;
        for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numel; idx += stride)
        {
            result[idx] = a[idx] * b[idx] + c;
        }
    }

    // CUDA implementation of mymuladd: returns a * b + c elementwise.
    //
    // Args:
    //   a, b: float32 CUDA tensors of identical shape.
    //   c:    scalar addend (narrowed to float at the kernel boundary).
    // Returns: a new contiguous float32 CUDA tensor of the same shape.
    // Raises (c10::Error) on shape/dtype mismatch, oversized tensors, or a
    // failed kernel launch.
    at::Tensor mymuladd_cuda(const at::Tensor& a, const at::Tensor& b, double c)
    {
        TORCH_CHECK(a.sizes() == b.sizes(), "mymuladd: a and b must have the same shape");
        TORCH_CHECK(a.dtype() == at::kFloat, "mymuladd: a must be float32");
        TORCH_CHECK(b.dtype() == at::kFloat, "mymuladd: b must be float32");
        TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CUDA);
        TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CUDA);
        at::Tensor a_contig = a.contiguous();
        at::Tensor b_contig = b.contiguous();
        at::Tensor result = at::empty(a_contig.sizes(), a_contig.options());

        // numel() is int64_t but the kernel indexes with int: reject tensors
        // that would overflow rather than truncating silently, and skip the
        // launch for empty tensors (a grid dimension of 0 is an invalid
        // launch configuration).
        const int64_t numel64 = a_contig.numel();
        if (numel64 == 0)
        {
            return result;
        }
        TORCH_CHECK(numel64 <= std::numeric_limits<int>::max(),
                    "mymuladd: tensor has too many elements for int indexing");
        const int numel = static_cast<int>(numel64);

        const float* a_ptr = a_contig.data_ptr<float>();
        const float* b_ptr = b_contig.data_ptr<float>();
        float* result_ptr = result.data_ptr<float>();

        // Launch on the current PyTorch stream so we order correctly with
        // surrounding ATen work.
        cudaStream_t stream = at::cuda::getCurrentCUDAStream();
        muladd_kernel<<<(numel + 255) / 256, 256, 0, stream>>>(numel, a_ptr, b_ptr, c, result_ptr);
        // Kernel launches return no status directly; surface bad-config
        // errors here instead of letting them poison a later CUDA call.
        cudaError_t err = cudaGetLastError();
        TORCH_CHECK(err == cudaSuccess, "muladd_kernel launch failed: ", cudaGetErrorString(err));
        return result;
    }

    // Elementwise product: result[i] = a[i] * b[i].
    //
    // Expects a 1D launch. Grid-stride loop: correct for any grid size, even
    // when numel exceeds gridDim.x * blockDim.x. `__restrict__` asserts the
    // buffers do not alias, enabling read-only-cache loads.
    __global__ void mul_kernel(int numel, const float* __restrict__ a,
                               const float* __restrict__ b,
                               float* __restrict__ result)
    {
        int stride = gridDim.x * blockDim.x;
        for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numel; idx += stride)
        {
            result[idx] = a[idx] * b[idx];
        }
    }

    // CUDA implementation of mymul: returns a * b elementwise.
    //
    // Args:
    //   a, b: float32 CUDA tensors of identical shape.
    // Returns: a new contiguous float32 CUDA tensor of the same shape.
    // Raises (c10::Error) on shape/dtype mismatch, oversized tensors, or a
    // failed kernel launch.
    at::Tensor mymul_cuda(const at::Tensor& a, const at::Tensor& b)
    {
        TORCH_CHECK(a.sizes() == b.sizes(), "mymul: a and b must have the same shape");
        TORCH_CHECK(a.dtype() == at::kFloat, "mymul: a must be float32");
        TORCH_CHECK(b.dtype() == at::kFloat, "mymul: b must be float32");
        TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CUDA);
        TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CUDA);
        at::Tensor a_contig = a.contiguous();
        at::Tensor b_contig = b.contiguous();
        at::Tensor result = at::empty(a_contig.sizes(), a_contig.options());

        // numel() is int64_t but the kernel indexes with int: reject tensors
        // that would overflow rather than truncating silently, and skip the
        // launch for empty tensors (a grid dimension of 0 is an invalid
        // launch configuration).
        const int64_t numel64 = a_contig.numel();
        if (numel64 == 0)
        {
            return result;
        }
        TORCH_CHECK(numel64 <= std::numeric_limits<int>::max(),
                    "mymul: tensor has too many elements for int indexing");
        const int numel = static_cast<int>(numel64);

        const float* a_ptr = a_contig.data_ptr<float>();
        const float* b_ptr = b_contig.data_ptr<float>();
        float* result_ptr = result.data_ptr<float>();

        // Launch on the current PyTorch stream so we order correctly with
        // surrounding ATen work.
        cudaStream_t stream = at::cuda::getCurrentCUDAStream();
        mul_kernel<<<(numel + 255) / 256, 256, 0, stream>>>(numel, a_ptr, b_ptr, result_ptr);
        // Kernel launches return no status directly; surface bad-config
        // errors here instead of letting them poison a later CUDA call.
        cudaError_t err = cudaGetLastError();
        TORCH_CHECK(err == cudaSuccess, "mul_kernel launch failed: ", cudaGetErrorString(err));
        return result;
    }

    // Elementwise sum: result[i] = a[i] + b[i].
    //
    // Expects a 1D launch. Grid-stride loop: correct for any grid size, even
    // when numel exceeds gridDim.x * blockDim.x. `__restrict__` asserts the
    // buffers do not alias, enabling read-only-cache loads.
    __global__ void add_kernel(int numel, const float* __restrict__ a,
                               const float* __restrict__ b,
                               float* __restrict__ result)
    {
        int stride = gridDim.x * blockDim.x;
        for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numel; idx += stride)
        {
            result[idx] = a[idx] + b[idx];
        }
    }

    // CUDA implementation of myadd_out: writes a + b elementwise into `out`.
    //
    // Args:
    //   a, b: float32 CUDA tensors of identical shape.
    //   out:  contiguous float32 CUDA tensor of the same shape; overwritten.
    // Raises (c10::Error) on shape/dtype/contiguity mismatch, oversized
    // tensors, or a failed kernel launch.
    void myadd_out_cuda(const at::Tensor& a, const at::Tensor& b, at::Tensor& out)
    {
        TORCH_CHECK(a.sizes() == b.sizes(), "myadd_out: a and b must have the same shape");
        TORCH_CHECK(b.sizes() == out.sizes(), "myadd_out: out must have the same shape as the inputs");
        TORCH_CHECK(a.dtype() == at::kFloat, "myadd_out: a must be float32");
        TORCH_CHECK(b.dtype() == at::kFloat, "myadd_out: b must be float32");
        TORCH_CHECK(out.dtype() == at::kFloat, "myadd_out: out must be float32");
        TORCH_CHECK(out.is_contiguous(), "myadd_out: out must be contiguous");
        TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CUDA);
        TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CUDA);
        TORCH_INTERNAL_ASSERT(out.device().type() == at::DeviceType::CUDA);
        at::Tensor a_contig = a.contiguous();
        at::Tensor b_contig = b.contiguous();

        // numel() is int64_t but the kernel indexes with int: reject tensors
        // that would overflow rather than truncating silently, and skip the
        // launch for empty tensors (a grid dimension of 0 is an invalid
        // launch configuration).
        const int64_t numel64 = a_contig.numel();
        if (numel64 == 0)
        {
            return;
        }
        TORCH_CHECK(numel64 <= std::numeric_limits<int>::max(),
                    "myadd_out: tensor has too many elements for int indexing");
        const int numel = static_cast<int>(numel64);

        const float* a_ptr = a_contig.data_ptr<float>();
        const float* b_ptr = b_contig.data_ptr<float>();
        float* result_ptr = out.data_ptr<float>();

        // Launch on the current PyTorch stream so we order correctly with
        // surrounding ATen work.
        cudaStream_t stream = at::cuda::getCurrentCUDAStream();
        add_kernel<<<(numel + 255) / 256, 256, 0, stream>>>(numel, a_ptr, b_ptr, result_ptr);
        // Kernel launches return no status directly; surface bad-config
        // errors here instead of letting them poison a later CUDA call.
        cudaError_t err = cudaGetLastError();
        TORCH_CHECK(err == cudaSuccess, "add_kernel launch failed: ", cudaGetErrorString(err));
    }

    // Registers the CUDA backend implementations of mymuladd, mymul, and
    // myadd_out with PyTorch's dispatcher. The operator schemas themselves
    // are declared elsewhere (via TORCH_LIBRARY, not visible in this file);
    // calls made on CUDA tensors are routed to the functions registered here.
    TORCH_LIBRARY_IMPL(extension_cpp, CUDA, m)
    {
        m.impl("mymuladd", &mymuladd_cuda);
        m.impl("mymul", &mymul_cuda);
        m.impl("myadd_out", &myadd_out_cuda);
    }

}  // namespace extension_cpp
