#include <torch/all.h>
#include "registration.h"
#include <torch/extension.h>

//! The enclosing C++ namespace has no effect on registration; what matters is
//! the NAME argument passed to TORCH_LIBRARY(NAME, MODULE).
// namespace xllm {

// Element-wise fused multiply-add on CPU: returns a * b + c.
//
// @param a  float32 CPU tensor.
// @param b  float32 CPU tensor, same shape as `a`.
// @param c  scalar added to every product element.
// @return   freshly allocated float32 tensor of the same shape.
at::Tensor mul_add_cpu(const at::Tensor& a, const at::Tensor& b, double c) {
  TORCH_CHECK(a.sizes() == b.sizes());
  TORCH_CHECK(a.dtype() == at::kFloat);
  TORCH_CHECK(b.dtype() == at::kFloat);

  TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CPU);
  TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CPU);

  // Make both inputs contiguous so the raw-pointer walk below is valid.
  const at::Tensor lhs = a.contiguous();
  const at::Tensor rhs = b.contiguous();
  at::Tensor out = torch::empty(lhs.sizes(), lhs.options());

  const float* lhs_data = lhs.data_ptr<float>();
  const float* rhs_data = rhs.data_ptr<float>();
  float* out_data       = out.data_ptr<float>();

  const int64_t count = out.numel();
  for (int64_t idx = 0; idx < count; ++idx) {
    out_data[idx] = lhs_data[idx] * rhs_data[idx] + c;
  }
  return out;
}

// Element-wise multiply on CPU: returns a * b.
//
// @param a  float32 CPU tensor.
// @param b  float32 CPU tensor, same shape as `a`.
// @return   freshly allocated float32 tensor of the same shape.
at::Tensor mul_cpu(const at::Tensor& a, const at::Tensor& b) {
  TORCH_CHECK(a.sizes() == b.sizes());
  TORCH_CHECK(a.dtype() == at::kFloat);
  TORCH_CHECK(b.dtype() == at::kFloat);

  TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CPU);
  TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CPU);

  // Contiguous copies (no-ops if already contiguous) back the raw pointers.
  const at::Tensor lhs = a.contiguous();
  const at::Tensor rhs = b.contiguous();
  at::Tensor out = torch::empty(lhs.sizes(), lhs.options());

  const float* lhs_data = lhs.data_ptr<float>();
  const float* rhs_data = rhs.data_ptr<float>();
  float* out_data       = out.data_ptr<float>();

  const int64_t count = out.numel();
  for (int64_t idx = 0; idx < count; ++idx) {
    out_data[idx] = lhs_data[idx] * rhs_data[idx];
  }
  return out;
}

// An example of an operator that mutates one of its inputs.
// Element-wise add on CPU, written into a caller-provided output tensor.
// An example of an operator that mutates one of its inputs.
//
// @param a    float32 CPU tensor.
// @param b    float32 CPU tensor, same shape as `a`.
// @param out  float32 CPU tensor, same shape, contiguous; overwritten with a + b.
void add_cpu(const at::Tensor& a, const at::Tensor& b, at::Tensor& out) {
  TORCH_CHECK(a.sizes() == b.sizes());
  TORCH_CHECK(b.sizes() == out.sizes());
  TORCH_CHECK(a.dtype() == at::kFloat);
  TORCH_CHECK(b.dtype() == at::kFloat);
  TORCH_CHECK(out.dtype() == at::kFloat);
  // `out` is written through a raw pointer, so it must already be contiguous.
  TORCH_CHECK(out.is_contiguous());

  TORCH_INTERNAL_ASSERT(a.device().type()   == at::DeviceType::CPU);
  TORCH_INTERNAL_ASSERT(b.device().type()   == at::DeviceType::CPU);
  TORCH_INTERNAL_ASSERT(out.device().type() == at::DeviceType::CPU);

  const at::Tensor lhs = a.contiguous();
  const at::Tensor rhs = b.contiguous();

  const float* lhs_data = lhs.data_ptr<float>();
  const float* rhs_data = rhs.data_ptr<float>();
  float* out_data       = out.data_ptr<float>();

  const int64_t count = out.numel();
  for (int64_t idx = 0; idx < count; ++idx) {
    out_data[idx] = lhs_data[idx] + rhs_data[idx];
  }
}

//! Define the operator schemas; this only needs to happen once per library.
TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, m) {
  m.def("muladd(Tensor a, Tensor b, float c) -> Tensor");
  m.def("mul(Tensor a, Tensor b) -> Tensor");
  //! Tensor(a!) marks the `out` tensor as mutable — an in/out argument.
  //! "-> ()" means the operator returns nothing.
  m.def("add(Tensor a, Tensor b, Tensor(a!) out) -> ()");
}

//! Register the CPU implementations: bind each schema defined above to its
//! kernel under the CPU dispatch key.
TORCH_LIBRARY_IMPL_EXPAND(TORCH_EXTENSION_NAME, CPU, m) {
  m.impl("muladd",  &mul_add_cpu);
  m.impl("mul",     &mul_cpu);
  m.impl("add",     &add_cpu);
}

//! Register the Python module entry point for this extension.
REGISTER_EXTENSION(TORCH_EXTENSION_NAME)

// }


