#include<torch/script.h>
#include<riscv_vector.h>
#include<iostream>

using namespace torch;

// RISCV vector add
// Compute a = b + c
// RISCV vector add
// Compute a = b + c, element-wise over n doubles.
void rvv_vec_add(double *a, double *b, double *c, size_t n) {
    // Strip-mine: each pass processes as many lanes (vl) as vsetvl grants
    // for 64-bit elements at LMUL=8, then advances all three pointers.
    size_t remaining = n;
    while (remaining > 0) {
        size_t vl = vsetvl_e64m8(remaining);

        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t vc = vle64_v_f64m8(c, vl);
        vfloat64m8_t va = vfadd_vv_f64m8(vb, vc, vl);
        vse64_v_f64m8(a, va, vl);

        a += vl;
        b += vl;
        c += vl;
        remaining -= vl;
    }
}

// RISCV vector sub
// Compute a = b - c
// RISCV vector sub
// Compute a = b - c, element-wise over n doubles.
void rvv_vec_sub(double *a, double *b, double *c, size_t n) {
    // Strip-mine: vl lanes per iteration as granted by vsetvl for e64/m8.
    size_t remaining = n;
    while (remaining > 0) {
        size_t vl = vsetvl_e64m8(remaining);

        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t vc = vle64_v_f64m8(c, vl);
        vfloat64m8_t va = vfsub_vv_f64m8(vb, vc, vl);
        vse64_v_f64m8(a, va, vl);

        a += vl;
        b += vl;
        c += vl;
        remaining -= vl;
    }
}

// RISCV vector multiply
// Compute a = b * c
// RISCV vector multiply
// Compute a = b * c, element-wise over n doubles.
void rvv_vec_mul(double *a, double *b, double *c, size_t n) {
    // Strip-mine: vl lanes per iteration as granted by vsetvl for e64/m8.
    size_t remaining = n;
    while (remaining > 0) {
        size_t vl = vsetvl_e64m8(remaining);

        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t vc = vle64_v_f64m8(c, vl);
        vfloat64m8_t va = vfmul_vv_f64m8(vb, vc, vl);
        vse64_v_f64m8(a, va, vl);

        a += vl;
        b += vl;
        c += vl;
        remaining -= vl;
    }
}

// RISCV vector division
// Compute a = b / c
// RISCV vector division
// Compute a = b / c, element-wise over n doubles.
void rvv_vec_div(double *a, double *b, double *c, size_t n) {
    // Strip-mine: vl lanes per iteration as granted by vsetvl for e64/m8.
    size_t remaining = n;
    while (remaining > 0) {
        size_t vl = vsetvl_e64m8(remaining);

        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t vc = vle64_v_f64m8(c, vl);
        vfloat64m8_t va = vfdiv_vv_f64m8(vb, vc, vl);
        vse64_v_f64m8(a, va, vl);

        a += vl;
        b += vl;
        c += vl;
        remaining -= vl;
    }
}

// RISCV vector multiply const
// Compute a = b * c
// RISCV vector multiply const
// Compute a = b * c for scalar c, over n doubles.
void rvv_vec_mul_const(double *a, double *b, const double c, size_t n) {
    // Strip-mine with a vector-scalar multiply; only two pointers advance.
    size_t remaining = n;
    while (remaining > 0) {
        size_t vl = vsetvl_e64m8(remaining);

        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t va = vfmul_vf_f64m8(vb, c, vl);
        vse64_v_f64m8(a, va, vl);

        a += vl;
        b += vl;
        remaining -= vl;
    }
}

// aten::add.Tensor backend: result = self + alpha * other, computed by the
// RVV kernels on contiguous float64 buffers, then cast back to the dtype
// implied by the input dtypes.
Tensor rvv_tensor_add(const Tensor &self, const Tensor &other, const Scalar &alpha) {
    // Work on contiguous float64 copies so the raw-pointer kernels can
    // iterate the storage linearly.
    Tensor cself = self.to(torch::kFloat64).contiguous();
    Tensor cother = other.to(torch::kFloat64).contiguous();

    // Best-effort broadcasting: expand whichever side fits the other's shape.
    // NOTE(review): this does not cover two-sided broadcasting such as
    // [3,1] + [1,4]; those inputs are rejected by the TORCH_CHECK below.
    if (cself.sizes() != cother.sizes()) {
        try {
            cother = cother.expand(cself.sizes()).contiguous();
        } catch (...) {}
        try {
            cself = cself.expand(cother.sizes()).contiguous();
        } catch (...) {}
    }

    // The old `ok` flag was unconditionally true, so mismatched shapes could
    // reach the kernels and read out of bounds; check the real condition.
    TORCH_CHECK(cself.sizes() == cother.sizes(),
                "rvv_tensor_add: shapes are not broadcast-compatible");
    TORCH_INTERNAL_ASSERT(cself.device().type() == DeviceType::CPU);
    TORCH_INTERNAL_ASSERT(cother.device().type() == DeviceType::CPU);

    double *self_data = cself.data_ptr<double>();
    double *other_data = cother.data_ptr<double>();
    size_t n = cself.numel();

    Tensor sum = torch::zeros(cself.sizes(), cself.options());
    double *sum_data = sum.data_ptr<double>();

    // sum = other * alpha, then sum = self + sum.
    rvv_vec_mul_const(sum_data, other_data, alpha.to<double>(), n);
    rvv_vec_add(sum_data, self_data, sum_data, n);

    // Cast back. Float64 inputs must stay float64 — the previous code
    // downcast them to float32 and silently lost precision.
    if (self.dtype() == torch::kFloat64 || other.dtype() == torch::kFloat64) {
        return sum;  // already kFloat64
    }
    if (self.dtype() == torch::kFloat32 || other.dtype() == torch::kFloat32) {
        return sum.to(torch::kFloat32);
    }
    if (self.dtype() == torch::kInt64 || other.dtype() == torch::kInt64) {
        return sum.to(torch::kInt64);
    }
    if (self.dtype() == torch::kInt32 || other.dtype() == torch::kInt32) {
        return sum.to(torch::kInt32);
    }
    if (self.dtype() == torch::kInt16 || other.dtype() == torch::kInt16) {
        return sum.to(torch::kInt16);
    }
    if (self.dtype() == torch::kBool && other.dtype() == torch::kBool) {
        return sum.to(torch::kBool);
    }
    return sum.to(self.dtype());
}

// aten::sub.Tensor backend: result = self - alpha * other, computed by the
// RVV kernels on contiguous float64 buffers, then cast back to the dtype
// implied by the input dtypes.
Tensor rvv_tensor_sub(const Tensor &self, const Tensor &other, const Scalar &alpha) {
    // Work on contiguous float64 copies so the raw-pointer kernels can
    // iterate the storage linearly.
    Tensor cself = self.to(torch::kFloat64).contiguous();
    Tensor cother = other.to(torch::kFloat64).contiguous();

    // Best-effort broadcasting: expand whichever side fits the other's shape.
    // NOTE(review): two-sided broadcasting (e.g. [3,1] - [1,4]) is not
    // handled; those inputs are rejected by the TORCH_CHECK below.
    if (cself.sizes() != cother.sizes()) {
        try {
            cother = cother.expand(cself.sizes()).contiguous();
        } catch (...) {}
        try {
            cself = cself.expand(cother.sizes()).contiguous();
        } catch (...) {}
    }

    // The old `ok` flag was unconditionally true, so mismatched shapes could
    // reach the kernels and read out of bounds; check the real condition.
    TORCH_CHECK(cself.sizes() == cother.sizes(),
                "rvv_tensor_sub: shapes are not broadcast-compatible");
    TORCH_INTERNAL_ASSERT(cself.device().type() == DeviceType::CPU);
    TORCH_INTERNAL_ASSERT(cother.device().type() == DeviceType::CPU);

    double *self_data = cself.data_ptr<double>();
    double *other_data = cother.data_ptr<double>();
    size_t n = cself.numel();

    Tensor diff = torch::zeros(cself.sizes(), cself.options());
    double *diff_data = diff.data_ptr<double>();

    // diff = other * alpha, then diff = self - diff.
    rvv_vec_mul_const(diff_data, other_data, alpha.to<double>(), n);
    rvv_vec_sub(diff_data, self_data, diff_data, n);

    // Cast back. Float64 inputs must stay float64 — the previous code
    // downcast them to float32 and silently lost precision.
    if (self.dtype() == torch::kFloat64 || other.dtype() == torch::kFloat64) {
        return diff;  // already kFloat64
    }
    if (self.dtype() == torch::kFloat32 || other.dtype() == torch::kFloat32) {
        return diff.to(torch::kFloat32);
    }
    if (self.dtype() == torch::kInt64 || other.dtype() == torch::kInt64) {
        return diff.to(torch::kInt64);
    }
    if (self.dtype() == torch::kInt32 || other.dtype() == torch::kInt32) {
        return diff.to(torch::kInt32);
    }
    if (self.dtype() == torch::kInt16 || other.dtype() == torch::kInt16) {
        return diff.to(torch::kInt16);
    }
    return diff.to(self.dtype());
}

// aten::mul.Tensor backend: result = self * other, computed by the RVV
// kernels on contiguous float64 buffers, then cast back to the dtype
// implied by the input dtypes.
Tensor rvv_tensor_mul(const Tensor &self, const Tensor &other) {
    // Work on contiguous float64 copies so the raw-pointer kernels can
    // iterate the storage linearly.
    Tensor cself = self.to(torch::kFloat64).contiguous();
    Tensor cother = other.to(torch::kFloat64).contiguous();

    // Best-effort broadcasting: expand whichever side fits the other's shape.
    // NOTE(review): two-sided broadcasting (e.g. [3,1] * [1,4]) is not
    // handled; those inputs are rejected by the TORCH_CHECK below.
    if (cself.sizes() != cother.sizes()) {
        try {
            cother = cother.expand(cself.sizes()).contiguous();
        } catch (...) {}
        try {
            cself = cself.expand(cother.sizes()).contiguous();
        } catch (...) {}
    }

    // The old `ok` flag was unconditionally true, so mismatched shapes could
    // reach the kernels and read out of bounds; check the real condition.
    TORCH_CHECK(cself.sizes() == cother.sizes(),
                "rvv_tensor_mul: shapes are not broadcast-compatible");
    TORCH_INTERNAL_ASSERT(cself.device().type() == DeviceType::CPU);
    TORCH_INTERNAL_ASSERT(cother.device().type() == DeviceType::CPU);

    Tensor result = at::empty_like(cself);

    double *self_data = cself.data_ptr<double>();
    double *other_data = cother.data_ptr<double>();
    double *result_data = result.data_ptr<double>();
    size_t n = cself.numel();

    rvv_vec_mul(result_data, self_data, other_data, n);

    // Cast back. Float64 inputs must stay float64 — the previous code
    // downcast them to float32 and silently lost precision.
    if (self.dtype() == torch::kFloat64 || other.dtype() == torch::kFloat64) {
        return result;  // already kFloat64
    }
    if (self.dtype() == torch::kFloat32 || other.dtype() == torch::kFloat32) {
        return result.to(torch::kFloat32);
    }
    if (self.dtype() == torch::kInt64 || other.dtype() == torch::kInt64) {
        return result.to(torch::kInt64);
    }
    if (self.dtype() == torch::kInt32 || other.dtype() == torch::kInt32) {
        return result.to(torch::kInt32);
    }
    if (self.dtype() == torch::kInt16 || other.dtype() == torch::kInt16) {
        return result.to(torch::kInt16);
    }
    if (self.dtype() == torch::kBool && other.dtype() == torch::kBool) {
        return result.to(torch::kBool);
    }
    return result.to(self.dtype());
}

// aten::div.Tensor backend: result = self / other, computed by the RVV
// kernels on contiguous float64 buffers. Division always produces a
// floating result: float64 if either input is float64, else float32.
Tensor rvv_tensor_div(const Tensor &self, const Tensor &other) {
    // Work on contiguous float64 copies so the raw-pointer kernels can
    // iterate the storage linearly.
    Tensor cself = self.to(torch::kFloat64).contiguous();
    Tensor cother = other.to(torch::kFloat64).contiguous();

    // Best-effort broadcasting: expand whichever side fits the other's shape.
    // NOTE(review): two-sided broadcasting (e.g. [3,1] / [1,4]) is not
    // handled; those inputs are rejected by the TORCH_CHECK below.
    if (cself.sizes() != cother.sizes()) {
        try {
            cother = cother.expand(cself.sizes()).contiguous();
        } catch (...) {}
        try {
            cself = cself.expand(cother.sizes()).contiguous();
        } catch (...) {}
    }

    // The old `ok` flag was unconditionally true, so mismatched shapes could
    // reach the kernels and read out of bounds; check the real condition.
    TORCH_CHECK(cself.sizes() == cother.sizes(),
                "rvv_tensor_div: shapes are not broadcast-compatible");
    TORCH_INTERNAL_ASSERT(cself.device().type() == DeviceType::CPU);
    TORCH_INTERNAL_ASSERT(cother.device().type() == DeviceType::CPU);

    Tensor result = at::empty_like(cself);

    double *self_data = cself.data_ptr<double>();
    double *other_data = cother.data_ptr<double>();
    double *result_data = result.data_ptr<double>();
    size_t n = cself.numel();

    rvv_vec_div(result_data, self_data, other_data, n);

    // Keep full precision when either input was float64 — the previous code
    // always downcast to float32.
    if (self.dtype() == torch::kFloat64 || other.dtype() == torch::kFloat64) {
        return result;  // already kFloat64
    }
    return result.to(torch::kFloat32);
}

// Register the RVV implementations as the CPU backend for the four basic
// element-wise aten ops, overriding the default CPU kernels for this process.
TORCH_LIBRARY_IMPL(aten, CPU, m) {
    m.impl("aten::add.Tensor", &rvv_tensor_add);
    m.impl("aten::sub.Tensor", &rvv_tensor_sub);
    m.impl("aten::mul.Tensor", &rvv_tensor_mul);
    m.impl("aten::div.Tensor", &rvv_tensor_div);
}
