#ifndef TENSOR_H
#define TENSOR_H

#pragma once

#include <torch/torch.h>

#include <algorithm>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

#include <LibDL/Tensor/Scalar.h>
#include <LibDL/options/options.h>
#include <LibDL/TensorOptions/TensorOptions.h>

//using at::Generator;
//using torch::ScalarType;
//using torch::TensorList;

#define _EXCEPT noexcept(false)

// Value-semantic wrapper around torch::Tensor, exported to Java through SWIG.
// Every member forwards to the wrapped `core` tensor; this class only adds a
// stable interop surface (plus SWIG proxy hooks) on top of libtorch.
class Tensor {

public:
    using core_type = torch::Tensor;
    // The wrapped libtorch tensor. Public so the cast helpers / interop code
    // can reach it directly.
    core_type core;

    ENABLE_CAST;  // project macro — presumably generates Tensor <-> core_type casts; see LibDL headers

    Tensor();

    Tensor(torch::Tensor core);

    // Human-readable dump of the tensor (values + metadata), exactly as
    // produced by libtorch's stream operator.
    std::string toString() {
        std::stringstream ss;
        ss << core;
        return ss.str();
    }


    // Address of the underlying TensorImpl, handed to the Java side as an
    // identity/bookkeeping key (two Tensors sharing an impl yield the same
    // address).
    int64_t get_address() {
        return reinterpret_cast<int64_t>(core.unsafeGetTensorImpl());
    }

    bool requires_grad() {
        return core.requires_grad();
    }

#if defined(SWIG)
    %ignore Tensor::Tensor();
    %javamethodmodifiers Tensor::x "private";
    %typemap(javabase) Tensor "LibDL.supporting.CheckMemory";
    // Java-side backward(): run the native backward pass, then trigger the
    // proxy-layer memory check.
    %proxycode{
public void backward(){
    this.backward_();
    LibDL.supporting.CheckMemory.check();
}
    }
#endif

    // Native backward pass; the SWIG %proxycode wraps this as backward()
    // on the Java proxy class.
    void backward_();

    // ---- predicate forwarders (straight pass-through to libtorch) --------

    bool is_cuda() {
        return core.is_cuda();
    }

    bool is_coalesced() {
        return core.is_coalesced();
    }

    bool is_complex() {
        return core.is_complex();
    }

    bool is_distributed() {
        return core.is_distributed();
    }

    bool is_floating_point() {
        return core.is_floating_point();
    }

    bool is_mkldnn() {
        return core.is_mkldnn();
    }

    bool is_quantized() {
        return core.is_quantized();
    }

    bool is_hip() {
        return core.is_hip();
    }

    bool is_nonzero() {
        return core.is_nonzero();
    }

    bool is_signed() {
        return core.is_signed();
    }

    bool is_sparse() {
        return core.is_sparse();
    }

// libtorch >= 1.4 removed Tensor::is_variable(); emulate it via the dispatch
// key query there, and forward to the member on older versions.
#if !defined(Torch_Version_Less_14)
    C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now.")
    bool is_variable() {
        return !at::impl::variable_excluded_from_dispatch();
    }
#else
    C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated in libtorch version greater 1.4; everything is a variable now.")
    bool is_variable() {
        return core.is_variable();
    }
#endif


    // Indices that would sort the tensor along `dim` (descending if
    // requested).
    inline Tensor argsort(int64_t dim = -1, bool descending = false) {
        return Tensor(this->core.argsort(dim, descending));
    }

    // Copy the tensor's elements into a flat std::vector<ScalarType>.
    // ScalarType must match the tensor's dtype or libtorch throws.
    template<class ScalarType>
    std::vector<ScalarType> tolist_() _EXCEPT {
#if defined(Torch_Version_Less_13)
        // libtorch < 1.3 exposed typed access as data<T>().
        ScalarType *data = this->core.data<ScalarType>();
#else
        ScalarType *data = this->core.data_ptr<ScalarType>();
#endif
        return std::vector<ScalarType>(data, data + this->core.numel());
    }

    Dtype dtype() {
        return Dtype(core.scalar_type());
    }

    // ---- declarations implemented out of line (see the matching .cpp) ----

    Tensor add(const Tensor &rhs) const;

    Tensor abs() const;

    Tensor sub(const Tensor &rhs) const;

    Tensor sum() const;

    int64_t dim() const noexcept(false);

    Tensor div(const Tensor &rhs) const;

    Tensor mul(const Tensor &rhs) const;

    Tensor add(const Scalar &rhs) const;

    Tensor sub(const Scalar &rhs) const;

    Tensor div(const Scalar &rhs) const;

    Tensor div_(const Scalar &rhs);

    Tensor mul(const Scalar &rhs) const;

    Tensor pow(const Scalar &exponent) const;

    Tensor log() const;

    Tensor sum(int64_t dim) const;

    Tensor sum(const std::vector<int64_t> &dim) const;

    Tensor reshape(const std::vector<int64_t> &size);

    Tensor view(const std::vector<int64_t> &size);

    Tensor grad() const;

    Tensor normal_(double weight, double std);

    Tensor zero_();

    Tensor squeeze() const;

    Tensor squeeze(int64_t dim);

    Tensor squeeze_();

    Tensor squeeze_(int64_t dim);

    Tensor unsqueeze(int64_t dim) const;

    Tensor unsqueeze_(int64_t dim);

    Tensor nonzero() const;

    Tensor to(Dtype dty);

    Tensor to(Device device);

    std::vector<int64_t> sizes() const;

    int size(int dim) const;

    Tensor flatten(int64_t start_dim = 0, int64_t end_dim = -1) const;

    Scalar item() const;

    Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;

    Tensor narrow(int64_t dim, int64_t start, int64_t length) const;

    Tensor cuda() const;

    Tensor cpu() const;

    Tensor hip() const;

    Tensor index_add_(int64_t dim, const Tensor &index, const Tensor &source);

    Tensor index_put_(const std::vector<Tensor> &indices, const Tensor &values, bool accumulate = false) _EXCEPT;

    Tensor index_put_(const Tensor &index, const Tensor &values, bool accumulate = false);

    Tensor index_select(int64_t dim, const Tensor &index) _EXCEPT;

    Tensor masked_select(const Tensor &mask) const;

    Tensor add(double value, double alpha = 1);

    Tensor add_(double value, double alpha = 1);

    Tensor get(const Scalar &index) const;

    Tensor get(const Tensor &index) const;

    Tensor get(const int64_t &index) const;

    Tensor mean() const;

    Tensor _indices() const;

    Tensor _values() const;

    Tensor coalesce() const;

    Tensor indices() const;

    Tensor values() const;

    Tensor t() const;

    Tensor clone() const;

    Tensor mul(long other) const;

    Tensor to_dense() const;

    // Sparse-tensor index accessor; libtorch throws on dense tensors, hence
    // the noexcept(false).
    Tensor getIndices() noexcept(false) {
        return Tensor(this->core.indices());
    }

    Tensor select(int64_t dim, int64_t index) const;

    // True when at least one element of this tensor equals `item`.
    bool contains(Scalar item) {
        int eq = torch::eq(this->core, item.core).nonzero().size(0);
        return eq != 0;
    }

    Tensor max() const;

    TensorOptions options() const;

    // Debug scratch helper: writes 3 into the element at index 5.
    // NOTE(review): looks like leftover test code — consider removing once
    // no caller depends on it.
    void test() {
        this->core[5] = 3;
    }

    Tensor mm(const Tensor &rhs) const;

    // For each user id, collect its top-N predicted item indices, skipping
    // items the user already saw in `train` (one training list per user, in
    // the same order as `user_ids`; assumes train.size() >= user_ids.size()
    // — TODO confirm with callers). `this` is treated as a per-user score
    // matrix: row = user, column = item score.
    std::vector<int64_t>
    recommend(const std::vector<int64_t> &user_ids, int64_t topN, const std::vector<std::vector<int64_t>> &train) {
        std::vector<int64_t> res;
        // BUG FIX: the previous version constructed `res` with
        // user_ids.size() * topN zero elements and then push_back'ed onto it,
        // so the result began with that many spurious zeros. reserve() keeps
        // the single allocation without the phantom entries.
        res.reserve(user_ids.size() * topN);
        Tensor sorted = Tensor(this->core.argsort(-1, true));  // descending per row
        int train_idx = 0;
        for (int64_t user : user_ids) {
            Tensor slice = sorted.index_select(0, torch::tensor(user));
#if defined(Torch_Version_Less_13)
            int64_t *data = slice.core.data<int64_t>();
#else
            int64_t *data = slice.core.data_ptr<int64_t>();
#endif
            std::vector<int64_t> single_user_pred = std::vector<int64_t>(data, data + slice.core.numel());
            int count = 0;
            std::vector<int64_t> single_user_train = train[train_idx];
            for (int64_t t : single_user_pred) {
                // Keep only items absent from this user's training set.
                if (std::find(single_user_train.begin(), single_user_train.end(), t) == single_user_train.end()) {
                    ++count;
                    res.push_back(t);
                    if (count >= topN) break;
                }
            }
            ++train_idx;
        }

        return res;
    }
};

#endif