#pragma once

#include <LibDL/Tensor/Tensor.h>
#include <LibDL/Tensor/Scalar.h>
#include <LibDL/utils.h>
#include <TH/TH.h>
#include <ATen/ATen.h>
#include <algorithm>
#include <utility>
//#include <torch/nn/parallel/data_parallel.h>

#ifdef CUDA_AVAILABLE
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDACachingAllocator.h>
#endif //CUDA_AVAILABLE

#include <LibDL/TensorOptions/TensorOptions.h>

class functional {
public:
// Code-generation macros: each expands to a static member that forwards to a
// torch:: free function and converts the result to the project's Tensor type.

// Wrapper returning a single Tensor.
// NOTE(review): `noexcept(false)` is already the default for normal
// functions; it is kept here for explicitness only.
#define DEFINE_TORCH_WRAPPER_FUNCTION(NAME, PARAM_LIST, PARAM_CALL)             \
    static Tensor NAME PARAM_LIST noexcept(false) {                             \
    return Tensor(torch:: NAME PARAM_CALL);                                     \
    }
// Same as above but uses the project-wide _EXCEPT macro (defined elsewhere,
// not visible in this file) as the exception specification.
#define DEFINE_TORCH_WRAPPER_FUNCTION_WITH_EXCEPT(NAME, PARAM_LIST, PARAM_CALL) \
    static Tensor NAME PARAM_LIST _EXCEPT {                                     \
    return Tensor(torch:: NAME PARAM_CALL);                                     \
    }
// Wrapper for torch functions that return a std::tuple of (at least) two
// tensors; the first two elements are repackaged as std::pair<Tensor, Tensor>.
#define DEFINE_TORCH_FUNCTION_RETURN_TUPLE(NAME, PARAM_LIST, CALL_PARAM_LIST)   \
    static std::pair<Tensor, Tensor>                                            \
    NAME PARAM_LIST noexcept(false) {                                           \
        auto tuple = torch:: NAME CALL_PARAM_LIST;                              \
        return std::make_pair(                                                  \
                std::move(Tensor(std::get<0>(tuple))),                          \
                std::move(Tensor(std::get<1>(tuple)))                           \
        );                                                                      \
    }
// Forwards to the option-struct-free overloads in
// torch::nn::functional::detail::.
#define DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(NAME, PARAM_LIST, PARAM_CALL)          \
    static Tensor NAME PARAM_LIST {                                             \
        return torch::nn::functional::detail:: NAME PARAM_CALL;                 \
    }

// Unary element-wise op taking a single Tensor (forwards self.core).
#define DEFINE_TORCH_TENSOR_ACTION(NAME) DEFINE_TORCH_WRAPPER_FUNCTION(NAME,(const Tensor& self),(self.core))

// as_tensor overload taking a std::vector of `type`.
#define AS_TENSOR_LIST(type)                                                                    \
    static Tensor as_tensor(const std::vector<type>& data, const TensorOptions& options = {}) { \
        return Tensor(torch::tensor(at::ArrayRef<type>(data), options));                        \
    }
// as_tensor overload taking a single scalar of `type`.
#define AS_TENSOR_ITEM(type)                                                                    \
    static Tensor as_tensor(type data, const TensorOptions& options = {}) {                     \
        return Tensor(torch::tensor(data, options));                                            \
    }

    /**
     *******************  creation function *************************
     */
    // Tensor-creation factories without explicit TensorOptions; each pair of
    // overloads accepts either a single size or a per-dimension size vector
    // and forwards straight to the torch:: factory of the same name.
    DEFINE_TORCH_WRAPPER_FUNCTION(ones, (int v), (v));
    DEFINE_TORCH_WRAPPER_FUNCTION(ones, (const std::vector<int64_t>& v), (v));

    DEFINE_TORCH_WRAPPER_FUNCTION(eye, (int v), (v));

    DEFINE_TORCH_WRAPPER_FUNCTION(empty, (int v), (v));

    DEFINE_TORCH_WRAPPER_FUNCTION(zeros, (int v), (v));
    DEFINE_TORCH_WRAPPER_FUNCTION(zeros, (const std::vector<int64_t>& v), (v));

    DEFINE_TORCH_WRAPPER_FUNCTION(randn, (int v), (v));
    DEFINE_TORCH_WRAPPER_FUNCTION(randn, (const std::vector<int64_t>& v), (v));

    DEFINE_TORCH_WRAPPER_FUNCTION(rand, (int v), (v));
    DEFINE_TORCH_WRAPPER_FUNCTION(rand, (const std::vector<int64_t>& v), (v));

    // ---- factories that take explicit TensorOptions ----

    /// Zero-filled tensor with the given shape.
    static Tensor zeros(const std::vector<int64_t>& size, const TensorOptions& options) {
        return Tensor(torch::zeros(size, options));
    }

    /// Zero-filled 1-D tensor with `v` elements.
    static Tensor zeros(int64_t v, const TensorOptions& options) {
        return Tensor(torch::zeros(v, options));
    }

    /// One-filled tensor with the given shape.
    static Tensor ones(const std::vector<int64_t>& size, const TensorOptions& options) {
        return Tensor(torch::ones(size, options));
    }

    /// One-filled 1-D tensor with `v` elements.
    static Tensor ones(int64_t v, const TensorOptions& options) {
        return Tensor(torch::ones(v, options));
    }

    /// v-by-v identity matrix.
    static Tensor eye(int64_t v, const TensorOptions& options) {
        return Tensor(torch::eye(v, options));
    }

    /// Uniform-[0,1) random tensor with the given shape.
    static Tensor rand(const std::vector<int64_t>& size, const TensorOptions& options) {
        return Tensor(torch::rand(size, options));
    }

    /// Uniform-[0,1) random 1-D tensor with `v` elements.
    static Tensor rand(int64_t v, const TensorOptions& options) {
        return Tensor(torch::rand(v, options));
    }

    /// Uniform-[0,1) random tensor with the same shape as `self`.
    static Tensor rand_like(const Tensor& self, const TensorOptions& options = {}) {
        return Tensor(torch::rand_like(self, options));
    }

    /**
     * Evenly spaced values in [start, end) with the given step.
     * @param start   first value of the sequence
     * @param end     exclusive upper bound
     * @param step    spacing between consecutive values (default 1)
     * @param options tensor options; now passed by const reference to avoid
     *                copying the options object on every call (was by value)
     */
    static Tensor arange(double start, double end, double step = 1,
                         const TensorOptions &options = {}) { //TODO default data type is float now
        return Tensor(torch::arange(Scalar(start).core, Scalar(end).core, Scalar(step).core, options));
    }

    /**
     ************************* common operations ********************************
     */
    /**
     * Creates a Tensor from arbitrary scalar / list data accepted by
     * torch::tensor. The result is wrapped explicitly in Tensor() for
     * consistency with every other factory in this class (the original
     * relied on an implicit conversion from at::Tensor).
     */
    template<class Type>
    static Tensor tensor_(Type data) {
        return Tensor(torch::tensor(data));
    }

    /**
     * Returns the k largest (or smallest, if !largest) elements of `self`
     * along `dim`.
     * @return pair of (values, indices).
     */
    static std::pair<Tensor, Tensor>
    topk(const Tensor &self, int64_t k, int64_t dim = -1, bool largest = true, bool sorted = true) {
        auto tuple = torch::topk(self.core, k, dim, largest, sorted);
        // std::move on a just-constructed temporary is a pessimization
        // (clang-tidy performance-move-const-arg); build the pair directly.
        return std::make_pair(Tensor(std::get<0>(tuple)), Tensor(std::get<1>(tuple)));
    }

    /// Stacks `tensors` along a new dimension `dim`.
    static Tensor stack(const std::vector<Tensor> &tensors, int64_t dim = 0) {
        std::vector<at::Tensor> cores;
        cores.reserve(tensors.size());
        for (const auto &t : tensors) {
            cores.push_back(t);  // implicit Tensor -> at::Tensor conversion
        }
        // std::vector<at::Tensor> converts implicitly to at::TensorList
        // (c10::ArrayRef<at::Tensor>), so no explicit wrapping is needed.
        return Tensor(torch::stack(cores, dim));
    }

    static std::vector<Tensor> split(const Tensor &self, int64_t split_size, int64_t dim = 0) {
        std::vector<at::Tensor> v = torch::split(self, split_size, dim);
        std::vector<at::Tensor>::iterator it;
        std::vector<Tensor> result;
        for (it = v.begin(); it != v.end(); it++) {
            result.emplace_back(*it);
        }
        return result;
    }

    // Element-wise natural logarithm.
    DEFINE_TORCH_TENSOR_ACTION(log);

    // Reductions: sum over a list of dimensions, a single dimension, or the
    // overload taking only keep_dim (forwarded as-is to torch::sum).
    DEFINE_TORCH_WRAPPER_FUNCTION(sum,
                                  (Tensor self, const std::vector<int64_t> &dim, bool keep_dim = false),
                                  (self, dim, keep_dim));

    DEFINE_TORCH_WRAPPER_FUNCTION(sum,
                                  (Tensor self, const int64_t dim, bool keep_dim = false),
                                  (self, dim, keep_dim));

    DEFINE_TORCH_WRAPPER_FUNCTION(sum,
                                  (Tensor self, bool keep_dim = false),
                                  (self, keep_dim));

    static std::vector<Tensor>
    unique(const Tensor &self, bool sorted, bool return_inverse = false, bool return_counts = true,
           int64_t dim = -9999) {
//        return_counts = true; //TODO default params should be fix
        std::tuple<at::Tensor, at::Tensor, at::Tensor> tuple;
        if (dim != -9999) {
            tuple = torch::unique_dim(self.core, dim, sorted, return_inverse, return_counts);
        } else {
            tuple = torch::_unique2(self.core, sorted, return_inverse, return_counts);
        }

        std::vector<Tensor> v;
        v.emplace_back(std::get<0>(tuple));
        v.emplace_back(std::get<1>(tuple));
        v.emplace_back(std::get<2>(tuple));
        return v;
    }

    /**
     * Deserializes `value` from the archive at `filename` via torch::load.
     * @param filename path to the archive; taken by const reference to avoid
     *        a needless std::string copy per call (was passed by value).
     */
    template<typename Value>
    static void load(Value &value, const std::string &filename) {
        torch::load(value, filename);
    }

    /** Serializes `value` to the archive at `filename` via torch::save. */
    template<typename Value>
    static void save(const Value &value, const std::string &filename) {
        torch::save(value, filename);
    }

    // Arithmetic mean over all elements.
    DEFINE_TORCH_WRAPPER_FUNCTION(mean, (const Tensor& self), (self.core));

    // Element-wise comparisons against a scalar.
    DEFINE_TORCH_WRAPPER_FUNCTION(eq, (const Tensor &self, Scalar other), (self, other));
    DEFINE_TORCH_WRAPPER_FUNCTION(gt, (const Tensor &self, Scalar other), (self, other));

    // Concatenates the given tensors along `dim` (cores extracted via
    // utils::map_to_core).
    DEFINE_TORCH_WRAPPER_FUNCTION_WITH_EXCEPT(cat,
                                              (const std::vector<Tensor>& tensor_lst, int64_t dim),
                                              (utils::map_to_core<at::Tensor>(tensor_lst), dim));

    /// Returns a 1-D tensor of the elements of `self` where `mask` is true.
    static Tensor masked_select(const Tensor &self, const Tensor &mask) noexcept(false) {
        return Tensor(torch::masked_select(self, mask));
    }
/**
 ***************************** matrix operations **************************
 */
    // Batched matrix-multiply-accumulate:
    // out = beta * self + alpha * (batch1 @ batch2), per batch element.
    DEFINE_TORCH_WRAPPER_FUNCTION(baddbmm,
                                  (const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta = 1, Scalar alpha = 1),
                                  (self.core, batch1.core, batch2.core, beta.core, alpha.core));

    /**
     ******************* gradient mode controlling ***********************
     */
    /// Globally switches autograd gradient tracking on or off. This is a
    /// process-wide flag, not a scoped RAII guard.
    static void set_grad_enabled(bool requires_grad) {
        torch::autograd::GradMode::set_enabled(requires_grad);
    }

    /// Disables gradient tracking globally. NOTE(review): unlike
    /// torch::NoGradGuard this is not scoped; callers must re-enable.
    static void no_grad() {
        set_grad_enabled(false);
    }
    //	DEFINE_TORCH_WRAPPER_FUNCTION(no_grad, (), ());

    /// Re-enables gradient tracking globally.
    static void enable_grad() {
        set_grad_enabled(true);
    }

    /**
     **************************convolution functions *****************************
     */
    // N-d convolutions. Each op has two overloads: one taking scalar (int)
    // hyper-parameters, and one taking per-dimension std::vector<int64_t>
    // values (the vector overload also defaults bias/stride/padding/dilation).
    DEFINE_TORCH_WRAPPER_FUNCTION(conv1d, (const Tensor & input, const Tensor & weight, const Tensor & bias,
            int stride, int padding=0, int dilation=1, int64_t groups=1),
            (input, weight, bias, stride, padding, dilation, groups));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv1d, (const Tensor & input, const Tensor & weight, const Tensor & bias={},
            std::vector<int64_t> stride=std::vector<int64_t>({1}), std::vector<int64_t> padding=std::vector<int64_t>({0}),
            std::vector<int64_t> dilation=std::vector<int64_t>({1}), int64_t groups=1),
                                  (input, weight, bias, stride, padding, dilation, groups));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv2d, (const Tensor & input, const Tensor & weight, const Tensor & bias,
            int stride, int padding=0, int dilation=1, int64_t groups=1),
                                  (input, weight, bias, stride, padding, dilation, groups));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv2d, (const Tensor & input, const Tensor & weight, const Tensor & bias={},
            std::vector<int64_t> stride=std::vector<int64_t>({1}), std::vector<int64_t> padding=std::vector<int64_t>({0}),
            std::vector<int64_t> dilation=std::vector<int64_t>({1}), int64_t groups=1),
                                  (input, weight, bias, stride, padding, dilation, groups));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv3d, (const Tensor & input, const Tensor & weight, const Tensor & bias,
            int stride, int padding=0, int dilation=1, int64_t groups=1),
                                  (input, weight, bias, stride, padding, dilation, groups));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv3d, (const Tensor & input, const Tensor & weight, const Tensor & bias={},
            std::vector<int64_t> stride=std::vector<int64_t>({1}), std::vector<int64_t> padding=std::vector<int64_t>({0}),
            std::vector<int64_t> dilation=std::vector<int64_t>({1}), int64_t groups=1),
                                  (input, weight, bias, stride, padding, dilation, groups));
    // Transposed-convolution variants. Note the extra output_padding
    // parameter and that `groups` precedes `dilation`, matching torch's
    // conv_transpose* signatures.
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose1d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias, int stride, int padding=0, int output_padding=0, int64_t groups=1, int dilation=1),
            (input, weight, bias, stride, padding, output_padding, groups, dilation));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose1d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias={}, std::vector<int64_t> stride={1}, std::vector<int64_t> padding={0},
            std::vector<int64_t> output_padding={0}, int64_t groups=1, std::vector<int64_t> dilation={1}),
                                  (input, weight, bias, stride, padding, output_padding, groups, dilation));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose2d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias, int stride, int padding=0, int output_padding=0, int64_t groups=1, int dilation=1),
                                  (input, weight, bias, stride, padding, output_padding, groups, dilation));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose2d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias={}, std::vector<int64_t> stride={1}, std::vector<int64_t> padding={0},
            std::vector<int64_t> output_padding={0}, int64_t groups=1, std::vector<int64_t> dilation={1}),
                                  (input, weight, bias, stride, padding, output_padding, groups, dilation));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose3d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias, int stride, int padding=0, int output_padding=0, int64_t groups=1, int dilation=1),
                                  (input, weight, bias, stride, padding, output_padding, groups, dilation));
    DEFINE_TORCH_WRAPPER_FUNCTION(conv_transpose3d, (const Tensor & input, const Tensor & weight,
            const Tensor & bias={}, std::vector<int64_t> stride={1}, std::vector<int64_t> padding={0},
            std::vector<int64_t> output_padding={0}, int64_t groups=1, std::vector<int64_t> dilation={1}),
                                  (input, weight, bias, stride, padding, output_padding, groups, dilation));
    /**
     * Combines an array of sliding local blocks into one large tensor
     * (inverse of unfold); delegates to torch::nn::functional::fold.
     */
    static Tensor fold(const Tensor& input,
                       std::vector<int64_t> output_size,
                       std::vector<int64_t> kernel_size,
                       std::vector<int64_t> dilation={1, 1},
                       std::vector<int64_t> padding={0, 0},
                       std::vector<int64_t> stride={1, 1}) {
        // Build the options object step by step instead of one long chain.
        auto opts = torch::nn::functional::FoldFuncOptions(std::move(output_size), std::move(kernel_size));
        opts.dilation(std::move(dilation)).padding(std::move(padding)).stride(std::move(stride));
        return torch::nn::functional::fold(input, opts);
    }

    /**
     * Extracts sliding local blocks from a batched input tensor;
     * delegates to torch::nn::functional::unfold.
     */
    static Tensor unfold(const Tensor& input,
                         std::vector<int64_t> kernel_size,
                         std::vector<int64_t> dilation={1, 1},
                         std::vector<int64_t> padding={0, 0},
                         std::vector<int64_t> stride={1, 1}) {
        auto opts = torch::nn::functional::UnfoldFuncOptions(std::move(kernel_size));
        opts.dilation(std::move(dilation)).padding(std::move(padding)).stride(std::move(stride));
        return torch::nn::functional::unfold(input, opts);
    }

    /**
     *************************** polling functions ******************************
     */

    // Average pooling. In the overloads that take divisor_override, the value
    // is forwarded last, wrapped in c10::optional<int64_t>, after ceil_mode
    // and count_include_pad, matching torch's avg_pool2d/3d signatures.
    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool1d, (const Tensor & self,
            int kernel_size, int stride, int padding, bool ceil_mode=false, bool count_include_pad=true),
                                  (self, kernel_size, stride, padding, ceil_mode, count_include_pad));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool1d, (const Tensor & self, int kernel_size, int stride),
                                  (self, kernel_size, stride));
    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool2d, (const Tensor & self,
            const std::vector<int64_t>& kernel_size,
            const std::vector<int64_t>& stride,
            const std::vector<int64_t>& padding,
            int64_t divisor_override, bool ceil_mode=false, bool count_include_pad=true),
                                  (self, kernel_size, stride, padding, ceil_mode, count_include_pad, c10::optional<int64_t>(
                                          divisor_override)));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool2d, (const Tensor & self, const std::vector<int64_t>& kernel_size),
                                  (self, kernel_size));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool2d, (const Tensor & self,int kernel_size, int stride, int padding, int64_t divisor_override, bool ceil_mode=false, bool count_include_pad=true),
                                  (self, kernel_size, stride, padding, ceil_mode, count_include_pad, c10::optional<int64_t>(divisor_override)));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool2d, (const Tensor & self, int kernel_size, int stride),
                                  (self, kernel_size, stride));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool3d, (const Tensor & self,const std::vector<int64_t>& kernel_size,const std::vector<int64_t>& stride,
            const std::vector<int64_t>& padding,int64_t divisor_override, bool ceil_mode=false, bool count_include_pad=true),
                    (self, kernel_size, stride, padding, ceil_mode, count_include_pad, c10::optional<int64_t>(divisor_override)));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool3d, (const Tensor & self, const std::vector<int64_t>& kernel_size),
                                  (self, kernel_size));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool3d, (const Tensor & self,
            int kernel_size, int stride, int padding, int64_t divisor_override, bool ceil_mode=false, bool count_include_pad=true),
                                  (self, kernel_size, stride, padding, ceil_mode, count_include_pad, c10::optional<int64_t>(
                                          divisor_override)));

    DEFINE_TORCH_WRAPPER_FUNCTION(avg_pool3d, (const Tensor & self, int kernel_size, int stride),
                                  (self, kernel_size, stride));

    // Max pooling. NOTE(review): in the int overload of max_pool1d each
    // c10::IntArrayRef is constructed over a temporary int64_t converted from
    // the int argument; it is valid only for the duration of the call
    // expression -- do not refactor these into named variables.
    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool1d,
                                  (const Tensor & self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                  (self, c10::IntArrayRef(kernel_size), c10::IntArrayRef(stride), c10::IntArrayRef(padding), c10::IntArrayRef(dilation), ceil_mode));

    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool1d, (const Tensor& self,
            std::vector<int64_t>& kernel_size, std::vector<int64_t>& stride, std::vector<int64_t> padding={0}, std::vector<int64_t> dilation={1}, bool ceil_mode=false),
                                  (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool2d,
                                  (const Tensor& self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                  (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool2d, (const Tensor& self,
            std::vector<int64_t>& kernel_size, std::vector<int64_t>& stride, std::vector<int64_t> padding={0}, std::vector<int64_t> dilation={1}, bool ceil_mode=false),
                                  (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool3d,
                                  (const Tensor& self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                  (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_WRAPPER_FUNCTION(max_pool3d, (const Tensor& self,
            std::vector<int64_t>& kernel_size, std::vector<int64_t>& stride, std::vector<int64_t> padding={0}, std::vector<int64_t> dilation={1}, bool ceil_mode=false),
                                  (self, kernel_size, stride, padding, dilation, ceil_mode));

    // *_with_indices variants return a (values, indices) pair via
    // DEFINE_TORCH_FUNCTION_RETURN_TUPLE.
    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool1d_with_indices,
                                       (const Tensor & self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool1d_with_indices,
                                       (const Tensor& self, std::vector<int64_t>& kernel_size, std::vector<int64_t> stride,
                                               std::vector<int64_t> padding=std::vector<int64_t>({ 0 }),
                                               std::vector<int64_t> dilation=std::vector<int64_t>({ 1 }), bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool2d_with_indices,
                                       (const Tensor & self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool2d_with_indices,
                                       (const Tensor& self, std::vector<int64_t>& kernel_size, std::vector<int64_t> stride, std::vector<int64_t> padding=std::vector<int64_t>({ 0 }), std::vector<int64_t> dilation=std::vector<int64_t>({ 1 }), bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool3d_with_indices,
                                       (const Tensor & self, int kernel_size, int stride, int padding=0, int dilation=1, bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(max_pool3d_with_indices,
                                       (const Tensor& self, std::vector<int64_t>& kernel_size, std::vector<int64_t> stride, std::vector<int64_t> padding=std::vector<int64_t>({ 0 }), std::vector<int64_t> dilation=std::vector<int64_t>({ 1 }), bool ceil_mode=false),
                                       (self, kernel_size, stride, padding, dilation, ceil_mode));

    // Max unpooling (partial inverse of max_pool using the saved indices).
    // Overloads without output_size forward c10::nullopt; overloads taking
    // output_size forward it last as c10::optional<std::vector<int64_t>>,
    // matching torch::nn::functional::detail::max_unpool*'s parameter order.
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool1d, (Tensor &self, Tensor &indices, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool1d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool1d, (Tensor &self, Tensor &indices, std::vector<int64_t> kernel_size,
            std::vector<int64_t> stride, std::vector<int64_t> padding = {0}),
                                     (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool1d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size,
            std::vector<int64_t> kernel_size, std::vector<int64_t> stride, std::vector<int64_t> padding),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool2d, (Tensor &self, Tensor &indices, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool2d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool2d, (Tensor &self, Tensor &indices, std::vector<int64_t> kernel_size,
            std::vector<int64_t> stride, std::vector<int64_t> padding = {0}),
                                     (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool2d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size,
            std::vector<int64_t> kernel_size, std::vector<int64_t> stride, std::vector<int64_t> padding),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool3d, (Tensor &self, Tensor &indices, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool3d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size, int kernel_size, int stride, int padding = 0),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool3d, (Tensor &self, Tensor &indices, std::vector<int64_t> kernel_size,
            std::vector<int64_t> stride, std::vector<int64_t> padding = {0}),
                    (self, indices, kernel_size, stride, padding, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(max_unpool3d, (Tensor &self, Tensor &indices, std::vector<int64_t> output_size,
            std::vector<int64_t> kernel_size, std::vector<int64_t> stride, std::vector<int64_t> padding),
                                     (self, indices, kernel_size, stride, padding, c10::optional<std::vector<int64_t>>(output_size)));


    /**
     * Power-average (Lp) pooling, 1-D: each window is reduced to
     * (sum |x|^p)^(1/p) with p = norm_type.
     */
    static Tensor
    lp_pooling1d(Tensor &self, double norm_type, std::vector<int64_t> &kernel_size, std::vector<int64_t> &stride,
                 bool ceil_mode=false) {
        auto opts = torch::nn::functional::LPPool1dFuncOptions(norm_type, kernel_size);
        opts.stride(stride).ceil_mode(ceil_mode);
        return torch::nn::functional::lp_pool1d(self, opts);
    }

    /// Lp pooling, 1-D, scalar hyper-parameters.
    static Tensor lp_pooling1d(Tensor &self, double norm_type, int kernel_size, int stride, bool ceil_mode=false){
        auto opts = torch::nn::functional::LPPool1dFuncOptions(norm_type, kernel_size);
        opts.stride(stride).ceil_mode(ceil_mode);
        return torch::nn::functional::lp_pool1d(self, opts);
    }

    /// Lp pooling, 2-D, per-dimension hyper-parameters.
    static Tensor
    lp_pooling2d(Tensor &self, double norm_type, std::vector<int64_t> &kernel_size, std::vector<int64_t> &stride,
                 bool ceil_mode=false) {
        auto opts = torch::nn::functional::LPPool2dFuncOptions(norm_type, kernel_size);
        opts.stride(stride).ceil_mode(ceil_mode);
        return torch::nn::functional::lp_pool2d(self, opts);
    }

    /// Lp pooling, 2-D, scalar hyper-parameters.
    static Tensor
    lp_pooling2d(Tensor &self, double norm_type, int kernel_size, int stride,
                 bool ceil_mode=false) {
        auto opts = torch::nn::functional::LPPool2dFuncOptions(norm_type, kernel_size);
        opts.stride(stride).ceil_mode(ceil_mode);
        return torch::nn::functional::lp_pool2d(self, opts);
    }
    // Adaptive pooling: the caller specifies the desired output size and the
    // kernel/stride are derived. Max variants return (values, indices).
    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(adaptive_max_pool1d, (const Tensor& self, std::vector<int64_t>& output_size),
                                       (self, output_size));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(adaptive_max_pool2d, (const Tensor& self, std::vector<int64_t>& output_size),
                                       (self, output_size));

    DEFINE_TORCH_FUNCTION_RETURN_TUPLE(adaptive_max_pool3d, (const Tensor& self, std::vector<int64_t>& output_size),
                                       (self, output_size));

    DEFINE_TORCH_WRAPPER_FUNCTION(adaptive_avg_pool1d, (const Tensor& self, std::vector<int64_t>& output_size),
                                  (self, output_size));

    DEFINE_TORCH_WRAPPER_FUNCTION(adaptive_avg_pool2d, (const Tensor& self, std::vector<int64_t>& output_size),
                                  (self, output_size));

    DEFINE_TORCH_WRAPPER_FUNCTION(adaptive_avg_pool3d, (const Tensor& self, std::vector<int64_t>& output_size),
                                  (self, output_size));

/**
 *********************** activation functions ***********************************
 */
    // Thresholds each element: values <= threshold are replaced by `value`.
    DEFINE_TORCH_WRAPPER_FUNCTION(threshold, (const Tensor & self, Scalar threshold, Scalar value), (self, threshold, value));
    DEFINE_TORCH_TENSOR_ACTION(relu);
    // Clamps to [min_val, max_val].
    DEFINE_TORCH_WRAPPER_FUNCTION(hardtanh, (const Tensor & self, Scalar min_val=-1, Scalar max_val=1), (self, min_val, max_val));
    // relu6 goes through the options-based nn::functional API so that the
    // `inplace` flag can be forwarded.
    static Tensor relu6 (Tensor& self, bool inplace){
        return torch::nn::functional::relu6(self, torch::nn::functional::ReLU6FuncOptions(inplace));
    }
    DEFINE_TORCH_WRAPPER_FUNCTION(elu, (const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1), (self, alpha, scale, input_scale));
    DEFINE_TORCH_TENSOR_ACTION(selu);
    DEFINE_TORCH_WRAPPER_FUNCTION(celu, (const Tensor & self, Scalar alpha=1.0), (self, alpha));
    DEFINE_TORCH_WRAPPER_FUNCTION(leaky_relu, (const Tensor& self, Scalar negative_slope), (self, negative_slope));
    DEFINE_TORCH_WRAPPER_FUNCTION(prelu, (const Tensor & self, const Tensor & weight), (self, weight));
    // Randomized leaky ReLU; defaults match torch (lower=1/8, upper=1/3).
    DEFINE_TORCH_WRAPPER_FUNCTION(rrelu, (const Tensor & self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false), (self, lower, upper, training));
    DEFINE_TORCH_WRAPPER_FUNCTION(glu, (const Tensor & self, int64_t dim=-1), (self, dim));
    DEFINE_TORCH_WRAPPER_FUNCTION(gelu,(const Tensor & self),(self));
    DEFINE_TORCH_TENSOR_ACTION(log_sigmoid);
    DEFINE_TORCH_WRAPPER_FUNCTION(hardshrink, (const Tensor & self, Scalar lambd=0.5), (self, lambd));
    static Tensor tanhshrink(Tensor& self){
        return torch::nn::functional::tanhshrink(self);
    }
    static Tensor softsign(Tensor& self){
        return torch::nn::functional::softsign(self);
    }
    DEFINE_TORCH_WRAPPER_FUNCTION(softplus, (const Tensor & self, Scalar beta=1, Scalar threshold=20), (self, beta, threshold));
    static Tensor softmin(const Tensor& input, int64_t dim) {
        return torch::nn::functional::softmin(input, dim);
    }
    DEFINE_TORCH_WRAPPER_FUNCTION(softmax, (const Tensor & self, int64_t dim), (self, dim));
    DEFINE_TORCH_WRAPPER_FUNCTION(softshrink, (const Tensor & self, Scalar lambd=0.5), (self, lambd));
    // Gumbel-softmax sampling; all hyper-parameters forwarded through
    // GumbelSoftmaxFuncOptions.
    static Tensor gumbel_softmax(const Tensor& logits,
                                 double tau,
                                 bool hard,
                                 int dim) {
        return torch::nn::functional::gumbel_softmax(logits, torch::nn::functional::GumbelSoftmaxFuncOptions().tau(tau).hard(hard).dim(dim));
    }
    // log(softmax(self, dim)), computed in a numerically stable fused op.
    DEFINE_TORCH_WRAPPER_FUNCTION(log_softmax, (const Tensor & self, int64_t dim), (self, dim));
    DEFINE_TORCH_TENSOR_ACTION(sigmoid);
    // NOTE(review): a second log_softmax overload taking `int dim` was
    // removed: it duplicated the int64_t overload above (int promotes to
    // int64_t with identical behavior) and made calls with other integer
    // argument types ambiguous.
    DEFINE_TORCH_TENSOR_ACTION(tanh);

/**
 ***************************** normalization func ***********************
 */
    // NOTE(review): `cudnn_enabled` is accepted for API parity but is NOT
    // forwarded -- the detail::batch_norm call below takes only
    // (input, running_mean, running_var, weight, bias, training, momentum, eps).
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(batch_norm, (const Tensor & input,const Tensor & running_mean, const Tensor & running_var, const Tensor & weight={}, const Tensor & bias={},
             bool training=false, double momentum=0.1, double eps=1e-5, bool cudnn_enabled=false),(input,running_mean,running_var,weight,bias,training,momentum,eps));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(instance_norm, (const Tensor& input, const Tensor& running_mean={},
            const Tensor& running_var={}, const Tensor& weight={}, const Tensor& bias={},bool use_input_stats=true, double momentum=0.1, double eps=1e-5),
                    (input,running_mean, running_var,weight, bias, use_input_stats,momentum, eps));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(layer_norm, (const Tensor & input, std::vector<int64_t> normalized_shape, const Tensor & weight={},
            const Tensor & bias={}, double eps=1e-05), (input,normalized_shape,weight,bias,eps));
    // Lp normalization along `dim`; the second overload writes into `out`
    // (forwarded as c10::optional<torch::Tensor>).
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(normalize, (const Tensor& input, double p=2, int64_t dim=2, double eps=1e-12), (input, p, dim, eps, c10::nullopt));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(normalize, (const Tensor& input, Tensor& out, double p=2, int64_t dim=2, double eps=1e-12), (input, p, dim, eps, c10::optional<torch::Tensor>(out.core)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(local_response_norm, (const Tensor& input,int64_t size,double alpha=0.0001,double beta=0.75,double k=1.0), (input, size, alpha, beta, k));

/**
 ******************************** linear functions ***************************
 */
    // y = x W^T + b (bias optional, defaults to an empty/undefined tensor).
    DEFINE_TORCH_WRAPPER_FUNCTION(linear, (const Tensor & input, const Tensor & weight, const Tensor & bias={}), (input, weight, bias));
    // Bilinear transform of two inputs: y = x1^T A x2 + b.
    DEFINE_TORCH_WRAPPER_FUNCTION(bilinear, (const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias), (input1, input2, weight, bias));


/**
 ********************************** dropout func ***************************************
 */
    // Dropout family, forwarded to torch::nn::functional::detail::*.
    // NOTE(review): `dropout` passes `input.core` explicitly while its
    // siblings pass the wrapper and rely on implicit conversion — presumably
    // equivalent; confirm the Tensor wrapper's conversion operator.
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(dropout, (const Tensor& input, double p=0.5, bool train=true, bool inplace=false), (input.core, p, train, inplace));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(alpha_dropout, (const Tensor & input, double p=0.5, bool train=true, bool inplace=false),(input, p, train, inplace));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(dropout2d, (const Tensor& input, double p=0.5, bool training=true, bool inplace=false),(input, p, training, inplace));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(dropout3d, (const Tensor& input, double p=0.5, bool training=true, bool inplace=false),(input, p, training, inplace));

/**
 ****************************** sparse functions ********************************
 */

    // embedding: full-parameter form plus two convenience overloads that fill
    // in the remaining arguments (no max_norm, norm_type=2, no gradient
    // scaling by frequency, dense gradients).
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(embedding, (const Tensor& input,const Tensor& weight,int64_t padding_idx,
            double max_norm,double norm_type,bool scale_grad_by_freq,bool sparse),
            (input,weight,padding_idx,max_norm,norm_type,scale_grad_by_freq,sparse));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(embedding, (const Tensor& input,const Tensor& weight,int64_t padding_idx),
            (input,weight,padding_idx,c10::nullopt,2, false, false));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(embedding, (const Tensor& input,const Tensor& weight),
            (input,weight,c10::nullopt,c10::nullopt,2, false, false));
    /// Wraps torch::nn::functional::detail::embedding_bag, translating the
    /// string `mode` ("max", "mean" or "sum") into torch's EmbeddingBagMode.
    /// @throws std::invalid_argument when `mode` is not one of the three names.
    static Tensor embedding_bag(const Tensor& input,const Tensor& weight,const Tensor& offsets=Tensor(),double max_norm=DBL_MAX,double norm_type=2.,bool scale_grad_by_freq=false,
                                std::string mode="mean",bool sparse=false,const Tensor& per_sample_weights=Tensor(),bool include_last_offset=false){
        static const std::string names[] = {"max", "mean", "sum"};
        static const torch::nn::EmbeddingBagMode modes[] = {torch::kMax, torch::kMean, torch::kSum};
        constexpr std::size_t count = sizeof(names) / sizeof(names[0]);
        std::size_t idx = 0;
        while (idx < count && names[idx] != mode) {
            ++idx;
        }
        if (idx == count) {
            // Fixed: the old message listed the loss-reduction values
            // ("mean"/"sum"/"none") rather than the valid embedding_bag modes.
            throw std::invalid_argument("the mode can only be one of \"max\", \"mean\" or \"sum\"");
        }
        return torch::nn::functional::detail::embedding_bag(input, weight, offsets, max_norm, norm_type,
                                                            scale_grad_by_freq, modes[idx], sparse,
                                                            per_sample_weights, include_last_offset);
    }
    // one_hot: num_classes=-1 lets torch infer the class count from the data.
    DEFINE_TORCH_WRAPPER_FUNCTION(one_hot,(const Tensor & self, int64_t num_classes=-1),(self,num_classes));

/**
 *************************** distance functions *****************************
 */
    // Distance measures: batched Lp distance between x1/x2, cosine similarity
    // along `dim`, and pdist (pairwise distances within one tensor).
    DEFINE_TORCH_WRAPPER_FUNCTION(pairwise_distance, (const Tensor & x1, const Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false),(x1,x2,p,eps,keepdim));
    DEFINE_TORCH_WRAPPER_FUNCTION(cosine_similarity,(const Tensor & x1, const Tensor & x2, int64_t dim=1, double eps=1e-08),(x1,x2,dim,eps));
    DEFINE_TORCH_WRAPPER_FUNCTION(pdist,(const Tensor & self, double p=2),(self,p));

/**
 **************************** loss function *******************************
 */
    // Loss-function wrappers. The string `reduction` ("none" | "mean" | "sum")
    // is translated per call: torch::* ops take the int64_t code from
    // toReductionEnum(); torch::nn::functional::detail::* ops take the enum
    // object from toReductionEnumobj().
    DEFINE_TORCH_WRAPPER_FUNCTION(binary_cross_entropy,(const Tensor & self, const Tensor & target,
            const Tensor & weight={},std::string reduction="mean"),(self, target, weight, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(binary_cross_entropy_with_logits,(const Tensor & self, const Tensor & target, const Tensor & weight={},
            const Tensor & pos_weight={}, std::string reduction="mean"),(self, target, weight, pos_weight, toReductionEnum(
            reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(poisson_nll_loss,(const Tensor & input, const Tensor & target, bool log_input=true, bool full=false, double eps=1e-8, std::string reduction="mean"),
            (input,target,log_input,full,eps, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(cosine_embedding_loss,(const Tensor & input1, const Tensor & input2, const Tensor & target,
            double margin=0.0, std::string reduction="mean"),(input1, input2, target, margin, toReductionEnum(
            reduction)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(cross_entropy,(const Tensor& input,const Tensor& target,const Tensor& weight={},
            int64_t ignore_index=(int64_t)-100,std::string reduction="mean"),(input,target,weight,ignore_index, toReductionEnumobj(
            reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(ctc_loss,(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths,
            const Tensor & target_lengths, int64_t blank=0, std::string reduction="mean", bool zero_infinity=false),
            (log_probs,targets,input_lengths,target_lengths,blank, toReductionEnum(reduction),zero_infinity));
    DEFINE_TORCH_WRAPPER_FUNCTION(hinge_embedding_loss,(const Tensor & self, const Tensor & target, double margin=1.0,
            std::string reduction="mean"),(self,target,margin, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(kl_div,(const Tensor & self, const Tensor & target, std::string reduction="mean"),
            (self,target, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(l1_loss,(const Tensor & self, const Tensor & target, std::string reduction="mean"),
            (self,target, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(mse_loss,(const Tensor & self, const Tensor & target, std::string reduction="mean"),
            (self,target, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(margin_ranking_loss,(const Tensor & input1, const Tensor & input2, const Tensor & target,
            double margin=0.0, std::string reduction="mean"),(input1,input2,target,margin, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(multilabel_margin_loss,(const Tensor & self, const Tensor & target, std::string reduction="mean"),
            (self,target, toReductionEnum(reduction)));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(multilabel_soft_margin_loss,(const Tensor& input,const Tensor& target,
            const Tensor& weight={},std::string reduction="mean"),(input,target,weight, toReductionEnumobj(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(multi_margin_loss,(const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1,
            const Tensor & weight={}, std::string reduction="mean"),(self,target,p,margin,weight, toReductionEnum(
            reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(nll_loss,(const Tensor & self, const Tensor & target, const Tensor & weight={}, std::string reduction="mean",
            int64_t ignore_index=-100),(self,target,weight, toReductionEnum(reduction),ignore_index));
    // NOTE(review): nll_loss2d exposes no weight/reduction/ignore_index
    // parameters, so torch's defaults apply — confirm that is intended.
    DEFINE_TORCH_WRAPPER_FUNCTION(nll_loss2d, (const Tensor& self, const Tensor& target), (self, target));
    DEFINE_TORCH_WRAPPER_FUNCTION(smooth_l1_loss, (const Tensor & self, const Tensor & target, std::string reduction="mean"),
            (self, target, toReductionEnum(reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(soft_margin_loss,(const Tensor & self, const Tensor & target, std::string reduction="mean"),(self,target, toReductionEnum(
            reduction)));
    DEFINE_TORCH_WRAPPER_FUNCTION(triplet_margin_loss,(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin=1.0, double p=2,
            double eps=1e-06, bool swap=false, std::string reduction="mean"),(anchor,positive,negative,margin,p,eps,swap, toReductionEnum(
            reduction)));

/**
 ******************************** vision functions ******************************
 */

    // Vision wrappers. String-valued modes are translated into torch enum
    // objects by the private to*Enumobj helpers below (which throw
    // std::invalid_argument on unknown names).
    DEFINE_TORCH_WRAPPER_FUNCTION(pixel_shuffle,
            (const Tensor & self, int64_t upscale_factor),(self,upscale_factor));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(pad,
            (const Tensor& input,std::vector<int64_t> pad,std::string mode="constant",double value=0.0),
                                     (input,pad, toPadEnumobj(mode),value));
    // interpolate is overloaded on scale_factor (double) vs size (int64_t).
    // align_corners is forwarded as c10::nullopt for "nearest"/"area" modes
    // unless the caller explicitly passed align_corners=true.
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(interpolate,(const Tensor& input,std::vector<double> scale_factor,
            std::string mode="nearest",bool align_corners=false,bool recompute_scale_factor=true),
                    (input,c10::nullopt,scale_factor, toInterpolateEnumobj(mode),
                            ((mode=="nearest"||mode=="area")&&!align_corners?c10::nullopt:c10::optional<bool>(align_corners))
                            ,recompute_scale_factor));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(interpolate,(const Tensor& input,std::vector<int64_t> size,
            std::string mode="nearest",bool align_corners=false,bool recompute_scale_factor=true),
                    (input,size,c10::nullopt, toInterpolateEnumobj(mode),
                            ((mode=="nearest"||mode=="area")&&!align_corners?c10::nullopt:c10::optional<bool>(align_corners)),
                            recompute_scale_factor));
    DEFINE_NN_FUNCTIONAL_DETAIL_FUNC(grid_sample, (const Tensor& input,const Tensor& grid,std::string mode="bilinear",
            std::string padding_mode="zeros",bool align_corners=false),
                    (input, grid, toGridSampleModeEnumobj(mode), toGridSamplePaddingModeEnumobj(padding_mode), align_corners));
    /// Generates a sampling grid from batched affine matrices `theta`; thin
    /// wrapper over torch::nn::functional::affine_grid.
    static Tensor affine_grid(const Tensor& theta,const std::vector<int64_t>& size,bool align_corners = false){
        const auto grid = torch::nn::functional::affine_grid(theta, size, align_corners);
        return Tensor(grid);
    }

/**
 **************************** DataParallel functions (multi-GPU, distributed) ************************************
 */

//    static Tensor data_paralle(ModuleType module,
//                               Tensor input,
//                               std::vector<Device> devices,
//                               Device output_device,
//                               int64_t dim){
//        return torch::nn::parallel::data_parallel(module,input,devices,c10::optional<Device>(output_device),dim);
//    }

/**
 ***************************** others ***************************
 */
    /// Element-wise equality of `self` against the scalar `mask`; returns the
    /// resulting boolean mask tensor.
    static Tensor equal_indices(const Tensor &self, const Scalar &mask) {
        auto hits = self.core == mask;
        return Tensor(hits);
    }
    /// Element-wise equality of `self` against `target` (broadcast by torch);
    /// returns the resulting boolean mask tensor.
    static Tensor equal_indices(const Tensor &self, const Tensor &target) {
        auto hits = self.core == target;
        return Tensor(hits);
    }


    // NOTE(review): AS_TENSOR_LIST / AS_TENSOR_ITEM are macros defined outside
    // this section; from their names they appear to generate list/item
    // conversion helpers for the given element type — confirm against the
    // macro definitions.
    AS_TENSOR_LIST(double);

    AS_TENSOR_LIST(int64_t);

    AS_TENSOR_ITEM(int64_t);

    /// Maps every element of `target` to the index of its first occurrence in
    /// `src`; both tensors are read as flat arrays of T on the host.
    /// Elements absent from `src` yield -1 (previously the slot kept whatever
    /// uninitialized memory torch::empty handed back).
    /// Hashing `src` once makes this O(|src| + |target|) instead of the old
    /// O(|src| * |target|) rescan of `src` per target element.
    template<typename T>
    static Tensor get_indices_from_unique_(const Tensor &src, const Tensor &target) {
#ifdef Torch_Version_Less_13
        T *src_data = src.core.data<T>();
        T *target_data = target.core.data<T>();
#else
        T *src_data = src.core.data_ptr<T>();
        T *target_data = target.core.data_ptr<T>();
#endif
        int64_t src_len = src.core.numel();
        int64_t target_len = target.core.numel();
        // emplace keeps the FIRST occurrence of a duplicate key, matching the
        // old first-match linear scan.
        std::unordered_map<T, int64_t> index_of;
        index_of.reserve(static_cast<std::size_t>(src_len));
        for (int64_t i = 0; i < src_len; ++i) {
            index_of.emplace(src_data[i], i);
        }
        at::Tensor v = torch::full({target_len}, -1, c10::TensorOptions(at::kLong));
        at::parallel_for(0, target_len, 1, [&](int64_t begin, int64_t end) {
            for (; begin < end; ++begin) {
                auto hit = index_of.find(target_data[begin]);
                if (hit != index_of.end()) {
                    v[begin] = hit->second;
                }
            }
        });
        return Tensor(v);
    }

#ifdef CUDA_AVAILABLE
    /// Dumps basic properties (global/constant memory) of the current CUDA
    /// device, plus the platform's `long` width, to stdout.
    static void print_debug_info() {
        const auto props = at::cuda::getDeviceProperties(at::cuda::current_device());
        std::cout << "Long size " << sizeof(long) << std::endl;
        std::cout << "totalGlobalMem " << props->totalGlobalMem << std::endl
                  << "totalConstMem " << props->totalConstMem << std::endl;
    }
#else

    /// Without CUDA support, just report whether torch can see a CUDA device.
    inline static void print_debug_info() {
        std::cout << "cuda available: " << torch::cuda::is_available() << std::endl;
    }

#endif //CUDA_AVAILABLE

    /// Reports whether torch can see at least one usable CUDA device.
    static bool is_cuda_available() {
        const bool has_cuda = torch::cuda::is_available();
        return has_cuda;
    }

    /// Builds a sparse COO tensor from index/value pairs (dense size inferred
    /// by torch).
    static inline Tensor sparse_coo_tensor(const Tensor &indices, const Tensor &values) {
        auto sparse = torch::sparse_coo_tensor(indices, values);
        return Tensor(sparse);
    }

    /// Builds a sparse COO tensor with an explicit dense `size` and optional
    /// tensor options.
    static inline Tensor
    sparse_coo_tensor(const Tensor &indices, const Tensor &values, const std::vector<int64_t> &size,
                      const c10::TensorOptions &options = {}) {
        auto sparse = at::sparse_coo_tensor(indices, values, size, options);
        return Tensor(sparse);
    }


    /// Random permutation of the integers [0, n) as a 1-D tensor.
    static Tensor randperm(int64_t n, const TensorOptions &options = {}) {
        auto perm = torch::randperm(n, options);
        return Tensor(perm);
    }

    static std::vector<Tensor>
    split_with_sizes(const Tensor &self, const std::vector<int64_t> &split_sizes, int64_t dim = 0) {
        std::vector<at::Tensor> v = torch::split_with_sizes(self, split_sizes, dim);
        std::vector<at::Tensor>::iterator it;
        std::vector<Tensor> result;
        for (it = v.begin(); it != v.end(); it++) {
            result.emplace_back(*it);
        }
        return result;
    }

    /// Seeds torch's global random number generator for reproducible runs.
    static void manual_seed(uint64_t seed) { torch::manual_seed(seed); }

private:
    /// Maps a reduction name to its index: "none"->0, "mean"->1, "sum"->2.
    /// @throws std::invalid_argument for any other string.
    static int64_t toReductionEnum(std::string& reduction){
        static const std::string names[] = {"none", "mean", "sum"};
        for (int64_t i = 0; i < 3; ++i) {
            if (names[i] == reduction) {
                return i;
            }
        }
        throw std::invalid_argument("the reduction can only be one of \"mean\", \"sum\" or \"none\"");
    }
    /// Converts a reduction name into the corresponding torch enum object,
    /// reusing the index mapping from toReductionEnum (which validates and
    /// throws on unknown names).
    static c10::variant<torch::enumtype::kNone, torch::enumtype::kMean, torch::enumtype::kSum>
    toReductionEnumobj(std::string& reduction) {
        switch (toReductionEnum(reduction)) {
            case 0:  return torch::kNone;
            case 1:  return torch::kMean;
            default: return torch::kSum;
        }
    }
    /// Maps an interpolate mode name to torch's enum variant.
    /// @throws std::invalid_argument for an unrecognised mode.
    static c10::variant<
            torch::enumtype::kNearest,
            torch::enumtype::kLinear,
            torch::enumtype::kBilinear,
            torch::enumtype::kBicubic,
            torch::enumtype::kTrilinear,
            torch::enumtype::kArea> toInterpolateEnumobj (std::string mode){
        // static const: build the table once instead of on every call, and
        // look the key up a single time (find) instead of find + operator[].
        static const std::unordered_map<std::string,
                c10::variant<
                        torch::enumtype::kNearest,
                        torch::enumtype::kLinear,
                        torch::enumtype::kBilinear,
                        torch::enumtype::kBicubic,
                        torch::enumtype::kTrilinear,
                        torch::enumtype::kArea>> modeMap = {
                {"nearest",torch::kNearest},
                {"bilinear",torch::kBilinear},
                {"linear", torch::kLinear},
                {"bicubic",torch::kBicubic},
                {"trilinear",torch::kTrilinear},
                {"area",torch::kArea}
        };
        auto it = modeMap.find(mode);
        if (it == modeMap.end()){
            throw std::invalid_argument("invalid value for the mode of interpolate()");
        }
        return it->second;
    }
    /// Maps a pad mode name to torch's enum variant.
    /// @throws std::invalid_argument for an unrecognised mode.
    static c10::variant<
            torch::enumtype::kConstant,
            torch::enumtype::kReflect,
            torch::enumtype::kReplicate,
            torch::enumtype::kCircular> toPadEnumobj (std::string mode){
        // static const: build the table once instead of on every call, and
        // look the key up a single time (find) instead of find + operator[].
        static const std::unordered_map<std::string,
                c10::variant<
                        torch::enumtype::kConstant,
                        torch::enumtype::kReflect,
                        torch::enumtype::kReplicate,
                        torch::enumtype::kCircular>> modeMap = {
                {"constant",torch::kConstant},
                {"reflect",torch::kReflect},
                {"replicate",torch::kReplicate},
                {"circular",torch::kCircular}
        };
        auto it = modeMap.find(mode);
        if (it == modeMap.end()){
            throw std::invalid_argument("invalid value for the mode of pad()");
        }
        return it->second;
    }
    /// Maps a grid_sample interpolation mode name to torch's enum variant.
    /// @throws std::invalid_argument for an unrecognised mode.
    static c10::variant<torch::enumtype::kBilinear, torch::enumtype::kNearest>
    toGridSampleModeEnumobj(std::string mode){
        // static const: build the table once instead of on every call, and
        // look the key up a single time (find) instead of find + operator[].
        static const std::unordered_map<std::string,
        c10::variant<torch::enumtype::kBilinear, torch::enumtype::kNearest>>
        modeMap = {
                {"bilinear", torch::kBilinear},
                {"nearest", torch::kNearest}
        };
        auto it = modeMap.find(mode);
        if (it == modeMap.end()){
            throw std::invalid_argument("invalid value for the mode of grid_sample()");
        }
        return it->second;
    }
    /// Maps a grid_sample padding mode name to torch's enum variant.
    /// @throws std::invalid_argument for an unrecognised padding mode.
    static c10::variant<torch::enumtype::kZeros, torch::enumtype::kBorder, torch::enumtype::kReflection>
    toGridSamplePaddingModeEnumobj(std::string mode){
        // static const: build the table once instead of on every call, and
        // look the key up a single time (find) instead of find + operator[].
        static const std::unordered_map<std::string,
        c10::variant<torch::enumtype::kZeros, torch::enumtype::kBorder, torch::enumtype::kReflection>>
        modeMap = {
                {"zeros", torch::kZeros},
                {"border", torch::kBorder},
                {"reflection", torch::kReflection}
        };
        auto it = modeMap.find(mode);
        if (it == modeMap.end()){
            throw std::invalid_argument("invalid value for the padding_mode of grid_sample()");
        }
        return it->second;
    }
};
