#pragma once

#include <utility>
#include <torch/torch.h>
#include <LibDL/Tensor/Tensor.h>
#include <LibDL/nn/Modules/Modules.h>
#include <LibDL/utils.h>

namespace nn {
// Two-level token-paste helper: the outer macro forces macro arguments to be
// expanded before ## is applied (the standard CONCAT idiom).
#define WEIRD_HASHHASH(a, b) a##b
#define HASHHASH(a, b) WEIRD_HASHHASH(a,b)
// Deprecated alias kept so existing code using the old misspelling still builds.
#define WIERD_HASHHASH(a, b) WEIRD_HASHHASH(a, b)

// Expands to a default constructor that leaves the wrapped libtorch module
// unset (the Module base holds a null impl pointer until one is attached).
#define DEFAULT_NULL_CTOR(ClassName)\
        ClassName():Module(nullptr){}

// Opens a wrapper class deriving from Module around a libtorch module holder.
// `TorchClass` is a holder such as torch::nn::AvgPool1d; HASHHASH pastes
// "Impl" onto its last token, producing the libtorch implementation type
// (e.g. torch::nn::AvgPool1dImpl). Every DEFINE_MODULE must be closed with
// DEFINE_MODULE_ENDS (or a literal `};`).
#define DEFINE_MODULE(ClassName, TorchClass)\
    class ClassName : public Module {\
    public:\
        using ImplClassImpl= HASHHASH(TorchClass,Impl);\
        using ImplClass=TorchClass;\
        std::shared_ptr<ImplClassImpl> get_ptr()\
            {return Module::get_ptr<ImplClassImpl>();}\
        ClassName():Module(nullptr){}

// Closes the class opened by DEFINE_MODULE.
#define DEFINE_MODULE_ENDS };

// Constructor generators. `CtorList` is the parenthesized parameter list,
// `Create` is the parenthesized argument list for constructing ImplClassImpl,
// and the variadic tail (where present) supplies extra member initializers.
//
// DEFINE_CTOR and DEFAULT_CTOR_VA were byte-identical duplicates; DEFINE_CTOR
// is kept as the historical spelling and now simply delegates.
#define DEFINE_CTOR(ClassName, CtorList, Create, ...)\
DEFAULT_CTOR_VA(ClassName, CtorList, Create, __VA_ARGS__)

// Constructor with no extra member initializers.
#define DEFAULT_CTOR(ClassName, CtorList, Create)\
explicit ClassName CtorList :Module(std::make_shared<ImplClassImpl> Create) {}

// Constructor with extra member initializers passed as trailing arguments.
#define DEFAULT_CTOR_VA(ClassName, CtorList, Create, ...)\
explicit ClassName CtorList :Module(std::make_shared<ImplClassImpl> Create), __VA_ARGS__ {}


// Generates a `forward` member: downcasts the stored impl, calls its native
// forward with the unwrapped argument(s), and wraps the result in the given
// return type. noexcept(false) documents that the underlying libtorch call
// may throw.
#define DEFINE_FORWARD_FUNCTION(\
        ForwardFuncParamInit, \
        ForwardFuncReturnType, \
        ForwardFuncParamToNative)\
        ForwardFuncReturnType forward(ForwardFuncParamInit) noexcept(false){\
                return ForwardFuncReturnType(std::dynamic_pointer_cast<ImplClassImpl>(core_)\
                    ->forward(ForwardFuncParamToNative)); \
        }


// Standard Tensor -> Tensor forward: unwraps the argument via `.core`.
#define DEFINE_DEFAULT_FORWARD_FUNC DEFINE_FORWARD_FUNCTION(const Tensor& t, Tensor,t.core)


// One-shot helper: opens the wrapper class, adds the default constructor, the
// forward function, any extra code, and closes the class body. The caller
// supplies the trailing semicolon after the invocation.
#define CREATE_MODULE(ClassName, TorchClass, \
    ForwardFuncParamInit, \
    ForwardFuncReturnType, \
    ForwardFuncParamToNative, \
    CtorList, Create, \
    OtherCode)\
    DEFINE_MODULE(ClassName,TorchClass)\
    DEFAULT_CTOR(ClassName,CtorList,Create)\
    DEFINE_FORWARD_FUNCTION(ForwardFuncParamInit,ForwardFuncReturnType,ForwardFuncParamToNative)\
        OtherCode\
    }

// Tensor -> Tensor module with a single ctor. The native forward must receive
// the raw tensor, so unwrap with `.core` — this matches
// DEFINE_DEFAULT_FORWARD_FUNC above (previously the wrapper `t` was passed
// through unwrapped, inconsistent with every other forward in this file).
#define CREATE_DEFAULT_MODULE(ClassName, ImplClass, CtorList, Create, OtherCode)\
    CREATE_MODULE(ClassName,ImplClass,const Tensor& t, Tensor,t.core, CtorList,Create,OtherCode)

#pragma region Pooling

// Max-pool variant that also returns the argmax indices as (output, indices).
// (Removed a dead local that duplicated the returned pair construction.)
#define DEFINE_FORWARD_WITH_INDICES \
        std::pair<Tensor, Tensor> forward_with_indices(const Tensor& input) noexcept(false){\
            std::tuple<at::Tensor, at::Tensor> pool_with_indices = std::dynamic_pointer_cast<ImplClassImpl>(core_) ->forward_with_indices(input.core); \
            return std::pair<Tensor, Tensor>(std::get<0>(pool_with_indices),std::get<1>(pool_with_indices)); \
        }
// Unpool forward taking an explicit output_size.
// (Macro names keep the historical "FOWARD" misspelling; renaming would break
// existing call sites.)
#define DEFINE_UNPOOL_FOWARD_WITH_OUTPUT_SIZE \
        Tensor forward(const Tensor &input, const Tensor &indices, const std::vector<int64_t>& output_size) { \
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(input.core, indices.core, output_size));\
        }
// Unpool forward letting libtorch infer the output size.
#define DEFINE_UNPOOL_FOWARD_WITHOUT_OUTPUT_SIZE \
        Tensor forward(const Tensor &input, const Tensor &indices) { \
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(input.core, indices.core));\
        }


    // 1/2/3-dimensional average pooling. Each wrapper offers two constructors:
    // one taking per-dimension vectors and one taking single ints applied to
    // every dimension.
    DEFINE_MODULE(AvgPool1d, torch::nn::AvgPool1d)

        // Per-dimension kernel/stride/padding.
        DEFAULT_CTOR(AvgPool1d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0}, bool ceil_mode = false, bool count_include_pad = true),
                     (torch::nn::AvgPool1dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        // Scalar convenience overload.
        DEFAULT_CTOR(AvgPool1d, (int kernel_size, int stride, int padding = 0, bool ceil_mode = false,
                bool count_include_pad = true),
                     (torch::nn::AvgPool1dOptions(kernel_size).stride(stride).padding(padding)
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        DEFINE_DEFAULT_FORWARD_FUNC

    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AvgPool2d, torch::nn::AvgPool2d)

        // Per-dimension kernel/stride/padding.
        DEFAULT_CTOR(AvgPool2d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0, 0}, bool ceil_mode = false, bool count_include_pad = true),
                     (torch::nn::AvgPool2dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        // Scalar convenience overload (square kernel).
        DEFAULT_CTOR(AvgPool2d, (int kernel_size, int stride, int padding = 0, bool ceil_mode = false,
                bool count_include_pad = true),
                     (torch::nn::AvgPool2dOptions(kernel_size).stride(stride).padding(padding)
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        DEFINE_DEFAULT_FORWARD_FUNC

    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AvgPool3d, torch::nn::AvgPool3d)

        // Per-dimension kernel/stride/padding.
        DEFAULT_CTOR(AvgPool3d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0, 0, 0}, bool ceil_mode = false, bool count_include_pad = true),
                     (torch::nn::AvgPool3dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        // Scalar convenience overload (cubic kernel).
        DEFAULT_CTOR(AvgPool3d, (int kernel_size, int stride, int padding = 0, bool ceil_mode = false,
                bool count_include_pad = true),
                     (torch::nn::AvgPool3dOptions(kernel_size).stride(stride).padding(padding)
                             .ceil_mode(ceil_mode)
                             .count_include_pad(count_include_pad)));

        DEFINE_DEFAULT_FORWARD_FUNC

    DEFINE_MODULE_ENDS

    // 1/2/3-dimensional max pooling and the matching unpooling modules.
    // Pool modules expose both the plain forward and forward_with_indices;
    // unpool modules consume those indices.
    DEFINE_MODULE(MaxPool1d, torch::nn::MaxPool1d)

        // Per-dimension kernel/stride/padding/dilation.
        DEFAULT_CTOR(MaxPool1d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0}, const std::vector<int64_t>& dilation = {1}, bool ceil_mode = false),
                     (torch::nn::MaxPool1dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .dilation(dilation)));

        // Scalar convenience overload.
        DEFAULT_CTOR(MaxPool1d, (int kernel_size, int stride, int padding = 0,
                int dilation = 1, bool ceil_mode = false),
                     (torch::nn::MaxPool1dOptions(kernel_size).stride(stride).padding(padding).
                             ceil_mode(ceil_mode).dilation(dilation)));
        DEFINE_FORWARD_WITH_INDICES

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(MaxUnpool1d, torch::nn::MaxUnpool1d)

        DEFAULT_CTOR(MaxUnpool1d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride, const std::vector<int64_t>& padding = {0}),
                     (torch::nn::MaxUnpool1dOptions(at::IntArrayRef(kernel_size)).stride(stride).padding(at::IntArrayRef(padding))));

        DEFAULT_CTOR(MaxUnpool1d, (int kernel_size, int stride, int padding = 0),
                     (torch::nn::MaxUnpool1dOptions(kernel_size).stride(stride).padding(padding)));
        // forward(input, indices[, output_size]) — indices come from the
        // matching MaxPool's forward_with_indices.
        DEFINE_UNPOOL_FOWARD_WITH_OUTPUT_SIZE;
        DEFINE_UNPOOL_FOWARD_WITHOUT_OUTPUT_SIZE;

    DEFINE_MODULE_ENDS

    DEFINE_MODULE(MaxPool2d, torch::nn::MaxPool2d)

        // Per-dimension kernel/stride/padding/dilation.
        DEFAULT_CTOR(MaxPool2d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0, 0}, const std::vector<int64_t>& dilation = {1, 1}, bool ceil_mode = false),
                     (torch::nn::MaxPool2dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .dilation(dilation)));

        // Scalar convenience overload (square kernel).
        DEFAULT_CTOR(MaxPool2d, (int kernel_size, int stride, int padding = 0,
                int dilation = 1, bool ceil_mode = false),
                     (torch::nn::MaxPool2dOptions(kernel_size).stride(stride).padding(padding).
                             ceil_mode(ceil_mode).dilation(dilation)));

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES

    DEFINE_MODULE_ENDS

    DEFINE_MODULE(MaxUnpool2d, torch::nn::MaxUnpool2d)

        DEFAULT_CTOR(MaxUnpool2d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride, const std::vector<int64_t>& padding = {0,0}),
                     (torch::nn::MaxUnpool2dOptions(at::IntArrayRef(kernel_size)).stride(stride).padding(at::IntArrayRef(padding))));

        DEFAULT_CTOR(MaxUnpool2d, (int kernel_size, int stride, int padding = 0),
                     (torch::nn::MaxUnpool2dOptions(kernel_size).stride(stride).padding(padding)));
        DEFINE_UNPOOL_FOWARD_WITH_OUTPUT_SIZE;
        DEFINE_UNPOOL_FOWARD_WITHOUT_OUTPUT_SIZE;

    DEFINE_MODULE_ENDS

    DEFINE_MODULE(MaxPool3d, torch::nn::MaxPool3d)

        // Per-dimension kernel/stride/padding/dilation.
        DEFAULT_CTOR(MaxPool3d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride,
                const std::vector<int64_t>& padding = {0, 0, 0}, const std::vector<int64_t>& dilation = {1, 1,
                1}, bool ceil_mode = false),
                     (torch::nn::MaxPool3dOptions(at::IntArrayRef(kernel_size))
                             .stride(at::IntArrayRef(stride))
                             .padding(at::IntArrayRef(padding))
                             .ceil_mode(ceil_mode)
                             .dilation(dilation)));

        // Scalar convenience overload (cubic kernel).
        DEFAULT_CTOR(MaxPool3d, (int kernel_size, int stride, int padding = 0,
                int dilation = 1, bool ceil_mode = false),
                     (torch::nn::MaxPool3dOptions(kernel_size).stride(stride).padding(padding).
                             ceil_mode(ceil_mode).dilation(dilation)));

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(MaxUnpool3d, torch::nn::MaxUnpool3d)

        DEFAULT_CTOR(MaxUnpool3d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride, const std::vector<int64_t>& padding = {0,0,0}),
                     (torch::nn::MaxUnpool3dOptions(at::IntArrayRef(kernel_size)).stride(stride).padding(at::IntArrayRef(padding))));

        DEFAULT_CTOR(MaxUnpool3d, (int kernel_size, int stride, int padding = 0),
                     (torch::nn::MaxUnpool3dOptions(kernel_size).stride(stride).padding(padding)));
        DEFINE_UNPOOL_FOWARD_WITH_OUTPUT_SIZE;
        DEFINE_UNPOOL_FOWARD_WITHOUT_OUTPUT_SIZE;

    DEFINE_MODULE_ENDS


    // Adaptive pooling: the caller specifies the desired output size instead
    // of kernel/stride; max variants also expose forward_with_indices.
    DEFINE_MODULE(AdaptiveAvgPool1d, torch::nn::AdaptiveAvgPool1d)

        DEFAULT_CTOR(AdaptiveAvgPool1d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveAvgPool1dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveAvgPool1d, (int output_size), (torch::nn::AdaptiveAvgPool1dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AdaptiveAvgPool2d, torch::nn::AdaptiveAvgPool2d)

        DEFAULT_CTOR(AdaptiveAvgPool2d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveAvgPool2dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveAvgPool2d, (int output_size), (torch::nn::AdaptiveAvgPool2dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AdaptiveAvgPool3d, torch::nn::AdaptiveAvgPool3d)

        DEFAULT_CTOR(AdaptiveAvgPool3d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveAvgPool3dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveAvgPool3d, (int output_size), (torch::nn::AdaptiveAvgPool3dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS


    DEFINE_MODULE(AdaptiveMaxPool1d, torch::nn::AdaptiveMaxPool1d)

        DEFAULT_CTOR(AdaptiveMaxPool1d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveMaxPool1dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveMaxPool1d, (int output_size), (torch::nn::AdaptiveMaxPool1dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AdaptiveMaxPool2d, torch::nn::AdaptiveMaxPool2d)

        DEFAULT_CTOR(AdaptiveMaxPool2d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveMaxPool2dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveMaxPool2d, (int output_size), (torch::nn::AdaptiveMaxPool2dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(AdaptiveMaxPool3d, torch::nn::AdaptiveMaxPool3d)

        DEFAULT_CTOR(AdaptiveMaxPool3d, (const std::vector<int64_t>& output_size),
                     (torch::nn::AdaptiveMaxPool3dOptions(at::IntArrayRef(output_size))));

        DEFAULT_CTOR(AdaptiveMaxPool3d, (int output_size), (torch::nn::AdaptiveMaxPool3dOptions(output_size)));

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    // Fractional max pooling: output is described either by an absolute
    // output_size or by an output_ratio relative to the input.
    DEFINE_MODULE(FractionalMaxPool2d, torch::nn::FractionalMaxPool2d)

        DEFAULT_CTOR(FractionalMaxPool2d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& output_size),
                     (torch::nn::FractionalMaxPool2dOptions(kernel_size).output_size(output_size)));

        DEFAULT_CTOR(FractionalMaxPool2d, (int kernel_size, int output_size),
                     (torch::nn::FractionalMaxPool2dOptions(kernel_size).output_size(output_size)));

        DEFAULT_CTOR(FractionalMaxPool2d, (int kernel_size, const std::vector<double>& output_ratio),
                     (torch::nn::FractionalMaxPool2dOptions(kernel_size).output_ratio(output_ratio)));

        DEFAULT_CTOR(FractionalMaxPool2d, (const std::vector<int64_t>& kernel_size, const std::vector<double>& output_ratio),
                     (torch::nn::FractionalMaxPool2dOptions(kernel_size).output_ratio(output_ratio)))

        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(FractionalMaxPool3d, torch::nn::FractionalMaxPool3d)

        DEFAULT_CTOR(FractionalMaxPool3d, (const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& output_size),
                     (torch::nn::FractionalMaxPool3dOptions(kernel_size).output_size(output_size)));

        DEFAULT_CTOR(FractionalMaxPool3d, (int kernel_size, int output_size),
                     (torch::nn::FractionalMaxPool3dOptions(kernel_size).output_size(output_size)));

        DEFAULT_CTOR(FractionalMaxPool3d, (int kernel_size, const std::vector<double>& output_ratio),
                     (torch::nn::FractionalMaxPool3dOptions(kernel_size).output_ratio(output_ratio)));

        DEFAULT_CTOR(FractionalMaxPool3d, (const std::vector<int64_t>& kernel_size, const std::vector<double>& output_ratio),
                     (torch::nn::FractionalMaxPool3dOptions(kernel_size).output_ratio(output_ratio)))
        DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_FORWARD_WITH_INDICES
    DEFINE_MODULE_ENDS

    // Power-average (Lp-norm) pooling. An empty `stride` means "use
    // kernel_size as the stride", matching the Python default.
    DEFINE_MODULE(LPPool1d, torch::nn::LPPool1d)
        DEFAULT_CTOR(LPPool1d, (double norm_type, const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride = {}, bool ceil_mode = false),
                     (torch::nn::LPPoolOptions<1>(norm_type, kernel_size).stride(stride.empty() ? kernel_size : stride).ceil_mode(ceil_mode)));
        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(LPPool2d, torch::nn::LPPool2d)
        DEFAULT_CTOR(LPPool2d, (double norm_type, const std::vector<int64_t>& kernel_size, const std::vector<int64_t>& stride = {}, bool ceil_mode = false),
                     (torch::nn::LPPoolOptions<2>(norm_type, kernel_size).stride(stride.empty() ? kernel_size : stride).ceil_mode(ceil_mode)));
        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS
#pragma endregion

#pragma region RNN
// Recurrent forward: returns (output, hidden_state) as wrapped tensors. The
// hidden-state argument defaults to an empty Tensor.
#define DEFINE_RNN_FORWARD \
        std::pair<Tensor, Tensor> forward(const Tensor& input, const Tensor& hx = {}) { \
            std::tuple<torch::Tensor,torch::Tensor> output = std::dynamic_pointer_cast<ImplClassImpl >(core_)->forward(input.core, hx.core);\
            return std::pair<Tensor, Tensor>(std::get<0>(output),std::get<1>(output)); \
        }
// Like DEFAULT_CTOR but deliberately omits the `{}` body so the invocation
// site can append its own constructor body in braces.
#define DEFAULT_RNN_CTOR(CLassName, CtorList, Create)\
explicit CLassName CtorList :Module(std::make_shared<ImplClassImpl> Create)


    DEFINE_MODULE(RNN, torch::nn::RNN)
        // Vanilla RNN. Pass nonlinearity_t == "kReLU" to request a ReLU cell;
        // any other value keeps the libtorch default activation.
        DEFAULT_RNN_CTOR(RNN, (int64_t input_size, int64_t hidden_size, int64_t num_layers = 1, const std::string& nonlinearity_t = "", bool bias = true, bool batch_first = false, double dropout = 0.0, bool bidirectional = false),
                     (torch::nn::RNNOptions(input_size, hidden_size)
                     .num_layers(num_layers)
                     .bias(bias)
                     .batch_first(batch_first)
                     .dropout(dropout)
                     .bidirectional(bidirectional))){
            if(nonlinearity_t == "kReLU") {
                // NOTE(review): this mutates `options` AFTER the impl has been
                // constructed; libtorch typically reads options at
                // construction/reset time, so this may have no effect on the
                // already-built cell — verify (a reset() may be required).
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.nonlinearity(torch::kReLU);
            }
        }

        DEFINE_RNN_FORWARD
    // TODO: expose weight/bias parameter attributes on RNN as the Conv
    // wrappers do.
    DEFINE_MODULE_ENDS

    DEFINE_MODULE(LSTM, torch::nn::LSTM)
        DEFAULT_CTOR(LSTM, (int64_t input_size, int64_t hidden_size, int64_t num_layers = 1, const std::string& nonlinearity_t = "", bool bias = true, bool batch_first = false, double dropout = 0.0, bool bidirectional = false),
                     (torch::nn::LSTMOptions(input_size, hidden_size)
                             .num_layers(num_layers)
                             .bias(bias)
                             .batch_first(batch_first)
                             .dropout(dropout)
                             .bidirectional(bidirectional)));
    public:
        std::vector<Tensor> forward(const Tensor& input, const std::pair<Tensor, Tensor>& hx_opt) {
            std::vector<Tensor> output;
            std::tuple<Tensor, std::tuple<Tensor, Tensor>> torch_output = std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(input.core, hx_opt);
            output.emplace_back(Tensor(std::get<0>(torch_output)));
            output.emplace_back(Tensor(std::get<0>(std::get<1>(torch_output))));
            output.emplace_back(Tensor(std::get<1>(std::get<1>(torch_output))));
            return  output;
        }

        std::vector<Tensor> forward(const Tensor& input) {
            std::vector<Tensor> output;
            std::tuple<Tensor, std::tuple<Tensor, Tensor>> torch_output = std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(input.core);
            output.emplace_back(Tensor(std::get<0>(torch_output)));
            output.emplace_back(Tensor(std::get<0>(std::get<1>(torch_output))));
            output.emplace_back(Tensor(std::get<1>(std::get<1>(torch_output))));
            return  output;
        }


    DEFINE_MODULE_ENDS

    DEFINE_MODULE(GRU, torch::nn::GRU)
        // Gated recurrent unit. NOTE: nonlinearity_t is accepted only for
        // signature parity with RNN and is unused — a GRU has no configurable
        // cell nonlinearity.
        DEFAULT_CTOR(GRU, (int64_t input_size, int64_t hidden_size, int64_t num_layers = 1, const std::string& nonlinearity_t = "", bool bias = true, bool batch_first = false, double dropout = 0.0, bool bidirectional = false),
                     (torch::nn::GRUOptions(input_size, hidden_size)
                             .num_layers(num_layers)
                             .bias(bias)
                             .batch_first(batch_first)
                             .dropout(dropout)
                             .bidirectional(bidirectional)));
        DEFINE_RNN_FORWARD
    DEFINE_MODULE_ENDS

#pragma endregion

#pragma region Conv
//todo add conv transpose

    DEFINE_MODULE(Conv1d, torch::nn::Conv1d)

        // Learnable parameters, copied from the wrapped libtorch impl in the
        // member-init list (presumably torch's shallow tensor copy, so these
        // alias the impl's storage — verify Tensor's wrapping constructor).
        Tensor weight;
        Tensor bias;

        DEFAULT_CTOR_VA(Conv1d, (int in_channel, int out_channel, int kernel_size,
                int stride = 1,
                int padding = 0),
                        (torch::nn::Conv1dOptions(in_channel, out_channel, kernel_size)
                                .padding(padding).stride(stride)), weight(get_ptr()->weight), bias(get_ptr()->bias));

        DEFINE_DEFAULT_FORWARD_FUNC;

    };

    DEFINE_MODULE(Conv2d, torch::nn::Conv2d)

        // Learnable parameters, copied from the wrapped libtorch impl in the
        // member-init list (presumably torch's shallow tensor copy, so these
        // alias the impl's storage — verify Tensor's wrapping constructor).
        Tensor weight;
        Tensor bias;

        // Per-dimension kernel/stride/padding.
        DEFINE_CTOR(Conv2d, (int in_channel, int out_channel,
                const std::vector<int64_t>& kernel_size,
                const std::vector<int64_t>& stride = {1, 1},
                const std::vector<int64_t>& padding = {0, 0}),
                    (torch::nn::Conv2dOptions(in_channel, out_channel, at::IntArrayRef(kernel_size))
                            .padding(at::IntArrayRef(padding)).stride(at::IntArrayRef(stride))),
                    weight(get_ptr()->weight), bias(get_ptr()->bias));

        // Square-kernel convenience overload.
        DEFAULT_CTOR_VA(Conv2d, (int in_channel, int out_channel, int kernel_size,
                int stride = 1,
                int padding = 0),
                        (torch::nn::Conv2dOptions(in_channel, out_channel, kernel_size)
                                .padding(padding).stride(stride)), weight(get_ptr()->weight), bias(get_ptr()->bias));

        DEFINE_DEFAULT_FORWARD_FUNC;

    };


    DEFINE_MODULE(Conv3d, torch::nn::Conv3d)

        // Learnable parameters taken from the wrapped libtorch module.
        Tensor weight;
        Tensor bias;

        // Per-dimension kernel/stride/padding.
        DEFINE_CTOR(Conv3d,
                    (int in_channel, int out_channel, const std::vector<int64_t>& kernel_size,
                     const std::vector<int64_t>& stride = {1, 1, 1},
                     const std::vector<int64_t>& padding = {0, 0, 0}),
                    (torch::nn::Conv3dOptions(in_channel, out_channel, at::IntArrayRef(kernel_size))
                            .padding(at::IntArrayRef(padding)).stride(at::IntArrayRef(stride))),
                    weight(get_ptr()->weight), bias(get_ptr()->bias));

        // Cubic-kernel convenience overload.
        DEFINE_CTOR(Conv3d,
                    (int in_channel, int out_channel, int kernel_size, int stride = 1, int padding = 0),
                    (torch::nn::Conv3dOptions(in_channel, out_channel, kernel_size)
                            .padding(padding).stride(stride)),
                    weight(get_ptr()->weight), bias(get_ptr()->bias));

        DEFINE_DEFAULT_FORWARD_FUNC;

    };

//    CREATE_DEFAULT_MODULE(Conv1d, torch::nn::Conv1d,
//                          (int in_channel, int
//                                  out_channel, int
//                                  kernel_size, int stride = 1, int padding = 0),
//                          (torch::nn::Conv1dOptions(in_channel, out_channel, kernel_size).padding(padding).stride(
//                                  stride)),);

#pragma endregion

#pragma region Embed

    DEFINE_MODULE(Embedding, torch::nn::Embedding)

        // Embedding matrix taken from the wrapped libtorch module.
        Tensor weight;

        // count: number of embeddings; dimension: size of each vector.
        DEFINE_CTOR(Embedding, (int64_t count, int64_t dimension),
                    (count, dimension), weight(get_ptr()->weight));

        DEFINE_DEFAULT_FORWARD_FUNC;

    };

#pragma endregion

//#pragma region Container
//    DEFINE_MODULE(Sequential, torch::nn::Sequential)
//        Sequential(Module modules[], std::string names[] ) {
//
//            torch::nn::Sequential sequential = torch::nn::Sequential({{names[0],modules[0].get_core()}});
//        }
//        Sequential(Module modules[]) {
//            std::vector<torch::nn::NamedAnyModule> vector;
//            std::initializer_list<torch::nn::NamedAnyModule> named_any_modules(vector);
//            torch::nn::Sequential sequential = torch::nn::Sequential({{modules[0].get_core()}});
//        }
//#pragma endregion


#pragma  region Linear
    DEFINE_MODULE(Linear, torch::nn::Linear)

        Tensor weight,bias;

        DEFINE_CTOR(Linear,  (int in, int
                out), (in, out), weight(get_ptr()->weight), bias(get_ptr()->bias));

        DEFINE_DEFAULT_FORWARD_FUNC;

    };

    DEFINE_MODULE(Bilinear, torch::nn::Bilinear)
        // Learnable parameters taken from the wrapped libtorch module.
        Tensor weight, bias;

        DEFINE_CTOR(Bilinear, (int64_t in1_features, int64_t in2_features, int64_t out_features),(in1_features, in2_features,out_features), weight(get_ptr()->weight),bias(get_ptr()->bias));
        // Two-input forward; the single-tensor forward macro does not apply.
        Tensor forward(const Tensor& input1, const Tensor& input2){
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(input1.core, input2.core));
        }
    };

#pragma endregion

    // Dropout layer wrapping torch::nn::Dropout; `rate` is the zeroing
    // probability applied during training.
    CREATE_DEFAULT_MODULE(Dropout, torch::nn::Dropout, (double rate), (rate),);

//    CREATE_DEFAULT_MODULE(Linear, torch::nn::Linear,
//                          (int in, int
//                                  out), (in, out),);

//#ifdef Torch_Version_Less_14
//    CREATE_DEFAULT_MODULE(BatchNorm, torch::nn::BatchNorm, (
//            int64_t features,
//			double momentum = 0.1,
//            bool affine = true,
//            bool stateful = true,
//            double eps = 1e-5),
//                          (torch::nn::BatchNormOptions(features)
//                                  .affine(affine)
//                                  .stateful(stateful)
//                                  .eps(eps)
//                                  .momentum(momentum)),);
//#else
//    CREATE_DEFAULT_MODULE(BatchNorm, torch::nn::BatchNorm, (
//            int64_t features,
//            double momentum = 0.1,
//            bool affine = true,
//            bool stateful = true,
//            double eps = 1e-5),
//                          (torch::nn::BatchNormOptions(features)
//                                  .affine(affine)
//                                  .eps(eps)
//                                  .momentum(momentum)),);
//#endif

#pragma endregion BN

// Version-gated batch-normalization wrappers:
//  - libtorch < 1.4 still has the `stateful` option,
//  - libtorch < 1.5 keeps the single torch::nn::BatchNorm class without it,
//  - newer releases split it into BatchNorm1d/2d/3d with track_running_stats.
#ifdef Torch_Version_Less_14
    CREATE_DEFAULT_MODULE(BatchNorm, torch::nn::BatchNorm, (
            int64_t features,
			double momentum = 0.1,
            bool affine = true,
            bool stateful = true,
            double eps = 1e-5),
                          (torch::nn::BatchNormOptions(features)
                                  .affine(affine)
                                  .stateful(stateful)
                                  .eps(eps)
                                  .momentum(momentum)),);
#elif defined(Torch_Version_Less_15)
    CREATE_DEFAULT_MODULE(BatchNorm, torch::nn::BatchNorm, (
            int64_t features,
            double momentum = 0.1,
            bool affine = true,
            bool stateful = true,
            double eps = 1e-5),
                          (torch::nn::BatchNormOptions(features)
                                  .affine(affine)
                                  .eps(eps)
                                  .momentum(momentum)),);
#else
    CREATE_DEFAULT_MODULE(BatchNorm1d, torch::nn::BatchNorm1d, (
            int64_t features,
            double momentum = 0.1,
            bool affine = true,
            double eps = 1e-5,
            bool track_running_stats=true),
                          (torch::nn::BatchNormOptions(features)
                                  .affine(affine)
                                  .eps(eps)
                                  .momentum(momentum)
                                  .track_running_stats(track_running_stats)),);

    CREATE_DEFAULT_MODULE(BatchNorm2d, torch::nn::BatchNorm2d, (
            int64_t features,
            double momentum = 0.1,
            bool affine = true,
            double eps = 1e-5,
            bool track_running_stats=true),
                          (torch::nn::BatchNormOptions(features)
                                  .affine(affine)
                                  .eps(eps)
                                  .momentum(momentum)
                                  .track_running_stats(track_running_stats)),);

    CREATE_DEFAULT_MODULE(BatchNorm3d, torch::nn::BatchNorm3d, (
            int64_t features,
            double momentum = 0.1,
            bool affine = true,
            double eps = 1e-5,
            bool track_running_stats=true),
                          (torch::nn::BatchNormOptions(features)
                                  .affine(affine)
                                  .eps(eps)
                                  .momentum(momentum)
                                  .track_running_stats(track_running_stats)),);
#endif

#pragma endregion

#pragma region Activation
        // ELU activation wrapper; forward is the default Tensor -> Tensor mapping.
        // `inplace` has no default and must be passed explicitly by callers.
        DEFINE_MODULE(ELU, torch::nn::ELU)
            DEFAULT_CTOR(ELU,(bool inplace, double alpha = 1.0), (torch::nn::ELUOptions().inplace(inplace).alpha(alpha)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // RReLU wrapper: randomized leaky ReLU with slope sampled in [lower, upper].
        DEFINE_MODULE(RReLU, torch::nn::RReLU)
            DEFAULT_CTOR(RReLU,(bool inplace, double lower = 1.0 / 8.0, double upper = 1.0 / 3.0), (torch::nn::RReLUOptions().inplace(inplace).lower(lower).upper(upper)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // ReLU wrapper; ReLUOptions takes `inplace` positionally.
        DEFINE_MODULE(ReLU, torch::nn::ReLU)
            DEFAULT_CTOR(ReLU,(bool inplace), (torch::nn::ReLUOptions(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Hardtanh wrapper: clamps input to [min_val, max_val].
        DEFINE_MODULE(Hardtanh, torch::nn::Hardtanh)
            DEFAULT_CTOR(Hardtanh, (bool inplace, double min_val = -1.0, double max_val = 1.0),
                         (torch::nn::HardtanhOptions().min_val(min_val).max_val(max_val).inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Hardshrink wrapper with shrink threshold `lambda`.
        DEFINE_MODULE(Hardshrink, torch::nn::Hardshrink)
            DEFAULT_CTOR(Hardshrink, (double lambda),
                         (torch::nn::HardshrinkOptions().lambda(lambda)));
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // LogSigmoid wrapper: no options, default forward only (uses the
        // generated null ctor from DEFINE_MODULE).
        DEFINE_MODULE(LogSigmoid, torch::nn::LogSigmoid)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // LeakyReLU wrapper with configurable negative slope.
        DEFINE_MODULE(LeakyReLU, torch::nn::LeakyReLU)
            DEFAULT_CTOR(LeakyReLU, (bool inplace, double negative_slope = 1e-2),
                         (torch::nn::LeakyReLUOptions().negative_slope(negative_slope).inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // PReLU wrapper; `weight` mirrors the learnable slope tensor owned by
        // the underlying torch impl (copied from get_ptr()->weight at ctor time).
        DEFINE_MODULE(PReLU, torch::nn::PReLU)
            Tensor weight;
            DEFINE_CTOR(PReLU, (int64_t num_parameters, double init),
                        (torch::nn::PReLUOptions().num_parameters(num_parameters).init(init)),
                        weight(get_ptr()->weight))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS


        // ReLU6 wrapper: ReLU clamped at 6.
        DEFINE_MODULE(ReLU6, torch::nn::ReLU6)
            DEFAULT_CTOR(ReLU6, (bool inplace), (torch::nn::ReLU6Options().inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // SELU wrapper (self-normalizing ELU variant).
        DEFINE_MODULE(SELU, torch::nn::SELU)
            DEFAULT_CTOR(SELU, (bool inplace), (torch::nn::SELUOptions().inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // CELU wrapper with scale parameter `alpha`.
        DEFINE_MODULE(CELU, torch::nn::CELU)
            DEFAULT_CTOR(CELU, (double alpha, bool inplace = false),
                         (torch::nn::CELUOptions().alpha(alpha).inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // GELU wrapper: no options, default forward only.
        DEFINE_MODULE(GELU, torch::nn::GELU)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Sigmoid wrapper: no options, default forward only.
        DEFINE_MODULE(Sigmoid, torch::nn::Sigmoid)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softplus wrapper; above `threshold` the function reverts to linear.
        DEFINE_MODULE(Softplus, torch::nn::Softplus)
            DEFAULT_CTOR(Softplus, (double beta, double threshold),
                         (torch::nn::SoftplusOptions().beta(beta).threshold(threshold)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softshrink wrapper with shrink threshold `lambda`.
        DEFINE_MODULE(Softshrink, torch::nn::Softshrink)
            DEFAULT_CTOR(Softshrink, (double lambda) ,(torch::nn::SoftshrinkOptions().lambda(lambda)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softsign wrapper: no options, default forward only.
        DEFINE_MODULE(Softsign, torch::nn::Softsign)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Tanh wrapper: no options, default forward only.
        DEFINE_MODULE(Tanh, torch::nn::Tanh)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Tanhshrink wrapper: no options, default forward only.
        DEFINE_MODULE(Tanhshrink, torch::nn::Tanhshrink)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Threshold wrapper: values <= threshold are replaced by `value`.
        DEFINE_MODULE(Threshold, torch::nn::Threshold)
            DEFAULT_CTOR(Threshold, (double threshold, double value, bool inplace = false),
                         (torch::nn::ThresholdOptions(threshold, value).inplace(inplace)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softmin wrapper along dimension `dim`.
        DEFINE_MODULE(Softmin, torch::nn::Softmin)
            DEFAULT_CTOR(Softmin, (int64_t dim), (torch::nn::SoftminOptions(dim)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softmax wrapper along dimension `dim`.
        DEFINE_MODULE(Softmax, torch::nn::Softmax)
            DEFAULT_CTOR(Softmax, (int64_t dim), (torch::nn::SoftmaxOptions(dim)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Softmax2d wrapper: no options, default forward only.
        DEFINE_MODULE(Softmax2d, torch::nn::Softmax2d)
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // LogSoftmax wrapper along dimension `dim`.
        DEFINE_MODULE(LogSoftmax, torch::nn::LogSoftmax)
            DEFAULT_CTOR(LogSoftmax, (int64_t dim), (torch::nn::LogSoftmaxOptions(dim)))
            DEFINE_DEFAULT_FORWARD_FUNC
        DEFINE_MODULE_ENDS

        // Value type returned by AdaptiveLogSoftmaxWithLoss::forward: pairs the
        // per-sample output tensor with the scalar loss. Mirrors torch::nn::ASMoutput.
        class ASMoutput {
          Tensor output;
          double loss;

        public:
          // Takes `output_` by value and moves it into place; `loss` is the scalar loss.
          ASMoutput(Tensor output_, double loss) : output(std::move(output_)), loss(loss) {}

          // Accessors are const so results can be read through const references.
          // getOutput returns a copy of the wrapper (cheap handle copy is assumed —
          // the Tensor wrapper holds a shared core elsewhere in LibDL).
          Tensor getOutput() const {
            return this->output;
          }

          double getLoss() const {
            return this->loss;
          }
        };

        // AdaptiveLogSoftmaxWithLoss wrapper. The four public fields mirror the
        // corresponding members of the underlying torch impl, copied once at
        // construction. forward returns an ASMoutput (output tensor + loss).
        DEFINE_MODULE(AdaptiveLogSoftmaxWithLoss, torch::nn::AdaptiveLogSoftmaxWithLoss)
            std::vector<int64_t> cutoffs;
            int64_t shortlist_size;
            int64_t n_clusters;
            int64_t head_size;
            //todo add Linear attribute
            DEFINE_CTOR(AdaptiveLogSoftmaxWithLoss,
                        (int64_t in_features, int64_t n_classes, const std::vector<int64_t>& cutoffs, double div_value = 4, bool head_bias = false),
                        (torch::nn::AdaptiveLogSoftmaxWithLossOptions(in_features, n_classes, cutoffs)
                        .div_value(div_value)
                        .head_bias(head_bias))
                        ,cutoffs(get_ptr()->cutoffs),
                         shortlist_size(get_ptr()->shortlist_size), n_clusters(get_ptr()->n_clusters),head_size(get_ptr()->head_size))
            // NOTE(review): predict/_get_full_log_prob/log_prob pass the Tensor
            // wrapper straight through (no .core), unlike forward below —
            // presumably Tensor converts implicitly to torch::Tensor; confirm.
            Tensor predict(const Tensor& input){
                return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->predict(input));
            }

            Tensor _get_full_log_prob(const Tensor& input, const Tensor& head_output){
                return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->_get_full_log_prob(input,head_output));
            }

            Tensor log_prob(const Tensor& input){
                return Tensor(std::dynamic_pointer_cast<ImplClassImpl >(core_)->log_prob(input));
            }

            // Repackages the native ASMoutput into the LibDL ASMoutput above.
            ASMoutput forward(const Tensor& input, const Tensor& target){
                torch::nn::ASMoutput output = std::dynamic_pointer_cast<ImplClassImpl >(core_)->forward(input.core, target.core);
                return ASMoutput(output.output, output.loss);
            }

        DEFINE_MODULE_ENDS

        // MultiheadAttention wrapper. The public fields mirror the underlying
        // torch impl's parameters/buffers, copied once at construction.
        DEFINE_MODULE(MultiheadAttention, torch::nn::MultiheadAttention)
            bool _qkv_same_embed_dim;
            Tensor in_proj_weight;
            Tensor in_proj_bias;
            Tensor bias_k;
            Tensor bias_v;
            Tensor q_proj_weight;
            Tensor k_proj_weight;
            Tensor v_proj_weight;
            int64_t head_dim;

            // NOTE(review): add_bias_kv defaults to true here, whereas PyTorch's
            // default is false — confirm this divergence is intentional.
            DEFINE_CTOR(MultiheadAttention,
                    (int64_t embed_dim,
                            int64_t num_heads,
                            double dropout = 0.0,
                            bool bias = true,
                            bool add_bias_kv = true,
                            bool add_zero_attn = false
                            ),
                            (torch::nn::MultiheadAttentionOptions(embed_dim, num_heads)
                            .dropout(dropout)
                            .bias(bias)
                            .add_bias_kv(add_bias_kv)
                            .add_zero_attn(add_zero_attn)),
                            _qkv_same_embed_dim(get_ptr()->_qkv_same_embed_dim),
                            in_proj_weight(get_ptr()->in_proj_weight),
                            in_proj_bias(get_ptr()->in_proj_bias),
                            bias_k(get_ptr()->bias_k),
                            bias_v(get_ptr()->bias_v),
                            q_proj_weight(get_ptr()->q_proj_weight),
                            k_proj_weight(get_ptr()->k_proj_weight),
                            v_proj_weight(get_ptr()->v_proj_weight),
                            head_dim(get_ptr()->head_dim));

            // Returns (attn_output, attn_output_weights). Arguments are passed
            // to the native forward without .core — presumably relying on an
            // implicit Tensor -> torch::Tensor conversion; confirm against Tensor.h.
            std::pair<Tensor, Tensor> forward(const Tensor& query, const Tensor& key,
                                              const Tensor& value, const Tensor& key_padding_mask = {},
                                              bool need_weights = true, const Tensor& attn_mask = {}){
                std::tuple<torch::Tensor, torch::Tensor> output = std::dynamic_pointer_cast<ImplClassImpl >(core_)->forward(query,key,value ,key_padding_mask,need_weights,attn_mask);
                return std::pair<Tensor, Tensor>(std::get<0>(output),std::get<1>(output));
            };

        DEFINE_MODULE_ENDS
#pragma endregion

#pragma region Distance
    // CosineSimilarity wrapper: binary forward over two input tensors.
    DEFINE_MODULE(CosineSimilarity, torch::nn::CosineSimilarity)

        DEFAULT_CTOR(CosineSimilarity, (int64_t dim, double eps ),
                     (torch::nn::CosineSimilarityOptions().dim(dim).eps(eps)))

        Tensor forward(const Tensor &input1, const Tensor &input2) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input1.core, input2.core));
        }
    DEFINE_MODULE_ENDS

    // PairwiseDistance wrapper: p-norm distance between two batches of vectors.
    DEFINE_MODULE(PairwiseDistance, torch::nn::PairwiseDistance)

        DEFAULT_CTOR(PairwiseDistance, (double p, double eps, bool keepdim),
                     (torch::nn::PairwiseDistanceOptions().p(p).eps(eps).keepdim(keepdim)))

        Tensor forward(const Tensor &input1, const Tensor &input2) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input1.core, input2.core));
        }
    DEFINE_MODULE_ENDS

#pragma endregion

#pragma region Loss

// Loss-ctor helpers: identical to DEFAULT_CTOR/DEFINE_CTOR except they omit the
// trailing "{}" body, so each loss class can append its own brace block that
// translates the `reduction` string into the torch reduction enum after the
// impl has been constructed.
#define DEFAULT_LOSS_CTOR(CLassName, CtorList, Create)\
explicit CLassName CtorList :Module(std::make_shared<ImplClassImpl> Create)

#define DEFINE_LOSS_CTOR(ClassName, CtorList, Create, ...)\
explicit ClassName CtorList :Module(std::make_shared<ImplClassImpl> Create), __VA_ARGS__


    // MSELoss: mean-squared-error criterion.
    DEFINE_MODULE(MSELoss, torch::nn::MSELoss)


        // reduction: "mean" | "sum" | "none". Any other string is silently
        // ignored, leaving the torch default reduction in place.
        DEFAULT_LOSS_CTOR(MSELoss, (const std::string & reduction),
                          (torch::nn::MSELossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }


        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // KLDivLoss: Kullback-Leibler divergence criterion. Also accepts
    // "batchmean" in addition to the standard reduction strings.
    DEFINE_MODULE(KLDivLoss, torch::nn::KLDivLoss)

        // Unrecognized reduction strings are silently ignored.
        DEFAULT_LOSS_CTOR(KLDivLoss, (const std::string & reduction), (torch::nn::KLDivLossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            } else if (reduction == "batchmean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kBatchMean);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }

    DEFINE_MODULE_ENDS

    // L1Loss: mean absolute error criterion.
    DEFINE_MODULE(L1Loss, torch::nn::L1Loss)

        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(L1Loss, (const std::string & reduction),
                          (torch::nn::L1LossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // CrossEntropyLoss wrapper; `weight` mirrors the impl's class-weight tensor.
    DEFINE_MODULE(CrossEntropyLoss, torch::nn::CrossEntropyLoss)

        Tensor weight;

        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFINE_LOSS_CTOR(CrossEntropyLoss, (Tensor weight, int64_t ignore_index, const std::string &reduction),
                         (torch::nn::CrossEntropyLossOptions().weight(weight).ignore_index(ignore_index)),
                         weight(get_ptr()->weight)) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // CTCLoss: connectionist temporal classification criterion.
    DEFINE_MODULE(CTCLoss, torch::nn::CTCLoss)

        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(CTCLoss, (int64_t blank, const std::string &reduction, bool zero_infinity),
                          (torch::nn::CTCLossOptions().blank(blank).zero_infinity(zero_infinity
                          ))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        // Four-argument forward matching torch's CTC signature.
        Tensor forward(const Tensor &log_probs, const Tensor &targets,
                       const Tensor &input_lengths, const Tensor &target_lengths) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)->forward(log_probs.core, targets.core,
                                                                                   input_lengths.core,
                                                                                   target_lengths.core));
        }
    DEFINE_MODULE_ENDS


    // NLLLoss: negative log-likelihood criterion; `weight` mirrors the impl's
    // class-weight tensor.
    DEFINE_MODULE(NLLLoss, torch::nn::NLLLoss)

        Tensor weight;


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFINE_LOSS_CTOR(NLLLoss, (Tensor weight, int64_t ignore_index, const std::string &reduction),
                         (torch::nn::NLLLossOptions().weight(weight).ignore_index(ignore_index)),
                         weight(get_ptr()->weight)) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // PoissonNLLLoss: negative log-likelihood for a Poisson-distributed target.
    DEFINE_MODULE(PoissonNLLLoss, torch::nn::PoissonNLLLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(PoissonNLLLoss, (bool log_input, bool full, double eps, const std::string &reduction),
                          (torch::nn::PoissonNLLLossOptions().log_input(log_input).full(full).eps(eps))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // BCELoss: binary cross-entropy on probabilities (not logits).
    DEFINE_MODULE(BCELoss, torch::nn::BCELoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(BCELoss, (Tensor weight, const std::string &reduction),
                          (torch::nn::BCELossOptions().weight(weight))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // BCEWithLogitsLoss: numerically stable sigmoid + BCE; `weight` and
    // `pos_weight` mirror the impl's tensors.
    DEFINE_MODULE(BCEWithLogitsLoss, torch::nn::BCEWithLogitsLoss)

        Tensor weight;

        Tensor pos_weight;


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFINE_LOSS_CTOR(BCEWithLogitsLoss, (Tensor weight, const std::string &reduction, Tensor pos_weight),
                         (torch::nn::BCEWithLogitsLossOptions().weight(weight).pos_weight(
                                 pos_weight)), weight(get_ptr()->weight), pos_weight(get_ptr()->pos_weight)) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS


    // MarginRankingLoss: ranking criterion over two inputs and a target sign.
    DEFINE_MODULE(MarginRankingLoss, torch::nn::MarginRankingLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(MarginRankingLoss, (double margin, const std::string &reduction),
                          (torch::nn::MarginRankingLossOptions().margin(margin))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input1, const Tensor &input2, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input1.core, input2.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // HingeEmbeddingLoss: hinge criterion for +1/-1 labelled embeddings.
    DEFINE_MODULE(HingeEmbeddingLoss, torch::nn::HingeEmbeddingLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(HingeEmbeddingLoss, (double margin, const std::string &reduction),
                          (torch::nn::HingeEmbeddingLossOptions().margin(margin))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // MultiLabelMarginLoss: multi-class multi-label margin criterion.
    DEFINE_MODULE(MultiLabelMarginLoss, torch::nn::MultiLabelMarginLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(MultiLabelMarginLoss, (const std::string & reduction),
                          (torch::nn::MultiLabelMarginLossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }

    DEFINE_MODULE_ENDS

    // SmoothL1Loss: Huber-style criterion, quadratic near zero and linear beyond.
    DEFINE_MODULE(SmoothL1Loss, torch::nn::SmoothL1Loss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(SmoothL1Loss, (const std::string & reduction),
                          (torch::nn::SmoothL1LossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }

    DEFINE_MODULE_ENDS

    // SoftMarginLoss: two-class logistic criterion over +1/-1 targets.
    DEFINE_MODULE(SoftMarginLoss, torch::nn::SoftMarginLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(SoftMarginLoss, (const std::string & reduction),
                          (torch::nn::SoftMarginLossOptions())) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // MultiLabelSoftMarginLoss: multi-label one-versus-all criterion.
    DEFINE_MODULE(MultiLabelSoftMarginLoss, torch::nn::MultiLabelSoftMarginLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(MultiLabelSoftMarginLoss, (Tensor weight, const std::string &reduction),
                          (torch::nn::MultiLabelSoftMarginLossOptions().weight(weight))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // CosineEmbeddingLoss: cosine-similarity criterion over paired inputs.
    DEFINE_MODULE(CosineEmbeddingLoss, torch::nn::CosineEmbeddingLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(CosineEmbeddingLoss, (double margin, const std::string &reduction),
                          (torch::nn::CosineEmbeddingLossOptions().margin(margin))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input1, const Tensor &input2, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input1.core, input2.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // MultiMarginLoss: multi-class hinge criterion with p-norm and margin.
    DEFINE_MODULE(MultiMarginLoss, torch::nn::MultiMarginLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(MultiMarginLoss, (int64_t p, double margin, Tensor weight, const std::string &reduction),
                          (torch::nn::MultiMarginLossOptions().p(p).margin(margin).weight(weight))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &input, const Tensor &target) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(input.core, target.core));
        }
    DEFINE_MODULE_ENDS

    // TripletMarginLoss: anchor/positive/negative triplet criterion.
    DEFINE_MODULE(TripletMarginLoss, torch::nn::TripletMarginLoss)


        // reduction: "mean" | "sum" | "none"; anything else is silently ignored.
        DEFAULT_LOSS_CTOR(TripletMarginLoss,
                          (double margin, double p, bool swap, const std::string &reduction),
                          (torch::nn::TripletMarginLossOptions().margin(margin).p(p).swap(swap))) {
            if (reduction == "mean") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kMean);
            } else if (reduction == "sum") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kSum);
            } else if (reduction == "none") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.reduction(torch::kNone);
            }
        }

        Tensor forward(const Tensor &anchor, const Tensor &positive, const Tensor &negative) {
            return Tensor(std::dynamic_pointer_cast<ImplClassImpl>(core_)
                                  ->forward(anchor.core, positive.core, negative.core));
        }
    DEFINE_MODULE_ENDS


#pragma endregion

#pragma region Vision
// Like DEFAULT_LOSS_CTOR: a ctor head without the "{}" body, so vision classes
// can append a brace block that post-configures options (e.g. the mode string).
#define DEFAULT_VISION_CTOR(CLassName, CtorList, Create)\
explicit CLassName CtorList :Module(std::make_shared<ImplClassImpl> Create)

    // PixelShuffle wrapper: rearranges (C*r^2, H, W) -> (C, H*r, W*r).
    DEFINE_MODULE(PixelShuffle, torch::nn::PixelShuffle)

        DEFAULT_CTOR(PixelShuffle, (int64_t upscale_factor), (torch::nn::PixelShuffleOptions(upscale_factor)))

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    // Upsample wrapper. The string `mode` selects the interpolation kernel;
    // align_corners is applied for the linear/bilinear/bicubic/trilinear modes
    // (PyTorch defines it only for those). Unknown mode strings fall back to
    // nearest, and for "nearest" align_corners is ignored.
    DEFINE_MODULE(Upsample, torch::nn::Upsample)

        // Construct from per-dimension scale factors.
        DEFAULT_VISION_CTOR(Upsample,
                            (const std::vector<double> &scale_factor, const std::string &mode, bool align_corners),
                            (torch::nn::UpsampleOptions().scale_factor(scale_factor))) {
            if (mode == "nearest") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kNearest);
            } else if (mode == "linear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kLinear).align_corners(
                        align_corners);
            } else if (mode == "bilinear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kBilinear).align_corners(
                        align_corners);
            } else if (mode == "bicubic") {
                // Fix: bicubic honours align_corners just like the other
                // non-nearest modes; previously the argument was silently dropped.
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kBicubic).align_corners(
                        align_corners);
            } else if (mode == "trilinear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kTrilinear).align_corners(
                        align_corners);
            } else {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kNearest);
            }
        }

        // Construct from an explicit output size instead of scale factors.
        DEFAULT_VISION_CTOR(Upsample,
                            (const std::vector<int64_t> &size, const std::string &mode, bool align_corners),
                            (torch::nn::UpsampleOptions().size(size))) {
            if (mode == "nearest") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kNearest);
            } else if (mode == "linear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kLinear).align_corners(
                        align_corners);
            } else if (mode == "bilinear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kBilinear).align_corners(
                        align_corners);
            } else if (mode == "bicubic") {
                // Fix: apply align_corners for bicubic here as well.
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kBicubic).align_corners(
                        align_corners);
            } else if (mode == "trilinear") {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kTrilinear).align_corners(
                        align_corners);
            } else {
                std::dynamic_pointer_cast<ImplClassImpl>(core_)->options.mode(torch::kNearest);
            }
        }

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    // UpsamplingNearest2d: convenience alias over torch::nn::Upsample with the
    // mode fixed to nearest; constructible from a size or from scale factors.
    DEFINE_MODULE(UpsamplingNearest2d, torch::nn::Upsample)

        DEFAULT_CTOR(UpsamplingNearest2d, (const std::vector<int64_t> &size),
                     (torch::nn::UpsampleOptions().size(size).mode(torch::kNearest)))

        DEFAULT_CTOR(UpsamplingNearest2d, (const std::vector<double> &scale_factor),
                     (torch::nn::UpsampleOptions().scale_factor(scale_factor).mode(
                             torch::kNearest)))

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

    // UpsamplingBilinear2d: convenience alias over torch::nn::Upsample with the
    // mode fixed to bilinear and align_corners forced to true.
    DEFINE_MODULE(UpsamplingBilinear2d, torch::nn::Upsample)

        DEFAULT_CTOR(UpsamplingBilinear2d, (const std::vector<int64_t> &size),
                     (torch::nn::UpsampleOptions().size(size).align_corners(true).mode(torch::kBilinear)))

        DEFAULT_CTOR(UpsamplingBilinear2d, (const std::vector<double> &scale_factor),
                     (torch::nn::UpsampleOptions().scale_factor(scale_factor).align_corners(true).mode(
                             torch::kBilinear)))

        DEFINE_DEFAULT_FORWARD_FUNC
    DEFINE_MODULE_ENDS

#pragma endregion

}