//
// Created by SongpingWang on 2025/2/4.
//

#ifndef LSTM_XIEXIN2_MODEL_H
#define LSTM_XIEXIN2_MODEL_H
#include <torch/torch.h>

using namespace torch;

/// Builds one Darknet-style "convolutional" unit:
/// Conv2d -> BatchNorm2d -> LeakyReLU(negative_slope = 0.1, in-place).
/// The convolution carries no bias because the following BatchNorm2d
/// subtracts the per-channel mean, making a bias redundant.
///
/// @param in_channels  number of input feature maps
/// @param out_channels number of output feature maps
/// @param kernel_size  square kernel side length
/// @param stride       convolution stride (default 1)
/// @param padding      zero padding on each side (default 0)
/// @return an nn::Sequential that is NOT yet registered on any parent
///         module; the caller must register_module() it.
///
/// Note: declared `inline` (not `static`) so a header-defined function has
/// one shared definition instead of an internal-linkage copy per TU.
inline torch::nn::Sequential _make_convolutional(
        int64_t in_channels,
        int64_t out_channels,
        int64_t kernel_size,
        int64_t stride = 1,
        int64_t padding = 0) {
    return nn::Sequential(
            nn::Conv2d(nn::Conv2dOptions(in_channels, out_channels, kernel_size)
                               .stride(stride).padding(padding).bias(false)),
            nn::BatchNorm2d(out_channels),
            nn::LeakyReLU(nn::LeakyReLUOptions().negative_slope(0.1).inplace(true))
    );
}


// Bottleneck residual block (Darknet style): a 1x1 convolution halves the
// channel count, a 3x3 convolution restores it, and the result is added
// back onto the block input before a final LeakyReLU.
struct ResidualBlockImpl : nn::Module {
    nn::Conv2d conv1{ nullptr };
    nn::BatchNorm2d bn1{ nullptr };
    nn::Conv2d conv2{ nullptr };
    nn::BatchNorm2d bn2{ nullptr };
    nn::LeakyReLU relu{ nullptr };

    explicit ResidualBlockImpl(int64_t in_channels) {
        const int64_t mid_channels = in_channels / 2;
        // 1x1 squeeze; bias dropped since BatchNorm follows each conv.
        conv1 = register_module(
                "conv1",
                nn::Conv2d(nn::Conv2dOptions(in_channels, mid_channels, 1)
                                   .stride(1).padding(0).bias(false)));
        bn1 = register_module("bn1", nn::BatchNorm2d(mid_channels));
        // 3x3 expand back to in_channels; padding=1 keeps the spatial size,
        // which the residual addition requires.
        conv2 = register_module(
                "conv2",
                nn::Conv2d(nn::Conv2dOptions(mid_channels, in_channels, 3)
                                   .stride(1).padding(1).bias(false)));
        bn2 = register_module("bn2", nn::BatchNorm2d(in_channels));
        relu = register_module(
                "relu",
                nn::LeakyReLU(nn::LeakyReLUOptions().negative_slope(0.1).inplace(true)));
    }

    // y = LeakyReLU( bn2(conv2( LeakyReLU(bn1(conv1(x))) )) + x )
    torch::Tensor forward(torch::Tensor x) {
        const auto shortcut = x;
        auto out = relu->forward(bn1->forward(conv1->forward(x)));
        out = bn2->forward(conv2->forward(out));
        out += shortcut;
        return relu->forward(out);
    }
};
TORCH_MODULE(ResidualBlock);


// Backbone CNN: three conv stages (two of them strided) interleaved with
// residual layers, finished by an adaptive max-pool that collapses the
// height to 1 so the output ([N, 128, 1, 50]) can feed a sequence model.
struct QLNetImpl : nn::Module {
    nn::Sequential conv1{ nullptr };
    nn::Sequential conv2{ nullptr };
    nn::Sequential layer1{ nullptr };
    nn::Sequential conv3{ nullptr };
    nn::Sequential layer2{ nullptr };
    nn::AdaptiveMaxPool2d adapt_max_pool2d{ nullptr };

    QLNetImpl() {
        // Build the network. Every submodule MUST go through register_module:
        // the original code assigned the conv Sequentials without registering
        // them, which left their Conv/BN parameters out of parameters(),
        // to(device) and serialization — they would never train or save.
        conv1 = register_module("conv1", _make_convolutional(3, 32, 3, 1, 1));
        conv2 = register_module("conv2", _make_convolutional(32, 64, 3, 2, 1));
        layer1 = _make_layer(64, 1, "layer1");
        conv3 = register_module("conv3", _make_convolutional(64, 128, 3, 2, 1));
        layer2 = _make_layer(128, 2, "layer2");
        adapt_max_pool2d = register_module("adapt_max_pool2d", nn::AdaptiveMaxPool2d(nn::AdaptiveMaxPool2dOptions({ 1, 50 })));

        // Kaiming (He) init matched to the activations actually used:
        // `a` is LeakyReLU's negative slope (0.1 everywhere in this net),
        // fan-out mode preserves variance in the backward pass.
        for (const auto& module : this->modules(/*include_self=*/false)) {
            if (auto conv = module->as<nn::Conv2d>()) {
                nn::init::kaiming_normal_(
                        conv->weight,
                        /*a=*/0.1,  // LeakyReLU negative slope
                        torch::kFanOut,
                        torch::kLeakyReLU);
            }
        }

    }

    torch::Tensor forward(torch::Tensor x) {
        x = conv1->forward(x);            // x: [N,  32,100,200]
        x = conv2->forward(x);            // x: [N,  64, 50,100]
        x = layer1->forward(x);           // x: [N,  64, 50,100]
        x = conv3->forward(x);            // x: [N, 128, 25, 50]
        x = layer2->forward(x);           // x: [N, 128, 25, 50]
        x = adapt_max_pool2d->forward(x); // x: [N, 128,  1, 50]
        return x;
    }

    // Builds `repeat_count` ResidualBlocks of constant width `in_channels`
    // and registers them under `layer_name`.
    nn::Sequential _make_layer(int64_t in_channels, int repeat_count, const std::string& layer_name) {
        nn::Sequential seq;
        for (int i = 0; i < repeat_count; ++i) {
            // Sequential::push_back registers each block inside the
            // Sequential as "0", "1", ... — no per-block register_module.
            seq->push_back(ResidualBlock(in_channels));
        }
        // Register the whole Sequential once on this module so its
        // parameters appear exactly once under "<layer_name>.<i>...".
        return register_module(layer_name, seq);
    }
};
TORCH_MODULE(QLNet);


// CRNN-style recognizer: CNN feature extractor -> bidirectional 2-layer
// GRU -> per-timestep linear classifier -> log-softmax over classes,
// emitted time-major ([T, N, C]) as CTC-style losses expect.
struct ModelImpl : nn::Module {
    QLNet feature_extractor{ nullptr };
    int64_t num_layers = 2;
    int64_t n_directions = 2;
    int64_t hidden_size = 80;
    nn::GRU gru{ nullptr };
    nn::Linear fc{ nullptr };
    nn::LogSoftmax log_softmax{ nullptr };

    explicit ModelImpl(int64_t output_size) {
        feature_extractor = register_module("feature_extractor", QLNet());
        // input_size=128 matches the channel count the CNN emits per step.
        gru = register_module(
                "gru",
                nn::GRU(nn::GRUOptions(128, hidden_size)
                                .num_layers(num_layers)
                                .bidirectional(true)
                                .batch_first(true)));
        // Forward and backward directions are concatenated per step,
        // hence hidden_size * n_directions input features.
        fc = register_module("fc", nn::Linear(hidden_size * n_directions, output_size));
        // dim=2 = class dimension of the final [T, N, C] layout.
        log_softmax = register_module("log_softmax", nn::LogSoftmax(2));
    }

    torch::Tensor forward(const torch::Tensor& input) {
        // CNN features [N, 128, 1, 50]: drop the unit height, then move
        // channels last so the GRU sees [N, 50, 128] (batch_first).
        auto features = feature_extractor->forward(input)
                                .squeeze(2)
                                .permute({ 0, 2, 1 });

        // Zero initial hidden state on the same device as the input.
        auto h0 = torch::zeros(
                { num_layers * n_directions, features.size(0), hidden_size },
                input.device());                                   // [4, N, 80]

        auto rnn_out = std::get<0>(gru->forward(features, h0));    // [N, 50, 160]
        auto logits = fc->forward(rnn_out);                        // [N, 50, C]
        // Reorder to time-major [50, N, C] before the log-softmax.
        return log_softmax->forward(logits.permute({ 1, 0, 2 }));
    }
};
TORCH_MODULE(Model);


#endif //LSTM_XIEXIN2_MODEL_H
