//
// Created by SongpingWang on 2025/2/6.
//

#ifndef YOLOV11_CPP_MODEL_H
#define YOLOV11_CPP_MODEL_H

#include <torch/torch.h>
#include <utility>
#include <algorithm>
#include <torch/types.h>


// Builds anchor-point centers and per-anchor stride tensors for a set of
// feature maps.
//
// x:       per-level feature maps, each [b, c, h, w]; dtype/device of x[0]
//          is used for all outputs.
// strides: 1-D tensor with one stride value per level.
// offset:  grid-cell center offset (0.5 = cell centers).
// Returns: {anchors [sum(h*w), 2] as (x, y), strides [sum(h*w), 1]}.
//
// `inline` keeps this header-defined function ODR-safe across translation units.
inline std::tuple<torch::Tensor, torch::Tensor>
make_anchors(const std::vector<torch::Tensor> &x, const torch::Tensor &strides, double offset = 0.5) {
    assert(!x.empty());
    std::vector<torch::Tensor> anchor_tensor, stride_tensor;
    anchor_tensor.reserve(x.size());
    stride_tensor.reserve(x.size());

    auto options = torch::TensorOptions()
            .dtype(x[0].dtype())
            .device(x[0].device());

    for (size_t i = 0; i < x.size(); ++i) {
        auto h = x[i].size(2);   // feature-map height
        auto w = x[i].size(3);   // feature-map width
        auto stride = strides[static_cast<int64_t>(i)].item<double>();

        // Cell-center coordinates along each axis.
        auto sx = torch::arange(w, options).add(offset);
        auto sy = torch::arange(h, options).add(offset);
        // Explicit "ij" indexing keeps the (h, w) layout and silences the
        // libtorch deprecation warning about the unspecified default.
        auto grids = torch::meshgrid({sy, sx}, /*indexing=*/"ij");

        // (x, y) pairs flattened to [h*w, 2].
        anchor_tensor.push_back(torch::stack({grids[1], grids[0]}, -1).view({-1, 2}));
        stride_tensor.push_back(torch::full({h * w, 1}, stride, options));
    }

    return std::make_tuple(torch::cat(anchor_tensor), torch::cat(stride_tensor));
}


// Conv: Conv2d -> BatchNorm2d -> activation, with support for folding the
// BatchNorm into the convolution at inference time (see fuse_conv).
struct ConvImpl : torch::nn::Module {
    torch::nn::Conv2d conv{nullptr};
    torch::nn::BatchNorm2d norm{nullptr};
    torch::nn::Functional relu{nullptr};
    bool fused;  // true once BatchNorm has been folded into conv; norm is then skipped

    ConvImpl(int64_t in_ch,
             int64_t out_ch,
             torch::nn::Functional activation,
             int64_t k = 1,
             int64_t s = 1,
             int64_t p = 0,
             int64_t g = 1) : fused(false) {
        // Bias is redundant in front of BatchNorm, so it is disabled here.
        auto conv_opts = torch::nn::Conv2dOptions(in_ch, out_ch, k)
                .stride(s)
                .padding(p)
                .groups(g)
                .bias(false);
        conv = register_module("conv", torch::nn::Conv2d(conv_opts));

        auto norm_opts = torch::nn::BatchNormOptions(out_ch)
                .eps(0.001)
                .momentum(0.03);
        norm = register_module("norm", torch::nn::BatchNorm2d(norm_opts));

        relu = register_module("relu", std::move(activation));
    }

    torch::Tensor forward(const torch::Tensor &x) {
        auto y = conv->forward(x);
        if (!fused) {
            y = norm->forward(y);  // normal path: Conv -> BN -> activation
        }
        return relu->forward(y);   // fused path: Conv (BN folded in) -> activation
    }
};

TORCH_MODULE(Conv);  // generates the Conv module holder


// Residual 结构体
struct ResidualImpl : torch::nn::Module {
    Conv conv1{nullptr}, conv2{nullptr};

    explicit ResidualImpl(int64_t ch, double e = 0.5) {
        auto mid_ch = static_cast<int64_t>((double) ch * e);

        // 显式指定所有卷积参数
        conv1 = register_module("conv1",
                                Conv(ch, mid_ch, torch::nn::Functional(torch::nn::functional::silu),
                                        /*k=*/3,
                                        /*s=*/1,
                                        /*p=*/1,
                                        /*g=*/1));  // 显式指定groups参数

        conv2 = register_module("conv2",
                                Conv(mid_ch, ch, torch::nn::Functional(torch::nn::functional::silu),
                                        /*k=*/3,
                                        /*s=*/1,
                                        /*p=*/1,
                                        /*g=*/1));  // 显式指定groups参数
    }

    torch::Tensor forward(const torch::Tensor &x) {
        return x + conv2->forward(conv1->forward(x));  // 显式调用forward
    }
};

TORCH_MODULE(Residual);


// CSPModule 结构体修复
struct CSPModuleImpl : torch::nn::Module {
    Conv conv1{nullptr}, conv2{nullptr}, conv3{nullptr};
    torch::nn::Sequential res_m{nullptr};

    CSPModuleImpl(int64_t in_ch, int64_t out_ch) {
        int64_t half_out = out_ch / 2;

        // 注册带独立激活函数实例的卷积层
        conv1 = register_module("conv1", Conv(
                in_ch, half_out,
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));
        conv2 = register_module("conv2", Conv(
                in_ch, half_out,
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));
        conv3 = register_module("conv3", Conv(
                2 * half_out, out_ch,
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));

        // 注册残差模块序列
        res_m = register_module("res_m", torch::nn::Sequential(
                Residual(half_out, 1.0),
                Residual(half_out, 1.0)
        ));
    }

    torch::Tensor forward(const torch::Tensor &x) {
        auto branch1 = res_m->forward(conv1->forward(x));
        auto branch2 = conv2->forward(x);
        return conv3->forward(torch::cat({branch1, branch2}, 1));
    }
};

TORCH_MODULE(CSPModule);


// CSP: C2f-style stage. conv1 splits the input into two halves; each inner
// block appends a new feature map derived from the previous one, and conv2
// fuses the (2 + n) accumulated maps.
//
// in_ch/out_ch: stage channels; n: number of inner blocks;
// csp: true -> CSPModule blocks, false -> Residual blocks;
// r: channel reduction ratio for the hidden width (out_ch / r).
struct CSPImpl : torch::nn::Module {
    Conv conv1{nullptr}, conv2{nullptr};
    torch::nn::ModuleList res_m{nullptr};

    CSPImpl(int64_t in_ch, int64_t out_ch, int64_t n, bool csp, int64_t r) {
        res_m = register_module("res_m", torch::nn::ModuleList());

        conv1 = register_module("conv1", Conv(
                in_ch, 2 * (out_ch / r),
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));
        conv2 = register_module("conv2", Conv(
                (2 + n) * (out_ch / r), out_ch,
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));

        // ModuleList::push_back already registers each submodule (as
        // "res_m.<i>"). The previous code additionally called
        // register_module("res_<i>"/"csp_<i>", ...), registering every block
        // twice and duplicating all of its parameters in
        // named_parameters()/state_dict under a second key.
        for (int64_t i = 0; i < n; ++i) {
            if (csp) {
                res_m->push_back(CSPModule(out_ch / r, out_ch / r));
            } else {
                res_m->push_back(Residual(out_ch / r));
            }
        }
    }

    torch::Tensor forward(const torch::Tensor &x) {
        // Split into two halves; every inner block appends a feature map and
        // conv2 fuses all of them (dense cross-stage connectivity).
        auto y = conv1->forward(x).chunk(2, 1);
        std::vector<torch::Tensor> outputs(y.begin(), y.end());

        for (auto &module: *res_m) {
            // ModuleList stores type-erased modules; dispatch on the concrete type.
            if (auto residual = module->as<Residual>()) {
                outputs.push_back(residual->forward(outputs.back()));
            } else if (auto csp_module = module->as<CSPModule>()) {
                outputs.push_back(csp_module->forward(outputs.back()));
            }
        }
        return conv2->forward(torch::cat(outputs, 1));
    }
};

TORCH_MODULE(CSP);

// SPP: spatial pyramid pooling (fast variant) — three cascaded stride-1 max
// pools whose outputs are concatenated with the input and fused by conv2.
struct SPPImpl : torch::nn::Module {
    Conv conv1{nullptr}, conv2{nullptr};
    torch::nn::MaxPool2d res_m{nullptr};

    SPPImpl(int64_t in_ch, int64_t out_ch, int64_t k = 5) {
        conv1 = register_module("conv1", Conv(
                in_ch, in_ch / 2,
                torch::nn::Functional(torch::nn::functional::silu),
                3, 1, 1, 1));
        conv2 = register_module("conv2", Conv(
                in_ch * 2, out_ch,
                torch::nn::Functional(torch::nn::functional::silu), 3, 1, 1, 1));
        // Stride-1 pooling with k/2 padding keeps the spatial size unchanged.
        res_m = register_module("res_m", torch::nn::MaxPool2d(
                torch::nn::MaxPool2dOptions(k)
                        .stride(1)
                        .padding(k / 2)));
    }

    torch::Tensor forward(torch::Tensor x) {
        x = conv1->forward(x);
        // Cascading the same pool three times grows the receptive field
        // (equivalent to pooling with kernels k, 2k-1, 3k-2).
        std::vector<torch::Tensor> feats{x};
        for (int i = 0; i < 3; ++i) {
            feats.push_back(res_m->forward(feats.back()));
        }
        return conv2->forward(torch::cat(feats, 1));
    }
};

TORCH_MODULE(SPP);

// Attention: multi-head self-attention over flattened spatial positions,
// plus a depthwise 3x3 positional branch applied to the value projection.
// Assumes ch is divisible by num_head (and dim_head by 2) — TODO confirm
// against the channel configs used by callers.
struct AttentionImpl : torch::nn::Module {
    int64_t num_head;
    int64_t dim_head;   // channels per head: ch / num_head
    int64_t dim_key;    // query/key channels per head: dim_head / 2
    double scale;       // attention temperature: 1 / sqrt(dim_key)
    Conv qkv{nullptr};
    Conv conv1{nullptr}, conv2{nullptr};

    AttentionImpl(int64_t ch, int64_t num_head)
            : num_head(num_head),
              dim_head(ch / num_head),
              dim_key(dim_head / 2),
              scale(std::pow(dim_key, -0.5)) {
        // (The previous version created an unused local torch::nn::Identity.)

        // 1x1 projection producing q, k and v in a single pass.
        qkv = register_module("qkv", Conv(ch, ch + dim_key * num_head * 2,
                                          torch::nn::Functional(torch::nn::Identity())));

        // Depthwise 3x3 (groups == ch) positional-encoding branch on v.
        conv1 = register_module("conv1",
                                Conv(ch, ch,
                                     torch::nn::Functional(torch::nn::Identity()),
                                        /*k=*/3,
                                        /*s=*/1,
                                        /*p=*/1,
                                        /*g=*/ch));

        // Final 1x1 output projection.
        conv2 = register_module("conv2",
                                Conv(ch, ch, torch::nn::Functional(torch::nn::Identity())));
    }

    torch::Tensor forward(const torch::Tensor &x) {
        auto b = x.size(0);
        auto c = x.size(1);
        auto h = x.size(2);
        auto w = x.size(3);

        // Project, then reshape to [b, heads, 2*dim_key + dim_head, h*w].
        auto qkv_output = qkv->forward(x);
        qkv_output = qkv_output.view({b, num_head, dim_key * 2 + dim_head, h * w});

        auto splits = qkv_output.split_with_sizes({dim_key, dim_key, dim_head}, /*dim=*/2);
        auto q = splits[0];
        auto k = splits[1];
        auto v = splits[2];

        // Scaled dot-product attention over the h*w spatial positions.
        auto attn = torch::matmul(q.transpose(-2, -1), k) * scale;
        attn = torch::softmax(attn, -1);

        // Attention output plus the depthwise positional branch on v.
        auto x_attn = torch::matmul(v, attn.transpose(-2, -1)).view({b, c, h, w})
                      + conv1->forward(v.view({b, c, h, w}));
        return conv2->forward(x_attn);
    }
};

TORCH_MODULE(Attention);

// PSABlock: transformer-style unit — residual attention followed by a
// residual 2x-expansion feed-forward (SiLU then linear 1x1 convs).
struct PSABlockImpl : torch::nn::Module {
    Attention conv1{nullptr};
    torch::nn::Sequential conv2{nullptr};

    PSABlockImpl(int64_t ch, int64_t num_head) {
        conv1 = register_module("conv1", Attention(ch, num_head));
        // Feed-forward: expand to 2*ch with SiLU, project back with no activation.
        conv2 = register_module("conv2", torch::nn::Sequential(
                Conv(ch, ch * 2, torch::nn::Functional(torch::nn::SiLU())),
                Conv(ch * 2, ch, torch::nn::Functional(torch::nn::Identity()))
        ));
    }

    torch::Tensor forward(torch::Tensor x) {
        auto attended = x + conv1->forward(x);       // residual attention
        return attended + conv2->forward(attended);  // residual feed-forward
    }
};

TORCH_MODULE(PSABlock);

// PSA: partial self-attention stage. Half the channels pass through n
// PSABlocks while the other half is carried through untouched.
struct PSAImpl : torch::nn::Module {
    Conv conv1{nullptr}, conv2{nullptr};
    torch::nn::Sequential res_m{nullptr};

    PSAImpl(int64_t ch, int64_t n) {
        const int64_t half_ch = ch / 2;
        conv1 = register_module("conv1", Conv(ch, 2 * half_ch, torch::nn::Functional(torch::silu)));
        conv2 = register_module("conv2", Conv(2 * half_ch, ch, torch::nn::Functional(torch::silu)));

        // n attention blocks on the second half of the channels.
        // NOTE(review): head count is derived as ch / 64 — confirm this
        // matches the intended configuration for small channel widths.
        res_m = register_module("res_m", torch::nn::Sequential());
        for (int64_t i = 0; i < n; ++i) {
            res_m->push_back(PSABlock(half_ch, ch / 64));
        }
    }

    torch::Tensor forward(const torch::Tensor &x) {
        auto halves = conv1->forward(x).chunk(2, 1);
        auto passthrough = halves[0];              // untouched branch
        auto refined = res_m->forward(halves[1]);  // attention branch
        return conv2->forward(torch::cat({passthrough, refined}, 1));
    }
};

TORCH_MODULE(PSA);


// DarkNet 结构体
struct DarkNetImpl : torch::nn::Module {
    torch::nn::Sequential p1{nullptr}, p2{nullptr}, p3{nullptr}, p4{nullptr}, p5{nullptr};

    DarkNetImpl(const std::vector<int64_t> &width, const std::vector<int64_t> &depth, const std::vector<bool> &csp) {
        p1 = register_module("p1", torch::nn::Sequential(
                Conv(width[0], width[1], torch::nn::Functional(torch::silu), 3, 2, 1)));
        p2 = register_module("p2", torch::nn::Sequential(
                Conv(width[1], width[2], torch::nn::Functional(torch::silu), 3, 2, 1),
                CSP(width[2], width[3], depth[0], csp[0], 4)));
        p3 = register_module("p3", torch::nn::Sequential(
                Conv(width[3], width[3], torch::nn::Functional(torch::silu), 3, 2, 1),
                CSP(width[3], width[4], depth[1], csp[0], 4)));
        p4 = register_module("p4", torch::nn::Sequential(
                Conv(width[4], width[4], torch::nn::Functional(torch::silu), 3, 2, 1),
                CSP(width[4], width[4], depth[2], csp[1], 2)));
        p5 = register_module("p5", torch::nn::Sequential(
                Conv(width[4], width[5], torch::nn::Functional(torch::silu), 3, 2, 1),
                CSP(width[5], width[5], depth[3], csp[1], 2),
                SPP(width[5], width[5]),
                PSA(width[5], depth[4])));
    }

    std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> forward(const torch::Tensor &x) {
        auto p1_out = p1->forward(x);
        auto p2_out = p2->forward(p1_out);
        auto p3_out = p3->forward(p2_out);
        auto p4_out = p4->forward(p3_out);
        auto p5_out = p5->forward(p4_out);
        return std::make_tuple(p3_out, p4_out, p5_out);
    }
};

TORCH_MODULE(DarkNet);

// DarkFPN 结构体
struct DarkFPNImpl : torch::nn::Module {
    torch::nn::Upsample up{nullptr};
    CSP h1{nullptr}, h2{nullptr}, h4{nullptr}, h6{nullptr};
    Conv h3{nullptr}, h5{nullptr};

    DarkFPNImpl(const std::vector<int64_t> &width, const std::vector<int64_t> &depth, const std::vector<bool> &csp) {
        up = register_module("up", torch::nn::Upsample(
                torch::nn::UpsampleOptions().scale_factor(std::vector<double>{2.0, 2.0})));
        h1 = register_module("h1", CSP(width[4] + width[5], width[4], depth[5], csp[0], 2));

        /*h2 = register_module("h2", CSP(width[4] + width[3], width[3], depth[5], csp[0], 2));*/
        h2 = register_module("h2", CSP(width[4] + width[4], width[3], depth[5], csp[0], 2));
        h3 = register_module("h3", Conv(width[3], width[3], torch::nn::Functional(torch::silu), 3, 2, 1));
        h4 = register_module("h4", CSP(width[3] + width[4], width[4], depth[5], csp[0], 2));
        h5 = register_module("h5", Conv(width[4], width[4], torch::nn::Functional(torch::silu), 3, 2, 1));
        h6 = register_module("h6", CSP(width[4] + width[5], width[5], depth[5], csp[1], 2));
    }

    std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
    forward(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> x) {
        auto [p3, p4, p5] = std::move(x);
        p4 = h1->forward(torch::cat({up->forward(p5), p4}, 1));
        p3 = h2->forward(torch::cat({up->forward(p4), p3}, 1));
        p4 = h4->forward(torch::cat({h3->forward(p3), p4}, 1));
        p5 = h6->forward(torch::cat({h5->forward(p4), p5}, 1));
        return std::make_tuple(p3, p4, p5);
    }
};

TORCH_MODULE(DarkFPN);

// DFL: Distribution Focal Loss decoder — converts a discrete distribution
// over `ch` bins into its expected value via a fixed-weight 1x1 conv.
struct DFLImpl : torch::nn::Module {
    int64_t ch;  // number of bins per box coordinate
    torch::nn::Conv2d conv{nullptr};

    explicit DFLImpl(int64_t ch = 16) : ch(ch) {
        conv = register_module("conv", torch::nn::Conv2d(torch::nn::Conv2dOptions(ch, 1, 1).bias(false)));
        conv->weight.set_requires_grad(false);

        // Weights fixed to [0, 1, ..., ch-1], so the conv computes E[bin].
        auto bins = torch::arange(ch, torch::dtype(torch::kFloat))
                .view({1, ch, 1, 1}).contiguous();
        conv->weight.data().copy_(bins);
    }

    torch::Tensor forward(torch::Tensor x) {
        // x: [b, 4*ch, a] where a is the number of anchor positions.
        auto b = x.size(0);
        auto a = x.size(2);

        auto probs = x.view({b, 4, ch, a})
                .transpose(2, 1)   // -> [b, ch, 4, a]
                .contiguous()      // ensure a contiguous layout for the conv
                .softmax(1);       // normalize across the ch bins

        // Expectation per coordinate -> [b, 4, a].
        return conv->forward(probs).view({b, 4, a});
    }
};

TORCH_MODULE(DFL);

// Head 结构体
struct HeadImpl : torch::nn::Module {
    int64_t ch = 16;
    int64_t nc = 80;
    int64_t nl;
    int64_t no;
    torch::Tensor stride;
    DFL dfl{nullptr};
    torch::nn::ModuleList box{nullptr}, cls{nullptr};

    explicit HeadImpl(int64_t nc = 80, std::vector<int64_t> filters = {}) :
            nc(nc), nl((int64_t) filters.size()),
            box(register_module("box", torch::nn::ModuleList())),
            cls(register_module("cls", torch::nn::ModuleList())) {
        no = nc + ch * 4;
        stride = register_buffer("stride", torch::zeros({nl}));
        dfl = register_module("dfl", DFL(ch));

        int64_t box_ch = std::max(int64_t(64), filters[0] / 4);
        int64_t cls_ch = std::max(int64_t(80), std::max(filters[0], nc));

        for (auto x: filters) {
            box->push_back(torch::nn::Sequential(
                    Conv(x, box_ch, torch::nn::Functional(torch::silu), 3, 1),
                    Conv(box_ch, box_ch, torch::nn::Functional(torch::silu), 3, 1),
                    torch::nn::Conv2d(box_ch, 4 * ch, 1)
            ));

            cls->push_back(torch::nn::Sequential(
                    Conv(x, x, torch::nn::Functional(torch::silu), 3, 1, x),
                    Conv(x, cls_ch, torch::nn::Functional(torch::silu)),
                    Conv(cls_ch, cls_ch, torch::nn::Functional(torch::silu), 3, 1, cls_ch),
                    Conv(cls_ch, cls_ch, torch::nn::Functional(torch::silu)),
                    torch::nn::Conv2d(cls_ch, nc, 1)
            ));
        }
    }

    torch::Tensor forward(std::vector<torch::Tensor> x) {
        // 处理每个检测层
        for (size_t i = 0; i < x.size(); ++i) {
            torch::Tensor box_out = box[i]->as<torch::nn::Sequential>()->forward(x[i]);
            torch::Tensor cls_out = cls[i]->as<torch::nn::Sequential>()->forward(x[i]);
            x[i] = torch::cat({box_out, cls_out}, 1);
        }

        if (is_training()) {
            // 训练模式返回拼接后的特征图 [batch, channels, H*W*3]
            return torch::cat(x, 1);
        }

        // Assuming make_anchors is defined elsewhere
        auto anchors_strides = make_anchors(x, stride);
        auto anchors = std::get<0>(anchors_strides).transpose(0, 1);
        auto strides = std::get<1>(anchors_strides).transpose(0, 1);

        // 合并检测结果
        std::vector<torch::Tensor> parts;
        for (const auto &feat: x) {
            parts.push_back(feat.view({feat.size(0), no, -1}));
        }
        auto x_cat = torch::cat(parts, 2);

        // 分割检测结果
        auto split_res = x_cat.split_with_sizes({4 * ch, nc}, 1);
        auto box_pred = split_res[0];
        auto cls_pred = split_res[1];

        // 解码边界框
        auto a_b = dfl->forward(box_pred).chunk(2, 1);
        auto a = a_b[0];
        auto b = a_b[1];
        auto box_xy = (anchors.unsqueeze(0) - a + anchors.unsqueeze(0) + b) / 2;
        auto box_wh = (anchors.unsqueeze(0) + b) - (anchors.unsqueeze(0) - a);
        auto decoded_box = torch::cat({box_xy, box_wh}, 1);

        // 最终输出
        return torch::cat({decoded_box * strides.unsqueeze(0), cls_pred.sigmoid()}, 1);
    }

    void initialize_biases() {
        // 遍历每个检测层
        for (int i = 0; i < box->size(); ++i) {
            // 初始化box偏置
            auto box_layer = box[i]->as<torch::nn::Sequential>();
            auto conv_box = box_layer->ptr(2)->as<torch::nn::Conv2d>();
            conv_box->bias.data().fill_(1.0);

            // 初始化cls偏置
            auto cls_layer = cls[i]->as<torch::nn::Sequential>();
            auto conv_cls = cls_layer->ptr(4)->as<torch::nn::Conv2d>();
            auto s = stride[i].item<double>();
            conv_cls->bias.data().slice(0, 0, nc).fill_(
                    std::log(5.0 / (double) nc / std::pow(640.0 / s, 2))
            );
        }

    }
};

TORCH_MODULE(Head);


// Folds a BatchNorm2d's affine transform into the preceding Conv2d, returning
// a single biased convolution with identical inference-time output.
// Works for grouped/depthwise convolutions too: BN applies a per-output-channel
// scale, i.e. a per-row scale of the flattened weight [O, I/g*K*K], so no
// groups == 1 restriction is required (the old check made fusing the
// depthwise convs in this file impossible).
// `inline` keeps this header-defined function ODR-safe across TUs.
inline torch::nn::Conv2d fuse_conv(const torch::nn::Conv2d &conv, const torch::nn::BatchNorm2d &norm) {
    TORCH_CHECK(conv->weight.device() == norm->weight.device(), "Device mismatch between conv and norm");

    auto device = conv->weight.device();

    // The fused layer mirrors the conv geometry but carries a bias.
    auto fused_conv = torch::nn::Conv2d(torch::nn::Conv2dOptions(
            conv->options.in_channels(),
            conv->options.out_channels(),
            conv->options.kernel_size())
                                                .stride(conv->options.stride())
                                                .padding(conv->options.padding())
                                                .groups(conv->options.groups())
                                                .bias(true) // bias now holds the BN shift
    );

    fused_conv->to(device);
    fused_conv->weight.set_requires_grad(false);
    fused_conv->bias.set_requires_grad(false);

    // Weight fusion: scale each output channel by gamma / sqrt(var + eps).
    auto w_conv = conv->weight.clone()
            .view({conv->options.out_channels(), -1}); // [O, I/g*K*K]
    auto w_norm = torch::diag(norm->weight / torch::sqrt(norm->options.eps() + norm->running_var));
    auto fused_w = torch::mm(w_norm, w_conv)
            .view(fused_conv->weight.sizes());
    fused_conv->weight.copy_(fused_w);

    // Bias fusion: beta - gamma * mean / sqrt(var + eps), plus the scaled
    // original conv bias (zero if the conv had none).
    torch::Tensor b_conv = conv->bias.defined() ?
                           conv->bias :
                           torch::zeros(conv->options.out_channels(), device);
    auto b_norm = norm->bias -
                  norm->weight * norm->running_mean /
                  torch::sqrt(norm->running_var + norm->options.eps());
    auto fused_b = torch::mm(w_norm, b_conv.view({-1, 1})).view({-1}) + b_norm;
    fused_conv->bias.copy_(fused_b);

    return fused_conv;
}

// YOLO 结构体
struct YOLOImpl : torch::nn::Module {
    DarkNet net{nullptr};
    DarkFPN fpn{nullptr};
    Head head{nullptr};
    torch::Tensor stride;

    YOLOImpl(const std::vector<int64_t> &width,
             const std::vector<int64_t> &depth,
             const std::vector<bool> &csp,
             int64_t num_classes) {

        net = register_module("net", DarkNet(DarkNetImpl(width, depth, csp)));
        fpn = register_module("fpn", DarkFPN(DarkFPNImpl(width, depth, csp)));
        head = register_module("head", Head(HeadImpl(num_classes, std::vector<int64_t>{width[3], width[4], width[5]})));

        auto img_dummy = torch::zeros({1, width[0], 256, 256}, torch::kCPU);
        if (torch::cuda::is_available()) {
            img_dummy = img_dummy.cuda();
        }

        // 计算stride
        auto outputs = this->forward(img_dummy);
        auto sizes = outputs.sizes();
        TORCH_CHECK(sizes.size() == 4, "Invalid output shape");

        std::vector<double> strides;
        for (int i = 0; i < 3; ++i) {
            int64_t layer_height = sizes[2] / (1 << i);
            strides.push_back(256.0 / (double) layer_height);
        }
        head->stride = torch::tensor(strides, torch::kFloat32);
        stride = head->stride;
        head->initialize_biases();
    }

    torch::Tensor forward(const torch::Tensor &x) {
        auto x_net = net->forward(x);
        auto x_fpn = fpn->forward(x_net);
        return head->forward(std::vector<torch::Tensor>{std::get<0>(x_fpn), std::get<1>(x_fpn), std::get<2>(x_fpn)});
    }

    void fuse() {
        for (auto &module: modules()) {
            if (auto conv = module->as<Conv>()) {
                if (conv->norm) {
                    conv->conv = fuse_conv(conv->conv, conv->norm);  //// 合并卷积和BatchNorm参数
                    conv->fused = true;                              //// 标记模块为已融合状态
                    conv->norm->reset();
                }
            }
        }
    }
};

TORCH_MODULE(YOLO);


// Compile-time selector for the YOLOv11 variant built by yolo_factory
// (N/T/S/M/L/X size tiers).
enum class YOLOType {
    V11N, V11T, V11S, V11M, V11L, V11X
};

// Compile-time factory: builds a YOLOv11 variant with its width/depth/CSP
// configuration. num_classes defaults to 80.
template<YOLOType type_yolo>
YOLO yolo_factory(int64_t num_classes = 80) {
    // Common defaults.
    std::vector<bool> csp;
    std::vector<int64_t> depth(6, 1); // default: depth 1 for every stage
    std::vector<int64_t> width;

    // Compile-time variant selection.
    if constexpr (type_yolo == YOLOType::V11N) {
        csp = {false, true};
        width = {3, 16, 32, 64, 128, 256};
    } else if constexpr (type_yolo == YOLOType::V11T) {
        csp = {false, true};
        width = {3, 24, 48, 96, 192, 384};
    } else if constexpr (type_yolo == YOLOType::V11S) {
        csp = {false, true};
        width = {3, 32, 64, 128, 256, 512};
    } else if constexpr (type_yolo == YOLOType::V11M) {
        csp = {true, true};
        width = {3, 64, 128, 256, 512, 512};
    } else if constexpr (type_yolo == YOLOType::V11L) {
        csp = {true, true};
        depth = std::vector<int64_t>(6, 2); // depth 2 everywhere
        width = {3, 64, 128, 256, 512, 512};
    } else if constexpr (type_yolo == YOLOType::V11X) {
        csp = {true, true};
        depth = std::vector<int64_t>(6, 2); // depth 2 everywhere
        width = {3, 96, 192, 384, 768, 768};
    } else {
        // The previous `static_assert("...")` tested a string literal, which
        // converts to a non-null pointer and is always true, so it could
        // never fire. A condition dependent on type_yolo is only checked when
        // this branch is instantiated, i.e. for an unknown enumerator — and
        // here it is necessarily false, so the assert fires as intended.
        static_assert(type_yolo == YOLOType::V11N,
                      "Unknown YOLO type, options: V11N, V11T, V11S, V11M, V11L, V11X");
    }
    return YOLO(width, depth, csp, num_classes);
}


//// Usage examples:
//auto model_nano = yolo_factory<YOLOType::V11N>(80);
//auto model_xlarge = yolo_factory<YOLOType::V11X>(80);
#endif //YOLOV11_CPP_MODEL_H
