//
// Created by SongpingWang on 2025/2/18.
//

#ifndef YOLOV11_CPP_MODEL_UTIL_H
#define YOLOV11_CPP_MODEL_UTIL_H

#include <torch/torch.h>
#include <vector>
#include <iostream>
#include <algorithm>
#include <torch/optim/schedulers/lr_scheduler.h>
#include <vector>
#include <stdexcept>
#include <cmath>
#include <memory>




// Generate anchor center points and matching stride values from feature maps.
//
// x:       per-level feature maps shaped (batch, ch, h, w)
// strides: stride of each level relative to the input image
// offset:  sub-cell offset of the anchor center (0.5 = cell center)
// returns: {anchor_points (sum(h*w), 2) in (x, y) order, stride_tensor (sum(h*w), 1)}
//
// `inline` because this is a header-defined free function (avoids ODR violations).
inline std::tuple<torch::Tensor, torch::Tensor> make_anchors(const std::vector<torch::Tensor>& x, const std::vector<int>& strides, float offset = 0.5) {
    std::vector<torch::Tensor> anchor_list, stride_list;
    auto dtype = x[0].dtype();
    auto device = x[0].device();

    for (size_t i = 0; i < strides.size(); ++i) {
        int stride = strides[i];
        auto shape = x[i].sizes();
        int64_t h = shape[2];
        int64_t w = shape[3];

        auto sx = torch::arange(w, torch::device(device).dtype(dtype)) + offset;
        auto sy = torch::arange(h, torch::device(device).dtype(dtype)) + offset;
        // meshgrid with 'ij' indexing: rows follow y (h), columns follow x (w),
        // so the flattened order matches an (h*w)-flattened feature map.
        // (BUG FIX: the original expanded to (w, h), transposing the grid.)
        auto sy_grid = sy.view({-1, 1}).expand({h, w});
        auto sx_grid = sx.view({1, -1}).expand({h, w});
        auto grid = torch::stack({sx_grid, sy_grid}, 2).reshape({-1, 2});

        // BUG FIX: the original torch::cat'ed onto a default-constructed
        // (undefined) tensor, which throws at runtime; collect the per-level
        // results and concatenate once at the end instead.
        anchor_list.push_back(grid);
        stride_list.push_back(torch::full({h * w, 1}, stride, torch::device(device).dtype(dtype)));
    }

    return std::make_tuple(torch::cat(anchor_list, 0), torch::cat(stride_list, 0));
}


// Linear warmup from min_lr to max_lr, followed by linear decay back to min_lr.
// Precomputes one learning rate per optimizer step over
// `total_epochs * num_steps` steps; the warmup lasts at least 100 steps.
class LinearLR : public torch::optim::LRScheduler {
public:
    // num_steps:     steps (batches) per epoch
    // warmup_epochs: epochs spent ramping min_lr -> max_lr
    // total_epochs:  full schedule length in epochs
    LinearLR(
            torch::optim::Optimizer &optimizer,
            int64_t num_steps,
            double max_lr,
            double min_lr,
            int64_t warmup_epochs,
            int64_t total_epochs)
            : torch::optim::LRScheduler(optimizer) {
        // BUG FIX: keep the arithmetic in 64 bits — the original cast
        // warmup_epochs * num_steps to int, which can overflow long schedules.
        int64_t warmup_steps = std::max<int64_t>(warmup_epochs * num_steps, 100);
        // Clamp at 0 so a short schedule cannot make `reserve` receive a
        // negative value converted to a huge size_t.
        int64_t decay_steps = std::max<int64_t>(total_epochs * num_steps - warmup_steps, 0);

        total_lr.reserve(warmup_steps + decay_steps);

        // Warmup ramp (end point excluded).
        for (int64_t i = 0; i < warmup_steps; ++i) {
            total_lr.push_back(min_lr + (max_lr - min_lr) * (double) i / (double) warmup_steps);
        }

        // Linear decay (end point excluded).
        for (int64_t i = 0; i < decay_steps; ++i) {
            total_lr.push_back(max_lr - (max_lr - min_lr) * (double) i / (double) decay_steps);
        }
    }

protected:
    std::vector<double> get_lrs() override {
        // step_count_ is unsigned in LRScheduler, so the original `< 0` branch
        // was dead; only the upper bound needs checking (the cast also avoids a
        // signed/unsigned comparison warning).
        if (static_cast<std::size_t>(step_count_) >= total_lr.size()) {
            throw std::out_of_range("Step index out of range");
        }
        // NOTE(review): returns a single LR — assumes one optimizer param group.
        return {total_lr[step_count_]};
    }

private:
    std::vector<double> total_lr;  // precomputed LR for every step
};


/*
int main() {
    // 示例模型和优化器
    auto model = torch::nn::Linear(10, 1);
    torch::optim::SGD optimizer(model->parameters(), torch::optim::SGDOptions(0.01));

    // 示例参数
    int64_t num_steps = 1000;
    double max_lr = 0.1;
    double min_lr = 0.001;
    int64_t warmup_epochs = 5;
    int64_t total_epochs = 20;

    // 创建 LinearLR 实例
    LinearLR lr_scheduler(optimizer, num_steps, max_lr, min_lr, warmup_epochs, total_epochs);

    // 模拟训练过程
    for (int64_t step = 0; step < num_steps; ++step) {
        lr_scheduler.step();
        std::cout << "Step " << step << ", LR: " << optimizer.param_groups()[0].options().get_lr() << std::endl;
    }

    return 0;
}
*/


// Exponential Moving Average of a model's parameters.
// Keeps a frozen clone of the model whose weights follow
//     ema = d * ema + (1 - d) * model,   d = decay * (1 - exp(-updates / tau))
// so the effective decay ramps up from 0 during early training.
class EMA {
public:
    explicit EMA(const std::shared_ptr<torch::nn::Module> &model,
                 double decay = 0.9999,
                 double tau = 2000.0,
                 int64_t updates = 0)
            : ema(model->clone()), updates(updates), decay([decay, tau](int64_t x) {
        // Ramped decay: small for the first ~tau updates, -> `decay` asymptotically.
        return decay * (1 - std::exp(-static_cast<double>(x) / tau));
    }) {
        // The EMA copy is inference-only and never receives gradients.
        ema->eval();
        for (auto &param: ema->parameters()) {
            param.set_requires_grad(false);
        }
    }

    // Blend the model's current parameters into the EMA copy.
    void update(const std::shared_ptr<torch::nn::Module> &model) {
        if (model->is_training()) {
            throw std::runtime_error("如果模型被包装，需要获取原始模型");
        }

        // The EMA update is pure bookkeeping — never record it on the autograd tape.
        torch::NoGradGuard no_grad;

        updates++;
        double d = decay(updates);

        auto model_params = model->named_parameters();

        for (auto &item: ema->named_parameters()) {
            auto &ema_param = item.value();
            // Only floating-point parameters are averaged.
            if (ema_param.dtype() != torch::kFloat32 && ema_param.dtype() != torch::kFloat64) {
                continue;
            }
            // BUG FIX: match parameters by name instead of relying on the two
            // iteration orders of independent OrderedDicts lining up.
            auto *model_param = model_params.find(item.key());
            if (model_param != nullptr) {
                ema_param.copy_(ema_param * d + (1 - d) * model_param->detach());
            }
        }
    }

private:
    std::shared_ptr<torch::nn::Module> ema;  // frozen averaged copy of the model
    int64_t updates;                         // number of update() calls so far
    std::function<double(int64_t)> decay;    // updates -> effective decay factor
};


// Tracks the running count, sum and mean of a scalar metric (e.g. a loss).
class AverageMeter {
public:
    AverageMeter() : num(0), sum(0), avg(0) {}

    // Record value `v` observed `n` times. NaN values are ignored, as are
    // non-positive counts.
    // (BUG FIX: the original accepted n == 0, so the very first update with
    // n == 0 computed 0/0 and poisoned `avg` with NaN.)
    void update(double v, int n) {
        if (std::isnan(v) || n <= 0) {
            return;
        }
        num += n;
        sum += v * n;
        avg = sum / num;  // num > 0 is guaranteed here
    }

    [[nodiscard]] double get_avg() const { return avg; }

    [[nodiscard]] int get_num() const { return num; }

    [[nodiscard]] double get_sum() const { return sum; }

private:
    int num;     // total number of observations
    double sum;  // weighted sum of observed values
    double avg;  // running mean (sum / num)
};



#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
#include <utility>
#include <vector>
#include <cmath>
#include <fstream>
#include <sstream>
#include <string>
#include <cctype>
#include "model.h"

////////////////////////////////////////////////////////////////////////////////////
///                                    loss function
///
/// Focal Loss 主要解决了以下几个问题：(https://blog.csdn.net/a8039974/article/details/142487672)
///     类别不平衡：通过调整正负样本的权重，使得模型在训练过程中更加关注少数类样本。
///     难易样本不平衡：通过引入调制因子（modulating factor），降低易分类样本的损失贡献，增大难分类样本的损失贡献，使得模型更加关注难分类样本的学习。
///     提升模型性能：在多个数据集和模型上的实验表明，Focal Loss能够显著提升目标检测等任务的性能。

/// -----------------------------------------------------------------------------------
/// VFL Loss（VariFocal Loss）
///     有效解决类别不平衡问题：通过不对称的样本加权策略，VFL Loss能够更有效地处理正负样本不平衡问题。
///     提升检测性能：在多个数据集和框架上的实验表明，VFL Loss能够显著提升目标检测的性能。
///     灵活性高：VFL Loss的公式设计灵活，可以通过调整超参数来适应不同的任务和数据集。


/// -----------------------------------------------------------------------------------
/// Distribution Focal Loss（DFL） 是目标检测领域中的一个损失函数
///    提高了边界框回归的灵活性和准确性，能够更好地适应复杂场景。
///    在一些目标检测模型中，DFL的应用显著提升了模型的性能。


/// VFL，DFL与Focal Loss关系与区别
/// 处理对象：
///     Focal Loss：主要关注分类任务中的类别不平衡问题，通过调整正负样本和难易样本的权重来优化分类性能。
///     VFL（VariFocal Loss）：在Focal Loss的基础上，进一步考虑了正样本和负样本的不同重要程度，通过不对称地处理这两种样本来平衡学习信号。
///     DFL（Distribute Focal Loss）：则主要关注边界框回归的精度，通过将边界框的位置建模为一般分布来提高定位准确性。

/// 实现方式：
///     Focal Loss：通过引入调制因子（modulating factor）来降低易分类样本的损失贡献，增大难分类样本的损失贡献。
///     VFL（VariFocal Loss）：在Focal Loss的基础上，通过不同的权重分配策略来不对称地处理正样本和负样本。
///     DFL（Distribute Focal Loss）：则通过优化边界框坐标的概率分布来实现更准确的定位。具体来说，DFL将边界框的坐标建模为一个概率分布，并通过优化该分布来提高定位精度。

/// 优缺点：
///     Focal Loss：优点在于能够有效解决类别不平衡问题，提高分类性能；缺点在于对超参数敏感，且计算复杂度相对较高。
///     VFL（VariFocal Loss）：通过不对称地处理正样本和负样本来进一步优化检测性能；但其具体实现和效果可能因模型和数据集而异。
///     DFL（Distribute Focal Loss）：优点在于提高了边界框回归的精度和定位准确性；缺点在于可能需要更多的计算资源来优化边界框坐标的概率分布。




////////////////////////////////////////////////////////////////////////////////////

// Quality Focal Loss: element-wise BCE-with-logits modulated by
// |target - sigmoid(output)|^beta (reduction is left to the caller).
struct QFLImpl : torch::nn::Module {
    double beta;  // modulating exponent on the quality gap
    torch::nn::BCEWithLogitsLoss bce_loss{nullptr};

    explicit QFLImpl(double beta = 2.0) : beta(beta) {
        bce_loss = register_module("bce_loss", torch::nn::BCEWithLogitsLoss(
                torch::nn::BCEWithLogitsLossOptions().reduction(torch::kNone)));
    }

    torch::Tensor forward(const torch::Tensor &outputs, const torch::Tensor &targets) {
        // BUG FIX: BCEWithLogitsLoss needs targets in the prediction dtype; the
        // original cast targets only for the modulating factor and fed the raw
        // (possibly integer) targets to the BCE term.
        auto targets_cast = targets.to(outputs.dtype());
        auto bce_loss_tensor = bce_loss->forward(outputs, targets_cast);
        auto abs_diff = torch::abs(targets_cast - outputs.sigmoid());

        // Scalar exponent — no need to materialize beta as a tensor.
        return abs_diff.pow(beta) * bce_loss_tensor;
    }
};

TORCH_MODULE(QFL);


// Varifocal Loss: asymmetric weighting of BCE-with-logits — positives carry
// their (IoU-aware) quality target as weight, negatives are down-weighted by
// alpha * |p - q|^gamma. Reduction is left to the caller.
struct VFLImpl : torch::nn::Module {
    double alpha;
    double gamma;
    bool iou_weighted;
    torch::nn::BCEWithLogitsLoss bce_loss{nullptr};

    explicit VFLImpl(double alpha = 0.75, double gamma = 2.0, bool iou_weighted = true)
            : alpha(alpha), gamma(gamma), iou_weighted(iou_weighted) {
        assert(alpha >= 0.0);
        bce_loss = register_module("bce_loss", torch::nn::BCEWithLogitsLoss(
                torch::nn::BCEWithLogitsLossOptions().reduction(torch::kNone)));
    }

    torch::Tensor forward(const torch::Tensor &outputs, const torch::Tensor &targets) {
        assert(outputs.sizes() == targets.sizes());
        auto labels = targets.to(outputs.dtype());

        auto alpha_t = torch::tensor(alpha, outputs.options());
        auto gamma_t = torch::tensor(gamma, outputs.options());

        // Masks selecting positive (target > 0) and negative samples.
        auto pos_mask = (labels > 0.0).to(outputs.dtype());
        auto neg_mask = (labels <= 0.0).to(outputs.dtype());

        // Negatives: always focal-weighted by alpha * |sigmoid(p) - q|^gamma.
        auto neg_weight = alpha_t * (outputs.sigmoid() - labels).abs().pow(gamma_t) * neg_mask;
        // Positives: either the IoU-quality target itself, or a flat weight of 1.
        auto pos_weight = iou_weighted ? labels * pos_mask : pos_mask;

        return bce_loss->forward(outputs, labels) * (pos_weight + neg_weight);
    }
};

TORCH_MODULE(VFL);


// Binary Focal Loss (Lin et al., 2017) on logits:
//   loss = alpha_t * (1 - p_t)^gamma * BCE(outputs, targets), element-wise;
// reduction is left to the caller. alpha <= 0 / gamma <= 0 disable the
// respective factor.
struct FocalLossImpl : torch::nn::Module {
    double alpha;  // class-balance weight for the positive class
    double gamma;  // focusing exponent
    torch::nn::BCEWithLogitsLoss bce_loss{nullptr};

    explicit FocalLossImpl(double alpha = 0.25, double gamma = 1.5)
            : alpha(alpha), gamma(gamma) {
        bce_loss = register_module("bce_loss", torch::nn::BCEWithLogitsLoss(
                torch::nn::BCEWithLogitsLossOptions().reduction(torch::kNone)));
    }

    torch::Tensor forward(const torch::Tensor &outputs, const torch::Tensor &targets) {
        auto targets_float = targets.to(outputs.dtype());
        // BUG FIX: BCEWithLogitsLoss requires targets in the prediction dtype;
        // the original computed targets_float but passed the raw targets.
        auto loss = bce_loss->forward(outputs, targets_float);

        if (alpha > 0) {
            // alpha for positives, (1 - alpha) for negatives.
            auto alpha_tensor = torch::tensor(alpha, outputs.options());
            auto alpha_factor = targets_float * alpha_tensor + (1 - targets_float) * (1 - alpha_tensor);
            loss = loss * alpha_factor;
        }

        if (gamma > 0) {
            // p_t: the model's probability assigned to the true class.
            auto gamma_tensor = torch::tensor(gamma, outputs.options());
            auto outputs_sigmoid = outputs.sigmoid();
            auto p_t = targets_float * outputs_sigmoid + (1 - targets_float) * (1 - outputs_sigmoid);
            auto gamma_factor = torch::pow(1.0 - p_t, gamma_tensor);
            loss = loss * gamma_factor;
        }

        return loss;
    }
};

TORCH_MODULE(FocalLoss);


// 定义损失函数
struct BoxLossImpl : torch::nn::Module {
    int dfl_ch;
    torch::nn::CrossEntropyLoss m_df_loss{nullptr};

    BoxLossImpl() : dfl_ch(0) {
        register_module("df_loss", torch::nn::CrossEntropyLoss(
                torch::nn::CrossEntropyLossOptions().reduction(torch::kNone)));
    }

    explicit BoxLossImpl(int dfl_ch) : dfl_ch(dfl_ch) {
        register_module("df_loss", torch::nn::CrossEntropyLoss(
                torch::nn::CrossEntropyLossOptions().reduction(torch::kNone)));
    }

    std::tuple<torch::Tensor, torch::Tensor> forward(
            const torch::Tensor &pred_dist,
            const torch::Tensor &pred_bboxes,
            const torch::Tensor &anchor_points,
            const torch::Tensor &target_bboxes,
            const torch::Tensor &target_scores,
            const torch::Tensor &target_scores_sum,
            const torch::Tensor &fg_mask) {
        // 使用masked_select替代索引操作
        auto weight = torch::masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1);

        // 使用正确的掩码应用方式
        auto masked_pred_bboxes = torch::masked_select(pred_bboxes, fg_mask.unsqueeze(-1)).view({-1, 4});
        auto masked_target_bboxes = torch::masked_select(target_bboxes, fg_mask.unsqueeze(-1)).view({-1, 4});
        auto iou = compute_iou(masked_pred_bboxes, masked_target_bboxes);
        auto loss_box = ((1.0 - iou) * weight).sum() / target_scores_sum;

        // 修正chunk维度为最后一维
        auto chunks = target_bboxes.chunk(2, -1);
        auto a = chunks[0];
        auto b = chunks[1];

        // 显式类型转换保证数值精度
        auto target = torch::cat({anchor_points - a, b - anchor_points}, -1)
                .clamp(0, static_cast<double>(dfl_ch) - 0.01);

        // 处理pred_dist的掩码应用
        auto masked_pred_dist = torch::masked_select(pred_dist, fg_mask.unsqueeze(-1))
                .view({-1, dfl_ch + 1});
        auto masked_target = torch::masked_select(target, fg_mask.unsqueeze(-1))
                .view({-1, 2});  // 假设每个目标有2个坐标

        auto loss_dfl = this->m_df_loss(masked_pred_dist, masked_target);
        loss_dfl = (loss_dfl * weight).sum() / target_scores_sum;

        return std::make_tuple(loss_box, loss_dfl);
    }

    torch::Tensor df_loss(const torch::Tensor &pred_dist, const torch::Tensor &target) {
        // 确保目标值在合理范围内
        auto clamped_target = target.clamp(0, dfl_ch - 1.01);

        auto tl = clamped_target.to(torch::kLong);
        auto tr = tl + 1;
        auto wl = tr - clamped_target;  // 保持浮点类型
        auto wr = 1 - wl;

        // 使用视图保持维度一致性
        auto left_loss = m_df_loss->forward(
                pred_dist,
                tl.contiguous().view({-1})
        ).view(tl.sizes());

        auto right_loss = m_df_loss->forward(
                pred_dist,
                tr.contiguous().view({-1})
        ).view(tl.sizes());

        return (left_loss * wl + right_loss * wr).mean(-1, /*keepdim=*/true);
    }

    static torch::Tensor compute_iou(const torch::Tensor &box1, const torch::Tensor &box2, double eps = 1e-7) {
        // 拆分坐标到各个维度
        auto box1_parts = box1.chunk(4, -1);
        auto b1_x1 = box1_parts[0], b1_y1 = box1_parts[1], b1_x2 = box1_parts[2], b1_y2 = box1_parts[3];

        auto box2_parts = box2.chunk(4, -1);
        auto b2_x1 = box2_parts[0], b2_y1 = box2_parts[1], b2_x2 = box2_parts[2], b2_y2 = box2_parts[3];

        // 计算宽度和高度（添加eps防止除零）
        auto w1 = b1_x2 - b1_x1;
        auto h1 = b1_y2 - b1_y1 + eps;
        auto w2 = b2_x2 - b2_x1;
        auto h2 = b2_y2 - b2_y1 + eps;

        // 交集区域计算（带维度对齐）
        auto inter_left = torch::max(b1_x1, b2_x1);
        auto inter_right = torch::min(b1_x2, b2_x2);
        auto inter_top = torch::max(b1_y1, b2_y1);
        auto inter_bottom = torch::min(b1_y2, b2_y2);
        auto inter = (inter_right - inter_left).clamp_min(0) *
                     (inter_bottom - inter_top).clamp_min(0);

        // 并集区域
        auto union_area = w1 * h1 + w2 * h2 - inter + eps;

        // 基础IoU计算
        auto iou = inter / union_area;

        // CIoU扩展计算
        auto c_left = torch::min(b1_x1, b2_x1);
        auto c_right = torch::max(b1_x2, b2_x2);
        auto c_top = torch::min(b1_y1, b2_y1);
        auto c_bottom = torch::max(b1_y2, b2_y2);
        auto cw = c_right - c_left;
        auto ch = c_bottom - c_top;
        auto c2 = cw.pow(2) + ch.pow(2) + eps;

        // 中心点距离计算
        auto rho2 = (torch::pow((b2_x1 + b2_x2 - b1_x1 - b1_x2), 2) +
                     torch::pow((b2_y1 + b2_y2 - b1_y1 - b1_y2), 2)) / 4;

        // 宽高比一致性计算
        auto v = (4 / pow(M_PI, 2)) *
                 torch::pow(torch::atan(w2 / h2) - torch::atan(w1 / h1), 2);

        // Alpha参数计算（禁用梯度）
        torch::Tensor alpha;
        {
            torch::NoGradGuard no_grad;
            alpha = v / (v - iou + (1 + eps));
        }

        // 最终CIoU计算
        return iou - (rho2 / c2 + v * alpha);
    }

};

TORCH_MODULE(BoxLoss);


/*
// 定义计算IoU的函数
torch::Tensor compute_iou(const torch::Tensor& box1, const torch::Tensor& box2, double eps = 1e-7) {
    auto b1_x1 = box1.select(1, 0);
    auto b1_y1 = box1.select(1, 1);
    auto b1_x2 = box1.select(1, 2);
    auto b1_y2 = box1.select(1, 3);
    auto b2_x1 = box2.select(1, 0);
    auto b2_y1 = box2.select(1, 1);
    auto b2_x2 = box2.select(1, 2);
    auto b2_y2 = box2.select(1, 3);

    auto inter_x1 = torch::max(b1_x1, b2_x1);
    auto inter_y1 = torch::max(b1_y1, b2_y1);
    auto inter_x2 = torch::min(b1_x2, b2_x2);
    auto inter_y2 = torch::min(b1_y2, b2_y2);
    auto inter_area = (inter_x2 - inter_x1).clamp_min(0) * (inter_y2 - inter_y1).clamp_min(0);

    auto box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1);
    auto box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1);
    auto union_area = box1_area + box2_area - inter_area + eps;

    return inter_area / union_area;
}
*/



// Plain IoU between row-aligned boxes in (x1, y1, x2, y2) format.
// boxes1 / boxes2: (N, 4) tensors; returns an (N,) tensor of IoU values.
// `inline` because this is a header-defined free function (avoids ODR violations).
inline torch::Tensor compute_iou(const torch::Tensor &boxes1, const torch::Tensor &boxes2) {
    // BUG FIX: the intersection corners are max(top-left) / min(bottom-right).
    // The original had min/max swapped, which computes the enclosing box and
    // yields "IoU" values greater than 1.
    auto lt = torch::max(boxes1.slice(1, 0, 2), boxes2.slice(1, 0, 2));
    auto rb = torch::min(boxes1.slice(1, 2, 4), boxes2.slice(1, 2, 4));

    auto wh = (rb - lt).clamp(0);  // zero for non-overlapping boxes
    auto inter = wh.prod(-1);
    auto area1 = (boxes1.slice(1, 2, 4) - boxes1.slice(1, 0, 2)).prod(-1);
    auto area2 = (boxes2.slice(1, 2, 4) - boxes2.slice(1, 0, 2)).prod(-1);
    auto union_area = area1 + area2 - inter;

    return inter / union_area.clamp(1e-9);  // clamp guards degenerate boxes
}

struct Assigner : public torch::nn::Module {
    int64_t nc;
    int64_t top_k;
    double alpha;
    double beta;
    double eps;
    torch::Tensor eps_tensor;

    explicit Assigner(int64_t nc = 80,
                      int64_t top_k = 13,
                      double alpha = 1.0,
                      double beta = 6.0,
                      double eps = 1e-9)
            : nc(nc), top_k(top_k), alpha(alpha), beta(beta), eps(eps) {
        register_buffer("eps_tensor", torch::tensor(eps, torch::kFloat32));
    }

    std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> forward(
            const torch::Tensor &pd_scores,
            const torch::Tensor &pd_bboxes,
            const torch::Tensor &anc_points,
            const torch::Tensor &gt_labels,
            const torch::Tensor &gt_bboxes,
            const torch::Tensor &mask_gt
    ) {
        int64_t batch_size = pd_scores.size(0);
        int64_t num_max_boxes = gt_bboxes.size(1);

        if (num_max_boxes == 0) {
            auto device = gt_bboxes.device();
            return std::make_tuple(
                    torch::zeros_like(pd_bboxes).to(device),
                    torch::zeros_like(pd_scores).to(device),
                    torch::zeros_like(pd_scores.index({"...", 0})).to(device)
            );
        }

        int64_t num_anchors = anc_points.size(0);
        auto shape = gt_bboxes.sizes();
        auto lt = gt_bboxes.view({-1, 1, 4}).slice(2, 0, 2);
        auto rb = gt_bboxes.view({-1, 1, 4}).slice(2, 2, 4);
        auto mask_in_gts = torch::cat({anc_points.unsqueeze(0) - lt, rb - anc_points.unsqueeze(0)}, 2)
                .view({shape[0], shape[1], num_anchors, -1})
                .amin(3)
                .gt(eps_tensor);

        int64_t na = pd_bboxes.size(-2);
        auto gt_mask = (mask_in_gts * mask_gt).to(torch::kBool);
        auto overlaps = torch::zeros({batch_size, num_max_boxes, na}, pd_bboxes.dtype()).to(pd_bboxes.device());
        auto bbox_scores = torch::zeros({batch_size, num_max_boxes, na}, pd_scores.dtype()).to(pd_scores.device());

        auto ind = torch::zeros({2, batch_size, num_max_boxes}, torch::kInt64);
        ind[0] = torch::arange(batch_size).view({-1, 1}).expand({-1, num_max_boxes});
        ind[1] = gt_labels.squeeze(-1);
        auto selected_scores = pd_scores.index_select(0, ind[0]).index_select(2, ind[1]);
        bbox_scores.masked_scatter_(gt_mask, selected_scores.masked_select(gt_mask));

        auto pd_boxes = pd_bboxes.unsqueeze(1).expand({-1, num_max_boxes, -1, -1}).masked_select(gt_mask).view({-1, 4});
        auto gt_boxes = gt_bboxes.unsqueeze(2).expand({-1, -1, na, -1}).masked_select(gt_mask).view({-1, 4});
        overlaps.masked_scatter_(gt_mask, compute_iou(gt_boxes, pd_boxes).squeeze(-1).clamp_(0));

        auto align_metric = bbox_scores.pow(alpha) * overlaps.pow(beta);

        auto top_k_mask = mask_gt.expand({-1, -1, top_k}).to(torch::kBool);
        auto top_k_metrics = align_metric.topk(top_k, -1, true, true);
        auto top_k_indices = std::get<1>(top_k_metrics);
        top_k_indices.masked_fill_(~top_k_mask, 0);

        auto mask_top_k = torch::zeros_like(align_metric, torch::kInt8);
        auto ones = torch::ones_like(top_k_indices.index({"...", torch::indexing::Slice(0, 1)}), torch::kInt8);
        for (int64_t k = 0; k < top_k; ++k) {
            mask_top_k.scatter_add_(-1, top_k_indices.index({"...", k}), ones);
        }
        mask_top_k.masked_fill_(mask_top_k > 1, 0);
        mask_top_k = mask_top_k.to(align_metric.dtype());
        auto mask_pos = mask_top_k * mask_in_gts * mask_gt;

        auto fg_mask = mask_pos.sum(-2);
        if (fg_mask.max().item<int64_t>() > 1) {
            auto mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand({-1, num_max_boxes, -1});
            auto max_overlaps_idx = overlaps.argmax(1);

            auto is_max_overlaps = torch::zeros_like(mask_pos, mask_pos.dtype());
            is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1);

            mask_pos = torch::where(mask_multi_gts, is_max_overlaps, mask_pos).to(torch::kFloat32);
            fg_mask = mask_pos.sum(-2);
        }
        auto target_gt_idx = mask_pos.argmax(-2);

        auto index = torch::arange(batch_size, torch::kInt64).unsqueeze(1).to(gt_labels.device());
        auto target_index = target_gt_idx + index * num_max_boxes;
        auto target_labels = gt_labels.view(-1).index_select(0, target_index).view_as(target_gt_idx);

        auto target_bboxes = gt_bboxes.view({-1, gt_bboxes.size(-1)}).index_select(0, target_index).view_as(
                target_gt_idx);

        target_labels.clamp_(0);

        auto target_scores = torch::zeros({target_labels.size(0), target_labels.size(1), nc}, torch::kInt64).to(
                target_labels.device());
        target_scores.scatter_(2, target_labels.unsqueeze(-1), 1);

        auto fg_scores_mask = fg_mask.unsqueeze(-1).expand({-1, -1, nc});
        target_scores = torch::where(fg_scores_mask > 0, target_scores, 0);

        align_metric *= mask_pos;
        auto pos_align_metrics = align_metric.amax(/*dim=*/-1, /*keepdim=*/true);
        auto pos_overlaps = (overlaps * mask_pos).amax(/*dim=*/-1, /*keepdim=*/true);
        auto norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + eps)).amax(-2).unsqueeze(-1);
        target_scores = target_scores * norm_align_metric;

        return std::make_tuple(target_bboxes, target_scores, fg_mask.to(torch::kBool));
    }
};


// Total detection loss: weighted sum of classification BCE, CIoU box loss and
// Distribution Focal Loss, with targets produced by the task-aligned assigner.
struct ComputeLoss : public torch::nn::Module {
    torch::nn::ModuleHolder<torch::nn::Module> model;
    std::map<std::string, double> params;  // loss weights, keys: "box", "cls", "dfl"
    std::vector<int> stride;               // per-level detection strides
    int nc = 0;                            // number of classes
    int no = 0;                            // channels per anchor: 4 * reg_max + nc
    int reg_max = 0;                       // DFL bins per box side
    torch::Device device{torch::kCPU};
    BoxLoss box_loss{nullptr};
    torch::nn::BCEWithLogitsLoss cls_loss{nullptr};
    std::shared_ptr<Assigner> assigner;

    ComputeLoss(const std::shared_ptr<torch::nn::Module>& model, std::map<std::string, double> params)
            : model(register_module("model", model)), params(std::move(params)) {

        // Run on whatever device the model lives on.
        device = model->parameters()[0].device();

        // The detection head is the first child of the model's first submodule.
        auto head_module = model->named_children().begin()->value()->children()[0];
        auto m = std::dynamic_pointer_cast<HeadImpl>(head_module);
        TORCH_CHECK(m != nullptr, "ComputeLoss: expected the detection head as first child");

        // BUG FIX: the original computed the strides into a local vector and
        // never assigned `stride`, and left `nc`, `no`, `reg_max` uninitialized
        // (the assignments were commented out) — undefined behavior downstream.
        auto stride_accessor = m->stride.accessor<int64_t, 1>();
        for (int64_t i = 0; i < m->stride.size(0); ++i) {
            stride.push_back(static_cast<int>(stride_accessor[i]));
        }
        nc = m->nc;
        no = m->no;
        reg_max = m->ch;

        // Loss components.
        box_loss = register_module("box_loss", BoxLoss(reg_max - 1));
        box_loss->to(device);
        cls_loss = register_module("cls_loss", torch::nn::BCEWithLogitsLoss(
                torch::nn::BCEWithLogitsLossOptions().reduction(torch::kNone)));
        assigner = register_module("assigner", std::make_shared<Assigner>(nc, 10, 0.5, 6.0));
        assigner->to(device);
    }

    // outputs: per-level raw head outputs, each (batch, no, h, w)
    // targets: "idx" (image index per box), "cls" (class id per box),
    //          "box" (normalized xywh boxes)
    // returns {loss_box, loss_cls, loss_dfl}, each already weighted by params.
    std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> forward(
            const std::vector<torch::Tensor> &outputs,
            const std::map<std::string, torch::Tensor> &targets) {

        // Flatten and concatenate all levels: (batch, no, total_anchors).
        std::vector<torch::Tensor> views;
        for (const auto &out: outputs) {
            views.push_back(out.view({out.size(0), no, -1}));
        }
        auto x = torch::cat(views, 2);

        // Split into box-distribution and class channels; both are brought to
        // (batch, anchors, channels).  (BUG FIX: the original never permuted
        // pred_distri, although box_decode and BoxLoss index its last dim.)
        int split_pos = reg_max * 4;
        auto pred_distri = x.slice(1, 0, split_pos)
                .permute({0, 2, 1})
                .contiguous();
        auto pred_scores = x.slice(1, split_pos, split_pos + nc)
                .permute({0, 2, 1})
                .contiguous();

        // Input image size in pixels, as (width, height).
        // (BUG FIX: `device(device)` invoked the torch::Device member, which is
        // not callable; build TensorOptions explicitly.)
        auto input_size = torch::tensor(
                {outputs[0].size(3), outputs[0].size(2)},  // [width, height]
                torch::TensorOptions().device(device).dtype(pred_scores.dtype())
        ) * stride[0];

        auto anchor_points_stride = make_anchors(outputs, stride, 0.5);
        auto anchor_points = std::get<0>(anchor_points_stride);
        auto stride_tensor = std::get<1>(anchor_points_stride);

        // Per-box target rows: [image_idx, cls, x, y, w, h].
        auto idx = targets.at("idx").view({-1, 1});
        auto cls = targets.at("cls").view({-1, 1});
        auto box = targets.at("box");
        auto targets_tensor = torch::cat({idx, cls, box}, 1).to(device);

        torch::Tensor gt;
        if (targets_tensor.size(0) == 0) {
            gt = torch::zeros({pred_scores.size(0), 0, 5},
                              torch::dtype(pred_scores.dtype()).device(device));
        } else {
            auto i = targets_tensor.select(1, 0);
            // BUG FIX: the original called _unique2(i, true, true), which sets
            // return_inverse and leaves return_counts false, so `counts` came
            // back empty and counts.max() threw; request the counts explicitly.
            auto unique_result = torch::_unique2(i, /*sorted=*/true,
                                                 /*return_inverse=*/false,
                                                 /*return_counts=*/true);
            auto counts = std::get<2>(unique_result);

            // Pad every image to the largest per-image box count.
            int max_count = counts.max().item<int>();
            gt = torch::zeros({pred_scores.size(0), max_count, 5},
                              torch::dtype(pred_scores.dtype()).device(device));

            for (int j = 0; j < gt.size(0); ++j) {
                auto mask = (i == j);
                auto n = mask.sum().item<int>();
                if (n > 0) {
                    gt.index_put_({j, torch::indexing::Slice(0, n)},
                                  targets_tensor.index({mask}).slice(1, 1, 6));
                }
            }

            // Scale normalized xywh to pixels, then convert to x1y1x2y2.
            auto boxes = gt.slice(2, 1, 5);
            boxes.mul_(input_size.view({1, 1, 2}).repeat({1, 1, 2}));

            auto centers = boxes.slice(2, 0, 2);
            auto half_wh = boxes.slice(2, 2, 4) / 2.0;
            auto new_boxes = torch::cat({
                                                centers - half_wh,
                                                centers + half_wh
                                        }, 2);
            gt.slice(2, 1, 5) = new_boxes;
        }

        // Split labels and boxes; padded rows sum to zero -> validity mask.
        auto gt_labels = gt.slice(2, 0, 1);
        auto gt_bboxes = gt.slice(2, 1, 5);
        auto mask_gt = gt_bboxes.sum(2, /*keepdim=*/true).gt(0);

        // Decode the DFL distributions into boxes (feature-map scale).
        auto pred_bboxes = box_decode(anchor_points, pred_distri);

        // Assign ground truths to anchors in image-pixel space.
        auto assigned_targets = assigner->forward(
                pred_scores.detach().sigmoid(),
                (pred_bboxes.detach() * stride_tensor).to(gt_bboxes.dtype()),
                anchor_points * stride_tensor,
                gt_labels,
                gt_bboxes,
                mask_gt
        );

        auto target_bboxes = std::get<0>(assigned_targets);
        auto target_scores = std::get<1>(assigned_targets);
        auto fg_mask = std::get<2>(assigned_targets);

        // Classification loss, normalized by the total assigned score mass.
        // (BUG FIX: `device(device)` again — use TensorOptions.)
        auto target_scores_sum = torch::maximum(
                target_scores.sum(),
                torch::tensor(1.0, torch::TensorOptions().device(device)));
        auto loss_cls = cls_loss->forward(pred_scores, target_scores.to(pred_scores.dtype())).sum() / target_scores_sum;

        // Box and DFL losses only when there is at least one foreground anchor.
        torch::Tensor loss_box = torch::zeros_like(loss_cls);
        torch::Tensor loss_dfl = torch::zeros_like(loss_cls);

        if (fg_mask.sum().item<int>() > 0) {
            target_bboxes /= stride_tensor;  // back to feature-map scale
            auto losses = box_loss->forward(
                    pred_distri,
                    pred_bboxes,
                    anchor_points,
                    target_bboxes,
                    target_scores,
                    target_scores_sum,
                    fg_mask
            );
            loss_box = std::get<0>(losses);
            loss_dfl = std::get<1>(losses);
        }

        // Apply the configured loss weights.
        loss_box *= params.at("box");
        loss_cls *= params.at("cls");
        loss_dfl *= params.at("dfl");

        return std::make_tuple(loss_box, loss_cls, loss_dfl);
    }

    // Decode per-side DFL distributions into boxes around the anchor points.
    // pred_dist: (batch, anchors, 4 * reg_max); returns (batch, anchors, 4)
    // xyxy boxes in feature-map units.
    torch::Tensor box_decode(const torch::Tensor &anchor_points, const torch::Tensor &pred_dist) {
        auto b = pred_dist.size(0);
        auto a = pred_dist.size(1);
        auto c = pred_dist.size(2);
        // Softmax over the reg_max bins of each side.
        auto pred_dist_softmax = pred_dist.view({b, a, 4, c / 4}).softmax(3);

        // Expectation over the bins: (..., 4, reg_max) @ (reg_max) -> (..., 4).
        // (BUG FIX: the original shaped the projection (1, 1, 1, reg_max),
        // which matmul cannot contract against (..., 4, reg_max); a 1-D vector
        // is the correct right-hand side.)
        auto project = torch::arange(c / 4, torch::TensorOptions().dtype(torch::kFloat32).device(device));
        auto pred_dist_projected = pred_dist_softmax.matmul(project);

        // Distances (left-top, right-bottom) around each anchor -> xyxy.
        auto lt_rb = pred_dist_projected.chunk(2, 2);
        auto x1y1 = anchor_points - lt_rb[0];
        auto x2y2 = anchor_points + lt_rb[1];
        return torch::cat({x1y1, x2y2}, 2);
    }
};


/*
 int main() {
    // Example usage
    Assigner assigner;

    // Dummy input tensors
    auto pd_scores = torch::rand({2, 80, 10});
    auto pd_bboxes = torch::rand({2, 10, 4});
    auto anc_points = torch::rand({10, 2});
    auto gt_labels = torch::randint(0, 80, {2, 5, 1}, torch::kInt64);
    auto gt_bboxes = torch::rand({2, 5, 4});
    auto mask_gt = torch::ones({2, 5}, torch::kBool);

    auto [target_bboxes, target_scores, fg_mask] = assigner.forward(pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt);

    std::cout << "Target BBoxes: " << target_bboxes << std::endl;
    std::cout << "Target Scores: " << target_scores << std::endl;
    std::cout << "FG Mask: " << fg_mask << std::endl;

    return 0;
}
 */


/*
int main() {
    // Load model
    torch::jit::script::Module model;
    try {
        model = torch::jit::load("model.pt");
    } catch (const c10::Error& e) {
        std::cerr << "Error loading the model\n";
        return -1;
    }

    // Set device
    torch::Device device(torch::kCUDA);
    model.to(device);

    // Define parameters
    std::map<std::string, double> params = {
            {"box", 1.0},
            {"cls", 1.0},
            {"dfl", 1.0}
    };

    // Create ComputeLoss object
    ComputeLoss compute_loss(model, params);

    // Dummy inputs and targets
    std::vector<torch::Tensor> outputs = {
            torch::rand({1, 80, 80, 80}, torch::device(device).dtype(torch::kFloat32)),
            torch::rand({1, 80, 80, 80}, torch::device(device).dtype(torch::kFloat32))
    };
    std::map<std::string, torch::Tensor> targets = {
            {"idx", torch::tensor({0}, torch::device(device).dtype(torch::kInt64))},
            {"cls", torch::tensor({1}, torch::device(device).dtype(torch::kInt64))},
            {"box", torch::tensor({0.1, 0.2, 0.3, 0.4}, torch::device(device).dtype(torch::kFloat32))}
    };

    // Compute loss
    auto loss = compute_loss.forward(outputs, targets);
    std::cout << "Loss: " << std::get<0>(loss).item<double>() << ", " << std::get<1>(loss).item<double>() << ", " << std::get<2>(loss).item<double>() << std::endl;

    return 0;
}
*/

/*
 int main() {
    // Example usage
    Assigner assigner;

    // Dummy input tensors
    auto pd_scores = torch::rand({2, 80, 10});
    auto pd_bboxes = torch::rand({2, 10, 4});
    auto anc_points = torch::rand({10, 2});
    auto gt_labels = torch::randint(0, 80, {2, 5, 1}, torch::kInt64);
    auto gt_bboxes = torch::rand({2, 5, 4});
    auto mask_gt = torch::ones({2, 5}, torch::kBool);

    auto [target_bboxes, target_scores, fg_mask] = assigner.forward(pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt);

    std::cout << "Target BBoxes: " << target_bboxes << std::endl;
    std::cout << "Target Scores: " << target_scores << std::endl;
    std::cout << "FG Mask: " << fg_mask << std::endl;

    return 0;
}
 */

#endif //YOLOV11_CPP_MODEL_UTIL_H
