use burn::{
    nn::loss::MseLoss, tensor::{backend::Backend, Int, Tensor}
};

/// Hyper-parameters for the YOLOv1 loss (Redmon et al., 2016).
pub struct YoloV1LossConfig {
    pub lambda_coord: f32,    // Weight of the coordinate loss (5.0 in the paper)
    pub lambda_noobj: f32,    // Weight of the no-object confidence loss (0.5 in the paper)
    pub batch_size: usize,    // Batch size (typically 16)
    pub grid_size: usize,     // S — grid cells per side (typically 7)
    pub num_boxes: usize,     // B — boxes predicted per cell (typically 2)
    pub num_classes: usize,   // C — number of classes (e.g. VOC: 20)
}

impl Default for YoloV1LossConfig {
    fn default() -> Self {
        Self {
            lambda_coord: 5.0,
            lambda_noobj: 0.5,
            batch_size: 16,
            grid_size: 7,
            num_boxes: 2,
            num_classes: 20,
        }
    }
}

/// YOLOv1 loss module. Holds only the hyper-parameters; the backend type `B`
/// is used exclusively at the tensor level, hence the `PhantomData` marker.
pub struct YoloV1Loss<B: Backend> {
    config: YoloV1LossConfig,
    _phantom: core::marker::PhantomData<B>,
}

impl<B: Backend> YoloV1Loss<B> {
    /// Builds a loss instance from the given configuration.
    pub fn new(config: YoloV1LossConfig) -> Self {
        let _phantom = core::marker::PhantomData;
        Self { config, _phantom }
    }
    
    /// Computes the full YOLOv1 loss (coordinates + confidence + classes).
    ///
    /// * `prediction` — network output, `[batch, S, S, B*5 + C]`.
    /// * `target` — ground truth, `[batch, S, S, 1, 5 + C]` laid out as
    ///   `(x, y, w, h, conf, class...)` with a single box per grid cell.
    ///
    /// Returns the scalar total loss as a `Tensor<B, 1>`.
    pub fn forward(
        &self,
        prediction: Tensor<B, 4>,    // [batch, S, S, B*5 + C]
        target: Tensor<B, 5>,        // [batch, S, S, 1, 5 + C]
    ) -> Tensor<B, 1> {
        let [batch_size, grid_h, grid_w, c] = prediction.dims();
        assert_eq!(grid_h, grid_w, "prediction grid must be square");
        let s = grid_h;

        // Split the prediction channels: B*4 box coords, B confidences, C class scores.
        let pred_boxes = prediction
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..self.config.num_boxes * 4])
            .reshape([batch_size, s, s, self.config.num_boxes, 4]);
        let pred_confidence = prediction
            .clone()
            .slice([0..batch_size, 0..s, 0..s, self.config.num_boxes * 4..self.config.num_boxes * 5])
            .reshape([batch_size, s, s, self.config.num_boxes]);
        let pred_classes = prediction
            .slice([0..batch_size, 0..s, 0..s, self.config.num_boxes * 5..c])
            .reshape([batch_size, s, s, self.config.num_classes]);

        // Split the target. The target carries a single box per cell (its box
        // axis has size 1), so it is sliced with `0..1` — not `0..num_boxes` —
        // and its channel count is `5 + num_classes`, NOT the prediction's
        // `c = B*5 + C`.
        let class_end = 5 + self.config.num_classes;
        let target_boxes = target
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..1, 0..4]); // [batch, S, S, 1, 4]
        let target_confidence = target
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..1, 4..5])
            .squeeze::<4>(3); // [batch, S, S, 1]
        let target_classes = target
            .slice([0..batch_size, 0..s, 0..s, 0..1, 5..class_end])
            .squeeze::<4>(3); // [batch, S, S, C]

        // Build responsibility masks from the IoU between predictions and targets.
        let (obj_mask, noobj_mask) = self.calculate_iou_and_masks(
            &pred_boxes,
            &target_boxes,
            &target_confidence,
        );

        // Individual loss terms.
        let coord_loss = self.coordinate_loss(
            pred_boxes,
            target_boxes,
            obj_mask.clone(),
            self.config.lambda_coord,
        );

        let conf_loss = self.confidence_loss(
            pred_confidence,
            target_confidence,
            obj_mask.clone(),
            noobj_mask,
            self.config.lambda_noobj,
        );

        let class_loss = self.class_loss(pred_classes, target_classes, obj_mask);

        // Total loss.
        coord_loss + conf_loss + class_loss
    }

    /// Builds the object / no-object responsibility masks from IoU.
    ///
    /// * `pred_boxes` — `[batch, S, S, B, 4]`
    /// * `target_boxes` — `[batch, S, S, 1, 4]`
    /// * `target_confidence` — `[batch, S, S, 1]`, 1.0 where the cell owns an object
    ///
    /// Returns `(obj_mask, noobj_mask)`, both `[batch, S, S, B]`.
    fn calculate_iou_and_masks(
        &self,
        pred_boxes: &Tensor<B, 5>,    // [batch, S, S, B, 4]
        target_boxes: &Tensor<B, 5>,   // [batch, S, S, 1, 4]
        target_confidence: &Tensor<B, 4>, // [batch, S, S, 1]
    ) -> (Tensor<B, 4, Int>, Tensor<B, 4, Int>) {
        let device = pred_boxes.device();

        // IoU of every predicted box against the cell's ground-truth box.
        let ious = self.calculate_iou(pred_boxes, target_boxes); // [batch, S, S, B]

        // Index of the best-matching box in each cell; both results keep the
        // reduced axis, so shapes are [batch, S, S, 1].
        let (_max_iou, max_idx) = ious.clone().max_dim_with_indices(3);

        // One-hot mask over the box axis selecting the best box: the arange is
        // [1, 1, 1, B] and broadcasts against `max_idx` ([batch, S, S, 1]).
        let best_box_mask = Tensor::arange(0..self.config.num_boxes as i64, &device)
            .reshape([1, 1, 1, self.config.num_boxes])
            .equal(max_idx)
            .int();

        // obj: the cell contains an object AND this box has the highest IoU.
        // `repeat_dim` expands the singleton box axis to all B boxes.
        let obj_mask = target_confidence
            .clone()
            .int()
            .repeat_dim(3, self.config.num_boxes) // [batch, S, S, B]
            * best_box_mask;

        // noobj: the cell has no object, OR the box overlaps poorly (IoU < 0.5).
        let noobj_mask = target_confidence
            .clone()
            .equal_elem(0.0)
            .int()
            .repeat_dim(3, self.config.num_boxes) // [batch, S, S, B]
            .bitwise_or(ious.lower_elem(0.5).int());

        (obj_mask, noobj_mask)
    }
    
    /// IoU (intersection over union) between each predicted box and the cell's
    /// single target box.
    ///
    /// * `pred_boxes` — `[batch, S, S, B, 4]` stored as `(cx, cy, w, h)`
    /// * `target_boxes` — `[batch, S, S, 1, 4]`
    ///
    /// Returns `[batch, S, S, B]`.
    fn calculate_iou(
        &self,
        pred_boxes: &Tensor<B, 5>,  // [batch, S, S, B, 4]
        target_boxes: &Tensor<B, 5>, // [batch, S, S, 1, 4]
    ) -> Tensor<B, 4> { // [batch, S, S, B]
        let [batch_size, grid_h, grid_w, num_boxes, _] = pred_boxes.dims();
        assert_eq!(grid_h, grid_w, "prediction grid must be square");
        let s = grid_h;

        // Center and size of the predicted boxes.
        let pred_xy = pred_boxes
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 0..2]);
        let pred_wh = pred_boxes
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 2..4]);

        // The target holds ONE box per cell (its box axis has size 1): slice
        // `0..1` and repeat along the box axis so shapes match the predictions.
        let target_xy = target_boxes
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..1, 0..2])
            .repeat_dim(3, num_boxes);
        let target_wh = target_boxes
            .clone()
            .slice([0..batch_size, 0..s, 0..s, 0..1, 2..4])
            .repeat_dim(3, num_boxes);

        // Corner coordinates (boxes are stored as center + size).
        let pred_left_top = pred_xy.clone() - pred_wh.clone() / 2.0;
        let pred_right_bottom = pred_xy + pred_wh.clone() / 2.0;
        let target_left_top = target_xy.clone() - target_wh.clone() / 2.0;
        let target_right_bottom = target_xy + target_wh.clone() / 2.0;

        // Intersection rectangle; width/height clamp at 0 for disjoint boxes.
        let inter_left_top = pred_left_top.max_pair(target_left_top);
        let inter_right_bottom = pred_right_bottom.min_pair(target_right_bottom);
        let inter_wh = (inter_right_bottom - inter_left_top).clamp(0.0, f32::MAX);

        // Areas of the intersection, prediction, and target.
        let inter_area = inter_wh.clone().slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 0..1])
            * inter_wh.slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 1..2]);
        let pred_area = pred_wh.clone().slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 0..1])
            * pred_wh.slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 1..2]);
        let target_area = target_wh.clone().slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 0..1])
            * target_wh.slice([0..batch_size, 0..s, 0..s, 0..num_boxes, 1..2]);

        // IoU = intersection / union; epsilon guards against division by zero.
        let union_area = pred_area + target_area - inter_area.clone();
        (inter_area / (union_area + 1e-6)).squeeze::<4>(4)
    }

    fn coordinate_loss(
        &self,
        pred_boxes: Tensor<B, 5>,  // [batch, S, S, B, 4] - (x, y, w, h)
        target_boxes: Tensor<B, 5>, // [batch, S, S, 1, 4] - 真实框
        obj_mask: Tensor<B, 4, Int>, // [batch, S, S, B] - 目标存在掩码
        lambda_coord: f32,
    ) -> Tensor<B, 1> {
        // 提取坐标
        let pred_xy = pred_boxes.clone()
            .slice([0..self.config.batch_size, 0..self.config.grid_size, 0..self.config.grid_size, 0..self.config.num_boxes, 0..2]); // x, y
        let pred_wh = pred_boxes
            .slice([0..self.config.batch_size, 0..self.config.grid_size, 0..self.config.grid_size, 0..self.config.num_boxes, 2..4]); // w, h
        
        let target_xy = target_boxes.clone()
            .slice([0..self.config.batch_size, 0..self.config.grid_size, 0..self.config.grid_size, 0..self.config.num_boxes, 0..2]);
        let target_wh = target_boxes
            .slice([0..self.config.batch_size, 0..self.config.grid_size, 0..self.config.grid_size, 0..self.config.num_boxes, 2..4]);
        
        // 坐标损失 (x, y)
        let xy_loss = MseLoss::default().forward(pred_xy, target_xy, burn::nn::loss::Reduction::Mean);
        
        // 宽高损失 - 使用平方根减轻大小框差异
        let wh_loss = MseLoss::default().forward(
            pred_wh.powf_scalar(0.5), 
            target_wh.powf_scalar(0.5), 
            burn::nn::loss::Reduction::Mean
        );
        
        // 只计算有目标的损失
        let _num_objects = obj_mask.clone().float().sum().clamp(1.0, f32::MAX);
        
        (xy_loss + wh_loss) * obj_mask.clone().float().mean() * lambda_coord
    }

    /// Confidence loss: squared error on objectness for responsible boxes,
    /// plus a down-weighted (`lambda_noobj`) squared error pushing the
    /// confidence of non-responsible boxes toward zero.
    ///
    /// * `pred_confidence` — `[batch, S, S, B]`
    /// * `target_confidence` — `[batch, S, S, 1]`, broadcast over the box axis
    fn confidence_loss(
        &self,
        pred_confidence: Tensor<B, 4>,  // [batch, S, S, B]
        target_confidence: Tensor<B, 4>, // [batch, S, S, 1]
        obj_mask: Tensor<B, 4, Int>,    // responsible-box mask
        noobj_mask: Tensor<B, 4, Int>,  // no-object mask
        lambda_noobj: f32,
    ) -> Tensor<B, 1> {
        // Responsible boxes: confidence should match the target (1.0 there).
        let obj_confidence_loss = MseLoss::default().forward(
            pred_confidence.clone() * obj_mask.clone().float(),
            target_confidence * obj_mask.float(),
            burn::nn::loss::Reduction::Mean,
        );

        // Non-responsible boxes: confidence should be 0. The target is simply
        // a zero tensor — multiplying zeros by the mask would be a no-op.
        let noobj_confidence_loss = MseLoss::default().forward(
            pred_confidence.clone() * noobj_mask.float(),
            Tensor::zeros_like(&pred_confidence),
            burn::nn::loss::Reduction::Mean,
        ) * lambda_noobj;

        obj_confidence_loss + noobj_confidence_loss
    }

    /// Classification loss: per-element squared error on class scores,
    /// restricted to cells containing an object and averaged over the number
    /// of such cells.
    fn class_loss(
        &self,
        pred_classes: Tensor<B, 4>,     // [batch, S, S, C]
        target_classes: Tensor<B, 4>,   // [batch, S, S, C]
        obj_mask: Tensor<B, 4, Int>,    // [batch, S, S, B] - box-level object mask
    ) -> Tensor<B, 1> {
        // A cell is "responsible" when any of its B boxes carries an object.
        let cell_mask = obj_mask.sum_dim(3).greater_elem(0).int();

        // Zero out the squared errors of cells without objects.
        let per_element = MseLoss::default().forward_no_reduction(
            pred_classes.clone(),
            target_classes.clone(),
        );
        let masked = per_element * cell_mask.clone().unsqueeze::<4>().float();

        // Average over responsible cells (clamped to avoid division by zero).
        let cell_count = cell_mask.float().sum().clamp(1.0, f32::MAX);
        masked.sum() / cell_count
    }
    
    // Sub-loss implementations above.
}