import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F


# Suppress warning messages
warnings.filterwarnings('ignore')

# Number of classes (change this when using your own dataset)
CLASS_NUM = 20


class PLNLoss(nn.Module):
    """
    Loss function for the Point Linking Network (PLN).

    Computes the loss between PLN predictions and labels, composed of:
    1. Point-existence loss (p_loss): whether a point exists at a cell.
    2. Coordinate loss (coord_loss): predicted point position offsets.
    3. Link loss (link_loss): predicted linking relations between points.
    4. Class loss (class_loss): predicted object class scores.
    5. No-object loss (noobj_loss): confidence penalty at empty cells.
    """

    def __init__(self, S=14, B=2, w_class=0.0000001, w_coord=1.0, w_link=0.01,
                 num_classes=None):
        """
        Initialize the PLN loss.

        Args:
            S (int): grid size, default 14.
            B (int): boxes per grid cell, default 2 (kept for interface
                compatibility; not used in the loss computation).
            w_class (float): class-loss weight.
            w_coord (float): coordinate-loss weight.
            w_link (float): link-loss weight.
            num_classes (int, optional): number of object classes. Defaults
                to the module-level CLASS_NUM when not given (backward
                compatible with the previous hard-coded behavior).
        """
        super(PLNLoss, self).__init__()
        self.S = S              # grid size, default 14
        self.B = B              # number of boxes, default 2
        self.w_class = w_class  # class-loss weight
        self.w_coord = w_coord  # coordinate-loss weight
        self.w_link = w_link    # link-loss weight
        self.classes = CLASS_NUM if num_classes is None else num_classes
        self.noobj_scale = 0.1  # weight of the no-object confidence loss

        # Per-point feature layout: 1 confidence + 2 coords + 28 links
        # + 20 classes = 51 channels.
        self.feature_size = 51
        self.num_points = 4     # points per branch (2 center + 2 corner points)

    @staticmethod
    def _unwrap_tensor(obj, max_depth=10):
        """
        Return the first torch.Tensor found inside (possibly nested)
        tuples/lists; raise ValueError if none can be found.

        Depth-first search over nested containers is tried first; if that
        fails, repeatedly take the first element up to ``max_depth`` levels.
        """
        def depth_first(o):
            # Recursive unwrap: first tensor wins.
            if isinstance(o, torch.Tensor):
                return o
            if isinstance(o, (tuple, list)):
                for item in o:
                    found = depth_first(item)
                    if found is not None:
                        return found
            return None

        found = depth_first(obj)
        if found is not None:
            return found

        # Brute-force fallback: peel off the first element layer by layer.
        current = obj
        for _ in range(max_depth):
            if isinstance(current, torch.Tensor):
                return current
            if isinstance(current, (tuple, list)) and len(current) > 0:
                current = current[0]
            else:
                break
        if isinstance(current, torch.Tensor):
            return current
        raise ValueError("Could not extract tensor from prediction output")

    @staticmethod
    def _coerce_to_4d(pred_tensor, target_tensor):
        """
        Normalize the prediction tensor to 4D (batch, channels, 14, 14).

        Handles a 5D input (batch, scales, channels, H, W) by taking the
        first scale, a 3D input by adding a batch dimension, and any other
        rank by attempting a reshape that matches the target's batch size.

        Raises:
            ValueError: if the tensor cannot be shaped into 4D.
        """
        if pred_tensor.dim() == 5:
            # Assumed (batch, scales, channels, H, W): keep the first scale.
            pred_tensor = pred_tensor[:, 0, :, :, :]
        elif pred_tensor.dim() == 3:
            pred_tensor = pred_tensor.unsqueeze(0)
        elif pred_tensor.dim() != 4:
            if pred_tensor.numel() > 0:
                batch_size = target_tensor.shape[0] if target_tensor.dim() > 0 else 1
                total_elements = pred_tensor.numel()
                spatial_size = 14 * 14
                channels = (total_elements // batch_size) // spatial_size

                if channels * spatial_size * batch_size == total_elements:
                    pred_tensor = pred_tensor.view(batch_size, channels, 14, 14)
                else:
                    raise ValueError(
                        f"Cannot reshape tensor with {total_elements} elements to proper 4D format"
                    )

        if pred_tensor.dim() != 4:
            raise ValueError(f"Expected 4D tensor after processing, got {pred_tensor.shape}")
        return pred_tensor

    def forward(self, pred_tensor, target_tensor):
        """
        Compute the PLN loss.

        Args:
            pred_tensor (Tensor): predictions, shape (batch_size, 204*4, 14, 14).
                May also arrive wrapped in (nested) tuples/lists or with an
                extra scale dimension; it is unwrapped and normalized first.
            target_tensor (Tensor): targets, shape (batch_size, 4, 14, 14, 204).

        Returns:
            tuple: (total_loss, p_loss, coord_loss, class_loss)
        """
        pred_tensor = self._unwrap_tensor(pred_tensor)
        pred_tensor = self._coerce_to_4d(pred_tensor, target_tensor)

        device = pred_tensor.device
        batch_size = pred_tensor.size(0)

        # Rearrange both tensors to (batch, S, S, 204*4) so that the channel
        # axis is last and the two layouts line up.
        pred_flat = pred_tensor.permute(0, 2, 3, 1)
        target_flat = (
            target_tensor.permute(0, 2, 3, 1, 4)
            .contiguous()
            .view(batch_size, self.S, self.S, -1)
        )

        p_loss = 0
        coord_loss = 0
        link_loss = 0
        class_loss = 0
        noobj_loss = 0

        branch_size = self.num_points * self.feature_size  # 4 * 51 = 204

        # Iterate over the 4 branches, then the 4 points per branch.
        for branch in range(4):
            b0 = branch * branch_size
            pred_branch = pred_flat[:, :, :, b0:b0 + branch_size]
            target_branch = target_flat[:, :, :, b0:b0 + branch_size]

            for point in range(self.num_points):
                p0 = point * self.feature_size
                pred_point = pred_branch[:, :, :, p0:p0 + self.feature_size]
                target_point = target_branch[:, :, :, p0:p0 + self.feature_size]

                # Channel 0 is the point-existence confidence.
                pred_conf = pred_point[:, :, :, 0]
                target_conf = target_point[:, :, :, 0]

                # Cells whose target confidence is zero are negatives.
                neg_mask = target_conf == 0

                # No-object loss: confidence error at cells with no point.
                if neg_mask.any():
                    squared_error = (pred_conf - target_conf) ** 2
                    noobj_loss += (squared_error * neg_mask.float()).sum()

                pos_mask = ~neg_mask
                if pos_mask.any():
                    # Point-existence loss at positive cells only.
                    p_loss += F.mse_loss(
                        pred_conf[pos_mask], target_conf[pos_mask], reduction='sum'
                    )

                    # Channels 1:3 are the 2 coordinate offsets, 3:31 the 28
                    # link channels, 31:51 the 20 class scores. Slicing each
                    # channel group and masking once replaces the original
                    # per-channel loops with the same summed result.
                    coord_loss += F.mse_loss(
                        pred_point[:, :, :, 1:3][pos_mask],
                        target_point[:, :, :, 1:3][pos_mask],
                        reduction='sum',
                    )
                    link_loss += F.mse_loss(
                        pred_point[:, :, :, 3:31][pos_mask],
                        target_point[:, :, :, 3:31][pos_mask],
                        reduction='sum',
                    )
                    class_loss += F.mse_loss(
                        pred_point[:, :, :, 31:51][pos_mask],
                        target_point[:, :, :, 31:51][pos_mask],
                        reduction='sum',
                    )

        # No positive point anywhere: return zero losses.
        # NOTE(review): this also discards any accumulated noobj_loss and
        # returns tensors detached from the graph; kept as-is to preserve
        # the original behavior.
        if p_loss == 0 and coord_loss == 0 and link_loss == 0 and class_loss == 0:
            zero_tensor = torch.tensor(0.0, device=device)
            return zero_tensor, zero_tensor, zero_tensor, zero_tensor

        # Combine the weighted components into the total loss.
        total_loss = (
            p_loss
            + self.w_coord * coord_loss
            + self.w_class * class_loss
            + self.w_link * link_loss
            + self.noobj_scale * noobj_loss
        )

        return total_loss, p_loss, coord_loss, class_loss


# 为了兼容原始YOLO接口，创建一个包装器
class YoloV3Loss(nn.Module):
    """Wrapper exposing the PLN loss through a YOLOv3-compatible interface."""

    def __init__(self, anchors, strides, iou_threshold_loss=0.5):
        """
        Construct the wrapper.

        ``anchors``, ``strides`` and ``iou_threshold_loss`` are accepted only
        for compatibility with the original YOLOv3 loss signature; the actual
        computation is delegated to PLNLoss.
        """
        super(YoloV3Loss, self).__init__()
        self.pln_loss = PLNLoss()

    def forward(self, pred_tensor, target_tensor):
        """
        YOLOv3-compatible forward pass.

        Args:
            pred_tensor: raw PLN network output.
            target_tensor: PLN-format target labels.

        Returns:
            tuple: (loss, loss_coord, loss_conf, loss_cls) — reordered to
            match the original YOLOv3Loss return convention.
        """
        outcome = self.pln_loss(pred_tensor, target_tensor)
        total, conf_component, coord_component, cls_component = outcome
        # PLN returns (total, p, coord, class); YOLO callers expect
        # (total, coord, conf, class).
        return total, coord_component, conf_component, cls_component