import paddle
import numpy as np

class ConvUnit(paddle.nn.Layer):
    """Conv2D + BatchNorm2D + LeakyReLU building block.

    Output spatial size follows H_out = (H + 2*P - F) / S + 1.
    """

    def __init__(self, in_ch, out_ch, kernel_size=1, stride=1, padding='same'):
        """
        params:
        - in_ch      : number of input channels
        - out_ch     : number of output channels
        - kernel_size: convolution kernel size
        - stride     : convolution stride
        - padding    : padding size (or 'same')
        """
        super().__init__()

        # Convolution weights: N(0, 0.02) init, no regularizer, no bias term.
        conv_weight = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Normal(0, 0.02),
            regularizer=None)
        self.conv = paddle.nn.Conv2D(in_channels=in_ch,
                                     out_channels=out_ch,
                                     kernel_size=kernel_size,
                                     stride=stride,
                                     padding=padding,
                                     weight_attr=conv_weight,
                                     bias_attr=False)

        # BatchNorm scale/shift: L2Decay(0) explicitly disables weight decay.
        bn_weight = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Normal(0, 0.02),
            regularizer=paddle.regularizer.L2Decay(0))
        bn_bias = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Constant(0),
            regularizer=paddle.regularizer.L2Decay(0))
        self.batch_norm = paddle.nn.BatchNorm2D(num_features=out_ch,
                                                weight_attr=bn_weight,
                                                bias_attr=bn_bias)

        # Leaky activation with negative slope 0.1.
        self.leaky_relu = paddle.nn.LeakyReLU(0.1)

    def forward(self, x):
        """Apply conv -> batch norm -> leaky ReLU to `x` and return the result."""
        return self.leaky_relu(self.batch_norm(self.conv(x)))

class DownSample(paddle.nn.Layer):
    """Halve the spatial resolution with a stride-2 3x3 ConvUnit."""

    def __init__(self, in_ch, out_ch):
        """
        params:
        - in_ch : number of input channels
        - out_ch: number of output channels
        """
        super().__init__()
        # A 3x3 convolution with stride 2 performs the downsampling.
        self.conv = ConvUnit(in_ch, out_ch, kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        """Downsample the feature map `x` and return the result."""
        return self.conv(x)
    
class BasicBlock(paddle.nn.Layer):
    """DarkNet residual block: 1x1 reduce, 3x3 expand, then a skip connection."""

    def __init__(self, in_ch, out_ch):
        """
        params:
        - in_ch : number of input channels
        - out_ch: number of intermediate channels (the 3x3 conv outputs out_ch * 2)
        """
        super().__init__()
        # 1x1 conv reduces channels, 3x3 conv expands them back to out_ch * 2.
        self.conv1 = ConvUnit(in_ch, out_ch, kernel_size=1, stride=1, padding=0)
        self.conv2 = ConvUnit(out_ch, out_ch * 2, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        """Return x + conv2(conv1(x)).

        The element-wise add requires the residual branch to match x's
        channel count, i.e. in_ch == out_ch * 2.
        """
        residual = self.conv2(self.conv1(x))
        return paddle.add(x=x, y=residual)
    
class BlockGroup(paddle.nn.Layer):
    """A sequence of `num_blocks` BasicBlocks sharing the same channel config."""

    def __init__(self, in_ch, out_ch, num_blocks):
        """
        params:
        - in_ch     : input channels of every block
        - out_ch    : intermediate channels of every block
        - num_blocks: number of residual blocks in the group
        """
        super().__init__()

        # The first residual block is kept as a named attribute.
        self.basicblock0 = BasicBlock(in_ch, out_ch)

        # Remaining blocks are registered as sublayers 'block_1'..'block_{n-1}'.
        self.block_list = []
        for idx in range(1, num_blocks):
            block = self.add_sublayer('block_' + str(idx), BasicBlock(in_ch, out_ch))
            self.block_list.append(block)

    def forward(self, x):
        """Run `x` through every residual block in order and return the result."""
        x = self.basicblock0(x)
        for block in self.block_list:
            x = block(x)
        return x
    
class DarkNet53(paddle.nn.Layer):
    """DarkNet-53 backbone producing three feature maps for the neck."""

    def __init__(self):
        """Build the stem convolutions, five residual groups and the four
        downsampling layers placed between consecutive groups."""
        super().__init__()

        # Stem: 3x3 conv then a stride-2 downsample (3 -> 32 -> 64 channels).
        self.conv0 = ConvUnit(in_ch=3, out_ch=32, kernel_size=3, stride=1, padding=1)
        self.downsample0 = DownSample(in_ch=32, out_ch=64)

        # Residual groups: group i takes 32*2^(i+1) channels and holds
        # num_groups[i] BasicBlocks.
        self.num_groups = [1, 2, 8, 8, 4]
        self.group_list = []
        for idx, blocks in enumerate(self.num_groups):
            group = self.add_sublayer(
                'group_' + str(idx),
                BlockGroup(in_ch=32 * (2 ** (idx + 1)),
                           out_ch=32 * (2 ** idx),
                           num_blocks=blocks))
            self.group_list.append(group)

        # Downsampling layers between groups double the channel count.
        self.downs_list = []
        for idx in range(len(self.num_groups) - 1):
            downsample = self.add_sublayer(
                'downs_' + str(idx),
                DownSample(in_ch=32 * (2 ** (idx + 1)),
                           out_ch=32 * (2 ** (idx + 2))))
            self.downs_list.append(downsample)

    def forward(self, x):
        """
        params:
        - x: input image tensor
        return:
        - the last three group outputs, deepest first: [c4, c3, c2]
        """
        x = self.downsample0(self.conv0(x))

        c_list = []
        for idx, group in enumerate(self.group_list):
            x = group(x)
            c_list.append(x)
            # Downsample between groups, but not after the last one.
            if idx < len(self.num_groups) - 1:
                x = self.downs_list[idx](x)

        # The three deepest feature maps, reversed so the smallest map comes first.
        return c_list[-1:-4:-1]
    
####################################################################################

class YOLOv3Detector(paddle.nn.Layer):
    """YOLOv3 detection block: five route convolutions plus a tip convolution.

    forward() returns both the route feature (reused by the next FPN level)
    and the tip feature (consumed by the detection head).
    """

    def __init__(self, in_ch, out_ch):
        """
        params:
        - in_ch : number of input channels
        - out_ch: number of route-branch output channels
                  (the tip branch outputs out_ch * 2)
        """
        super().__init__()

        # Route branch: alternating 1x1 / 3x3 convolutions.
        # FIX: use distinct loop variable names. The original code reused
        # `in_ch`/`out_ch` as loop variables, so the tip convolution below
        # only got the right channel count because the last row of
        # `conv_arch` happens to end at the original `out_ch`.
        conv_arch = [[in_ch,      out_ch,     1],
                     [out_ch,     out_ch * 2, 3],
                     [out_ch * 2, out_ch,     1],
                     [out_ch,     out_ch * 2, 3],
                     [out_ch * 2, out_ch,     1]]
        self.conv_route = paddle.nn.Sequential()
        for i, (c_in, c_out, k) in enumerate(conv_arch):
            self.conv_route.add_sublayer(
                'conv_' + str(i),
                ConvUnit(c_in, c_out, k)
            )

        # Tip branch: 3x3 convolution doubling the route channels.
        self.conv_tip = ConvUnit(in_ch=out_ch, out_ch=out_ch * 2, kernel_size=3)

    def forward(self, x):
        """
        params:
        - x    : input feature map
        return:
        - route: route feature (out_ch channels)
        - tip  : tip feature (out_ch * 2 channels)
        """
        route = self.conv_route(x)  # route feature
        tip = self.conv_tip(route)  # tip feature

        return route, tip

class YOLOv3FPN(paddle.nn.Layer):
    """YOLOv3 neck: a top-down feature pyramid over the backbone outputs."""

    def __init__(self, channels=[1024, 512, 256]):
        """
        params:
        - channels: channel counts of the backbone features, deepest first
        """
        super().__init__()

        self.num_detector = len(channels)  # one detector per pyramid level
        self.dete_list = []  # detection blocks
        self.rout_list = []  # 1x1 route convolutions feeding the upsample

        for i in range(self.num_detector):
            # Levels after the first also receive the upsampled route
            # feature, which adds channels[i] // 2 input channels.
            if i == 0:
                det_in = channels[i]
            else:
                det_in = channels[i] + channels[i] // 2
            detector = self.add_sublayer(
                'dete_' + str(i),
                YOLOv3Detector(det_in, channels[i] // 2)
            )
            self.dete_list.append(detector)

            # No route convolution is needed after the last level.
            if i < self.num_detector - 1:
                route_conv = self.add_sublayer(
                    'rout_' + str(i),
                    ConvUnit(channels[i] // 2, channels[i] // 4)
                )
                self.rout_list.append(route_conv)

        # 2x upsampling used to merge with the next, larger level.
        self.upsample = paddle.nn.Upsample(scale_factor=2.0)

    def forward(self, c_list):
        """
        params:
        - c_list: backbone features [c0, c1, c2], deepest first
        return:
        - t_list: tip features [t0, t1, t2], one per pyramid level
        """
        t_list = []
        route = None  # upsampled route feature carried from the previous level
        for i, feat in enumerate(c_list):
            # Merge the previous level's route feature along the channel axis.
            if i > 0:
                feat = paddle.concat([route, feat], axis=1)

            route, tip = self.dete_list[i](feat)
            t_list.append(tip)

            # Prepare the route feature for the next (larger) level.
            if i < self.num_detector - 1:
                route = self.upsample(self.rout_list[i](route))

        return t_list

####################################################################################

class YOLOv3Head(paddle.nn.Layer):
    """YOLOv3 head: one 1x1 output convolution per pyramid level."""

    def __init__(self,
                 num_classes=20,
                 anchor_mask=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
                 channels=[1024, 512, 256]):
        """
        params:
        - num_classes: number of object classes
        - anchor_mask: anchor indices used at each level
        - channels   : input channel counts, one per level
        """
        super().__init__()

        self.conv_list = []  # one output convolution per level
        for i, mask in enumerate(anchor_mask):
            # Per anchor the output stores 4 box values + 1 objectness
            # + num_classes class scores.
            weight_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(0, 0.02),
                regularizer=None)
            bias_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(0),
                regularizer=paddle.regularizer.L2Decay(0))
            conv = self.add_sublayer(
                'conv_' + str(i),
                paddle.nn.Conv2D(
                    in_channels=channels[i],
                    out_channels=len(mask) * (num_classes + 5),
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=weight_attr,
                    bias_attr=bias_attr))
            self.conv_list.append(conv)

    def forward(self, t_list):
        """
        params:
        - t_list: neck features [t0, t1, t2]
        return:
        - p_list: raw prediction maps [p0, p1, p2]
        """
        return [conv(feat) for conv, feat in zip(self.conv_list, t_list)]
    
####################################################################################

class YOLOv3(paddle.nn.Layer):
    def __init__(self,
                 num_classes     =20,
                 anchor_size     =[[10, 13], [16, 30], [33, 23],
                                   [30, 61], [62, 45], [59, 119],
                                   [116, 90], [156, 198], [373, 326]],
                 anchor_mask     =[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
                 ignore_threshold=0.70,
                 downsample_ratio=[32, 16, 8],
                 score_threshold =0.70,
                 nms_threshold   =0.45):
        """
        Initialize the detection network.
        params:
        - num_classes     : number of object classes
        - anchor_size     : anchor box [w, h] sizes in pixels
        - anchor_mask     : anchor indices used at each pyramid level
        - ignore_threshold: IoU above which a cell is not treated as a negative
        - downsample_ratio: downsampling factor of each pyramid level
        - score_threshold : minimum score kept at inference time
        - nms_threshold   : IoU threshold for non-maximum suppression
        """
        super().__init__()
        
        # Store the hyper-parameters.
        self.num_classes = num_classes           # number of classes
        self.anchor_size = anchor_size           # anchor sizes
        self.anchor_mask = anchor_mask           # anchor masks
        self.ignore_threshold = ignore_threshold # negative-ignore threshold
        self.downsample_ratio = downsample_ratio # per-level downsample factors
        self.score_threshold = score_threshold   # inference score threshold
        self.nms_threshold = nms_threshold       # NMS IoU threshold
        
        # Sub-networks.
        self.backbone = DarkNet53()         # backbone
        self.neck = YOLOv3FPN()             # neck (FPN)
        self.head = YOLOv3Head(num_classes) # detection head
        
    def forward(self, images):
        """
        Run the full detection network.
        params:
        - images: input image batch
        return:
        - p_list: raw prediction maps, one per pyramid level
        """
        c_list = self.backbone(images) # backbone features
        t_list = self.neck(c_list)     # neck features
        p_list = self.head(t_list)     # head prediction maps
        
        return p_list

####################################################################################
    
    def losses(self, p_list, inputs):
        """
        Compute the total training loss of a batch.
        params:
        - p_list: prediction maps, one per pyramid level
        - inputs: batch data dict (see get_predict_label / get_negative_mask)
        return:
        - losses: sum of the per-level losses
        """
        # Ground-truth targets and negative-example masks, one per level.
        l_list = self.get_predict_label(inputs)
        n_list = self.get_negative_mask(p_list, inputs)

        # Accumulate the loss of every pyramid level.
        total = 0
        for infer, label, nmask in zip(p_list, l_list, n_list):
            total += self.get_loss(infer, label, nmask)

        return total
    
    def get_predict_label(self, inputs):
        """
        Build per-level training targets from the ground-truth annotations.
        params:
        - inputs: dict with 'image' [b,3,h,w], 'gtcls' (class ids) and
                  'gtbox' (normalized center-x/center-y/w/h boxes) tensors
        return:
        - l_list: one target tensor per level, shape
                  [b, num_anchor, 6 + num_classes, grid_h, grid_w];
                  channels: [dx, dy, tw, th, loc_weight, obj, one-hot classes]
        """
        # Pull the batch data out as numpy arrays.
        image = inputs['image'].numpy()
        gtcls = inputs['gtcls'].numpy()
        gtbox = inputs['gtbox'].numpy()

        l_list = []
        for anchor_mask, downsample_ratio in zip(self.anchor_mask, self.downsample_ratio):
            # Grid geometry of this pyramid level.
            batch_size = image.shape[0]
            num_anchor = len(anchor_mask)
            h = image.shape[2]
            w = image.shape[3]
            grid_h = int(h / downsample_ratio)
            grid_w = int(w / downsample_ratio)

            label = paddle.zeros([batch_size, num_anchor, 6 + self.num_classes,
                                  grid_h, grid_w], dtype='float32')
            label.stop_gradient = True  # targets never receive gradients

            for m, (classes, bxs) in enumerate(zip(gtcls, gtbox)):  # batch items
                for c, b in zip(classes, bxs):  # ground-truth objects
                    gx, gy, gw, gh = b  # normalized center and size
                    if gw <= 0. or gh <= 0.:  # padding slot, no object
                        continue

                    # Match the box to the anchor with the highest IoU
                    # (both boxes anchored at the origin, sizes only).
                    max_iou = 0.
                    max_idx = -1
                    for n, anchor_size in enumerate(self.anchor_size):
                        box1 = [0., 0., gw, gh]
                        box2 = [0., 0., anchor_size[0] / w, anchor_size[1] / h]
                        iou = self.get_iou_xyxy(box1, box2)
                        if iou > max_iou:
                            max_iou = iou
                            max_idx = n

                    # Only anchors assigned to this level produce a target here.
                    if max_idx in anchor_mask:
                        k = anchor_mask.index(max_idx)
                        # FIX: clamp the cell indices so gx == 1.0 or
                        # gy == 1.0 cannot index one past the last grid
                        # cell (the original int(gx * grid_w) raised an
                        # IndexError at that boundary).
                        i = min(int(gx * grid_w), grid_w - 1)
                        j = min(int(gy * grid_h), grid_h - 1)

                        # Location targets: dx = sigmoid(tx) = gx*grid_w - i,
                        # dy likewise; tw/th = log(gt size / anchor size).
                        label[m, k, 0, j, i] = gx * grid_w - i
                        label[m, k, 1, j, i] = gy * grid_h - j
                        label[m, k, 2, j, i] = np.log(gw * w / self.anchor_size[max_idx][0])
                        label[m, k, 3, j, i] = np.log(gh * h / self.anchor_size[max_idx][1])

                        # Location-loss weight 2 - gw*gh: boosts the
                        # contribution of small boxes.
                        label[m, k, 4, j, i] = 2. - gw * gh

                        # Objectness target.
                        label[m, k, 5, j, i] = 1.

                        # One-hot class target.
                        label[m, k, 6 + c, j, i] = 1.

            l_list.append(label)

        return l_list
    
    def get_iou_xyxy(self, box1, box2):
        """
        IoU of two boxes given in [x1, y1, x2, y2] format.
        params:
        - box1: first box
        - box2: second box
        return:
        - iou : intersection area / union area
        """
        # Intersection rectangle (zero area when the boxes are disjoint).
        ix1 = max(box1[0], box2[0])
        iy1 = max(box1[1], box2[1])
        ix2 = min(box1[2], box2[2])
        iy2 = min(box1[3], box2[3])
        intersection = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)

        # Union = area1 + area2 - intersection; epsilon guards div-by-zero.
        area1 = np.maximum(box1[2] - box1[0], 0) * np.maximum(box1[3] - box1[1], 0)
        area2 = np.maximum(box2[2] - box2[0], 0) * np.maximum(box2[3] - box2[1], 0)
        union = area1 + area2 - intersection + 1e-9

        return intersection / union
       
    def get_negative_mask(self, p_list, inputs):
        """
        Build the negative-example masks used by the objectness loss.
        A cell counts as a negative only when the IoU between its decoded
        prediction and every ground-truth box stays at or below
        ignore_threshold; higher-overlap cells are ignored.
        params:
        - p_list: prediction maps, one per pyramid level
        - inputs: batch data dict with 'image' and 'gtbox'
        return:
        - n_list: per-level masks [b, num_anchor, grid_h, grid_w];
                  1.0 marks a negative cell, 0.0 an ignored one
        """
        # Read the batch data.
        image = inputs['image'] # image batch
        gtbox = inputs['gtbox'] # ground-truth boxes (normalized xywh)
        
        # Build one mask per pyramid level.
        n_list = [] # negative-mask list
        for i, (anchor_mask, downsample_ratio) in enumerate(zip(self.anchor_mask, self.downsample_ratio)): # each level
            # Shapes of this level's prediction map.
            infer = p_list[i]             # prediction map
            batch_szie = infer.shape[0]   # batch size
            num_anchor = len(anchor_mask) # anchors at this level
            grid_h = infer.shape[2]       # grid height
            grid_w = infer.shape[3]       # grid width
            h = grid_h * downsample_ratio # network-input height
            w = grid_w * downsample_ratio # network-input width
            
            infer = infer.reshape([batch_szie, num_anchor, -1, grid_h, grid_w]) # split the anchor axis out
            pdloc = infer[:, :, 0:4, :, :]                                      # raw location channels
            pdbox = paddle.zeros(pdloc.shape, dtype=pdloc.dtype)                # decoded-box buffer
            
            # Decode the predicted boxes.
            tx = pdloc[:, :, 0, :, :] # raw tx
            ty = pdloc[:, :, 1, :, :] # raw ty
            tw = pdloc[:, :, 2, :, :] # raw tw
            th = pdloc[:, :, 3, :, :] # raw th
            
            yv, xv = paddle.meshgrid([paddle.arange(grid_h), paddle.arange(grid_w)])         # per-cell coordinates
            xy = paddle.stack([xv, yv]).cast(pdloc.dtype).reshape([1 ,1, 2, grid_h, grid_w]) # broadcastable shape
            cx = xy[:, :, 0, :, :] # cell x offsets
            cy = xy[:, :, 1, :, :] # cell y offsets
            
            anchor = [self.anchor_size[j] for j in anchor_mask]                               # this level's anchor sizes
            wh = paddle.to_tensor(anchor).cast(pdloc.dtype).reshape([1, num_anchor, 2, 1, 1]) # broadcastable shape
            pw = wh[:, :, 0, :, :] # anchor widths
            ph = wh[:, :, 1, :, :] # anchor heights
            
            pdbox[:, :, 0, :, :] = (cx + paddle.nn.functional.sigmoid(tx)) / grid_w # x = (cx + sigmoid(tx)) / grid_w
            pdbox[:, :, 1, :, :] = (cy + paddle.nn.functional.sigmoid(ty)) / grid_h # y = (cy + sigmoid(ty)) / grid_h
            pdbox[:, :, 2, :, :] = (pw * paddle.exp(tw)) / w                        # w = pw * exp(tw) / w
            pdbox[:, :, 3, :, :] = (ph * paddle.exp(th)) / h                        # h = ph * exp(th) / h
            
            pdbox = pdbox.transpose([0, 1, 3, 4, 2]).reshape([batch_szie, -1, 4])   # flatten to [b,m,4]
       
            # Mask cells whose best IoU against any ground truth is small.
            iou = self.get_iou_xywh(pdbox, gtbox) # IoU matrix [b, m, n_gt] (n_gt presumably 50 slots — verify against the data loader)
            iou_max = iou.max(axis=2) # best IoU per prediction, [b, m]
            
            nmask = paddle.cast(iou_max <= self.ignore_threshold, dtype=pdloc.dtype) # 1.0 where negative
            nmask = nmask.reshape([batch_szie, num_anchor, grid_h, grid_w])          # back to [b,a,h,w]
            nmask.stop_gradient = True                                               # masks carry no gradients
            
            # Collect this level's mask.
            n_list.append(nmask)
            
        return n_list
    
    def get_iou_xywh(self, box1, box2):
        """
        Pairwise IoU between two batched box tensors in [cx, cy, w, h] format.
        params:
        - box1: tensor of shape [b, m, 4]
        - box2: tensor of shape [b, g, 4]
        return:
        - iou : tensor of shape [b, m, g]
        """
        # Broadcast so every box in box1 meets every box in box2.
        box1 = box1.unsqueeze(2)  # [b, m, 4] -> [b, m, 1, 4]
        box2 = box2.unsqueeze(1)  # [b, g, 4] -> [b, 1, g, 4]

        # Corner coordinates of both sets of boxes.
        a_x1 = box1[:, :, :, 0] - box1[:, :, :, 2] / 2.0
        a_y1 = box1[:, :, :, 1] - box1[:, :, :, 3] / 2.0
        a_x2 = box1[:, :, :, 0] + box1[:, :, :, 2] / 2.0
        a_y2 = box1[:, :, :, 1] + box1[:, :, :, 3] / 2.0

        b_x1 = box2[:, :, :, 0] - box2[:, :, :, 2] / 2.0
        b_y1 = box2[:, :, :, 1] - box2[:, :, :, 3] / 2.0
        b_x2 = box2[:, :, :, 0] + box2[:, :, :, 2] / 2.0
        b_y2 = box2[:, :, :, 1] + box2[:, :, :, 3] / 2.0

        # Intersection, clipped to zero width/height when disjoint.
        inter_w = (paddle.minimum(a_x2, b_x2) - paddle.maximum(a_x1, b_x1)).clip(0)
        inter_h = (paddle.minimum(a_y2, b_y2) - paddle.maximum(a_y1, b_y1)).clip(0)
        intersection = inter_w * inter_h

        # Union with an epsilon guard against division by zero.
        area_a = (a_x2 - a_x1).clip(0) * (a_y2 - a_y1).clip(0)
        area_b = (b_x2 - b_x1).clip(0) * (b_y2 - b_y1).clip(0)
        union = area_a + area_b - intersection + 1e-9

        return intersection / union
        
    def get_loss(self, infer, label, nmask):
        """
        Compute the loss of a single pyramid level.
        params:
        - infer: prediction map, [b, a*(5+C), h, w]
        - label: target tensor from get_predict_label, [b, a, 6+C, h, w]
        - nmask: negative mask from get_negative_mask, [b, a, h, w]
        return:
        - loss : scalar; per-image sums averaged over the batch
        """
        # Reshape the predictions to match the label layout.
        b, a, c, h, w = label.shape             # label shape
        infer = infer.reshape([b, a, -1, h, w]) # [b, a, 5+C, h, w]
        
        # Split the prediction channels.
        pd_tx = infer[:, :, 0 , :, :] # raw tx; BCE-with-logits applies sigmoid to predictions, not targets
        pd_ty = infer[:, :, 1 , :, :] # raw ty
        pd_tw = infer[:, :, 2 , :, :] # raw tw
        pd_th = infer[:, :, 3 , :, :] # raw th
        pdobj = infer[:, :, 4 , :, :] # objectness logit
        pdcls = infer[:, :, 5:, :, :] # class logits
        
        # Split the label channels.
        lb_dx = label[:, :, 0 , :, :] # target dx = sigmoid(tx)
        lb_dy = label[:, :, 1 , :, :] # target dy = sigmoid(ty)
        lb_tw = label[:, :, 2 , :, :] # target tw
        lb_th = label[:, :, 3 , :, :] # target th
        lbscl = label[:, :, 4 , :, :] # location-loss weight
        lbobj = label[:, :, 5 , :, :] # objectness target
        lbcls = label[:, :, 6:, :, :] # one-hot class targets
        
        # Location loss (positives only, scaled by the size-based weight).
        loss_dx = paddle.nn.functional.binary_cross_entropy_with_logits(pd_tx, lb_dx, reduction='none') # dx loss
        loss_dy = paddle.nn.functional.binary_cross_entropy_with_logits(pd_ty, lb_dy, reduction='none') # dy loss
        loss_tw = paddle.abs(pd_tw - lb_tw) # tw L1 loss
        loss_th = paddle.abs(pd_th - lb_th) # th L1 loss
        loss_loc = (loss_dx + loss_dy + loss_tw + loss_th) * lbscl * lbobj # positives only (lbobj gates the cells)
        
        # Objectness loss: positives plus the negatives kept by nmask.
        loss_obj = paddle.nn.functional.binary_cross_entropy_with_logits(pdobj, lbobj, reduction='none') # per-cell loss
        loss_obj_pos = loss_obj * lbobj # positive cells
        loss_obj_neg = loss_obj * (1 - lbobj) * nmask # negative cells below the ignore threshold
        loss_obj = loss_obj_pos + loss_obj_neg # combined objectness loss
        
        # Classification loss (positives only).
        loss_cls = paddle.nn.functional.binary_cross_entropy_with_logits(pdcls, lbcls, reduction='none') # per-class loss
        loss_cls = loss_cls.sum(axis=2) * lbobj # sum over classes, positives only
        
        # Total: sum every cell per image, then average over the batch.
        loss = loss_loc + loss_obj + loss_cls # per-cell total
        loss = loss.sum(axis=[1, 2, 3]).mean() # batch mean
        
        return loss
    
####################################################################################

    def infers(self, p_list, imghws):
        """
        Produce the final detections for a batch.
        params:
        - p_list: prediction maps, one per pyramid level
        - imghws: per-image [height, width] tensor
        return:
        - infers: list of length b; element i holds that image's detections
                  with rows [class, score, x1, y1, x2, y2]
        """
        # Decode boxes and scores, then run per-class NMS.
        boxes, score = self.get_boxes_score(p_list, imghws)
        return self.nms_boxes_score(boxes, score)
    
    def get_boxes_score(self, p_list, imghws):
        """
        Decode the prediction maps into boxes and per-class scores.
        params:
        - p_list: prediction maps, one per pyramid level
        - imghws: per-image [height, width] tensor, [b, 2]
        return:
        - boxes : decoded xyxy boxes in original-image pixels, [b, m, 4]
        - score : per-class scores (objectness * class prob), [b, m, C]
        """
        # Broadcastable image sizes: [b, 2] -> [b, 1, 2].
        imghw = imghws.unsqueeze(1)
        
        # Decode every pyramid level.
        boxes = [] # decoded boxes per level
        score = [] # decoded scores per level
        for i, (anchor_mask, downsample_ratio) in enumerate(zip(self.anchor_mask, self.downsample_ratio)): # each level
            # Shapes of this level's prediction map.
            infer = p_list[i]             # prediction map
            batch_szie = infer.shape[0]   # batch size
            num_anchor = len(anchor_mask) # anchors at this level
            grid_h = infer.shape[2]       # grid height
            grid_w = infer.shape[3]       # grid width
            h = grid_h * downsample_ratio # network-input height
            w = grid_w * downsample_ratio # network-input width
            
            infer = infer.reshape([batch_szie, num_anchor, -1, grid_h, grid_w]) # split the anchor axis out
            pdloc = infer[:, :, 0:4, :, :]                                      # raw location channels
            pdbox = paddle.zeros(pdloc.shape, dtype=pdloc.dtype)                # decoded-box buffer
            pdbox.stop_gradient = True                                          # inference only, no gradients
            
            # Decode the predicted boxes (normalized cx, cy, w, h).
            tx = pdloc[:, :, 0, :, :] # raw tx
            ty = pdloc[:, :, 1, :, :] # raw ty
            tw = pdloc[:, :, 2, :, :] # raw tw
            th = pdloc[:, :, 3, :, :] # raw th
            
            yv, xv = paddle.meshgrid([paddle.arange(grid_h), paddle.arange(grid_w)])         # per-cell coordinates
            xy = paddle.stack([xv, yv]).cast(pdloc.dtype).reshape([1 ,1, 2, grid_h, grid_w]) # broadcastable shape
            cx = xy[:, :, 0, :, :] # cell x offsets
            cy = xy[:, :, 1, :, :] # cell y offsets
            
            anchor = [self.anchor_size[j] for j in anchor_mask]                               # this level's anchor sizes
            wh = paddle.to_tensor(anchor).cast(pdloc.dtype).reshape([1, num_anchor, 2, 1, 1]) # broadcastable shape
            pw = wh[:, :, 0, :, :] # anchor widths
            ph = wh[:, :, 1, :, :] # anchor heights
            
            pdbox[:, :, 0, :, :] = (cx + paddle.nn.functional.sigmoid(tx)) / grid_w # x = (cx + sigmoid(tx)) / grid_w
            pdbox[:, :, 1, :, :] = (cy + paddle.nn.functional.sigmoid(ty)) / grid_h # y = (cy + sigmoid(ty)) / grid_h
            pdbox[:, :, 2, :, :] = (pw * paddle.exp(tw)) / w                        # w = pw * exp(tw) / w
            pdbox[:, :, 3, :, :] = (ph * paddle.exp(th)) / h                        # h = ph * exp(th) / h
            
            # Flatten to one box per row.
            pdbox = pdbox.transpose([0, 1, 3, 4, 2]).reshape([batch_szie, -1, 4]) # [b,m,4]
            
            # Convert xywh -> xyxy in place. NOTE: statement order matters —
            # columns 2/3 still hold w/h when x2/y2 are derived from x1/y1,
            # so x2 = x1 + w = cx + w/2 and y2 = y1 + h = cy + h/2.
            pdbox[:, :, 0] = pdbox[:, :, 0] - pdbox[:, :, 2] / 2.0 # x1 = cx - w/2
            pdbox[:, :, 1] = pdbox[:, :, 1] - pdbox[:, :, 3] / 2.0 # y1 = cy - h/2
            pdbox[:, :, 2] = pdbox[:, :, 0] + pdbox[:, :, 2]       # x2 = x1 + w
            pdbox[:, :, 3] = pdbox[:, :, 1] + pdbox[:, :, 3]       # y2 = y1 + h
            
            # Scale the normalized coordinates to original-image pixels.
            pdbox[:, :, 0] = pdbox[:, :, 0] * imghw[:, :, 1] # x1 * image width
            pdbox[:, :, 1] = pdbox[:, :, 1] * imghw[:, :, 0] # y1 * image height
            pdbox[:, :, 2] = pdbox[:, :, 2] * imghw[:, :, 1] # x2 * image width
            pdbox[:, :, 3] = pdbox[:, :, 3] * imghw[:, :, 0] # y2 * image height
            
            # Scores: sigmoid objectness times sigmoid class probabilities.
            pdobj = paddle.nn.functional.sigmoid(infer[:, :, 4:5, :, :]) # objectness probability
            pdcls = paddle.nn.functional.sigmoid(infer[:, :, 5: , :, :]) # class probabilities
            pdsco = pdobj * pdcls                                        # combined score
            pdsco.stop_gradient = True                                   # inference only, no gradients
            
            # Flatten to one score row per box.
            pdsco = pdsco.transpose([0, 1, 3, 4, 2]).reshape([batch_szie, -1, self.num_classes]) # [b,m,C]
            
            # Collect this level's results.
            boxes.append(pdbox) # boxes
            score.append(pdsco) # scores
        
        # Concatenate all levels along the box axis.
        boxes = paddle.concat(boxes, axis=1) # [b, sum(m_i), 4]
        score = paddle.concat(score, axis=1) # [b, sum(m_i), C]
        
        return boxes, score
    
    def nms_boxes_score(self, boxes, score):
        """
        Apply per-class non-maximum suppression to the decoded predictions.
        params:
        - boxes : predicted boxes tensor, [b, m, 4]
        - score : predicted scores tensor, [b, m, c]
        return:
        - infers: list of length b; element i is a float32 ndarray of shape
                  [n_i, 6] with rows [class, score, x1, y1, x2, y2]
                  (shape [0, 6] when nothing survives suppression)
        """
        # Move to numpy; scores become [b, c, m] so each class can be sliced.
        boxes = boxes.numpy()
        score = score.transpose([0, 2, 1]).numpy()
        batch_size = boxes.shape[0]

        infers = []
        for i in range(batch_size):  # each image
            infer = []
            for j in range(self.num_classes):  # each class
                # Indices of the boxes kept for class j.
                nms_indexes = self.get_nms_indexes(boxes[i], score[i][j])
                if len(nms_indexes) < 1:
                    continue

                # Assemble [class, score, x1, y1, x2, y2] rows.
                nms_infer = np.zeros([len(nms_indexes), 6], dtype='float32')
                nms_infer[:, 0] = j                         # class id
                nms_infer[:, 1] = score[i][j][nms_indexes]  # scores
                nms_infer[:, 2:6] = boxes[i][nms_indexes]   # boxes

                infer.append(nms_infer)

            if len(infer) > 0:
                infers.append(np.concatenate(infer, axis=0))
            else:
                # FIX: the original appended the raw empty Python list here,
                # so images with no detections yielded a different type than
                # the documented [n, 6] ndarray (breaking callers that slice
                # columns); return a consistent empty [0, 6] array instead.
                infers.append(np.zeros([0, 6], dtype='float32'))

        return infers
    
    def get_nms_indexes(self, boxes, score):
        """
        Greedy non-maximum suppression for a single class.
        params:
        - boxes      : candidate boxes, [m, 4] in [x1, y1, x2, y2] format
        - score      : candidate scores, [m]
        return:
        - nms_indexes: ndarray of kept box indices, highest score first
        """
        kept = []  # indices of boxes that survived suppression

        # Visit the candidates from highest to lowest score.
        for idx in np.argsort(score)[::-1]:
            # Scores are visited in descending order, so once one falls
            # below the threshold every remaining candidate does too.
            if score[idx] < self.score_threshold:
                break

            # Drop the candidate if it overlaps any kept box too much.
            overlaps = any(
                self.get_iou_xyxy(boxes[idx], boxes[k]) > self.nms_threshold
                for k in kept)
            if not overlaps:
                kept.append(idx)

        return np.array(kept)