from math import sqrt

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class VGGBase(nn.Module):
    """
    VGG-16 base network whose convolutions produce the low-level feature maps
    (conv4_3 and conv7) consumed by SSD300.
    """

    def __init__(self):
        super(VGGBase, self).__init__()
        # Standard VGG-16 convolution configuration
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)  # 3*300*300 -> 64*300*300
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)  # 64*300*300 -> 64*300*300
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)  # 64*300*300 -> 64*150*150

        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)  # 64*150*150 -> 128*150*150
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)  # 128*150*150 -> 128*150*150
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)  # 128*150*150 -> 128*75*75

        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)  # 128*75*75 -> 256*75*75
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)  # 256*75*75 -> 256*75*75
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)  # 256*75*75 -> 256*75*75
        # ceil_mode=True rounds the odd 75 up, so 75 -> 38 instead of 37
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)  # 256*75*75 -> 256*38*38

        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)  # 256*38*38 -> 512*38*38
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)  # 512*38*38 -> 512*38*38
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)  # 512*38*38 -> 512*38*38
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)  # 512*38*38 -> 512*19*19

        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)  # 512*19*19 -> 512*19*19
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)  # 512*19*19 -> 512*19*19
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)  # 512*19*19 -> 512*19*19
        # stride-1 pool keeps the 19*19 resolution
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)  # 512*19*19 -> 512*19*19

        # Replacements for VGG-16's FC6 and FC7 layers (atrous convolution for conv6)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)  # 512*19*19 -> 1024*19*19
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)  # 1024*19*19 -> 1024*19*19

        # Load pretrained ImageNet weights into the layers defined above
        self.load_pretrained_layers()

    def forward(self, image):
        """
        Forward propagation.
        :param image: image tensor of dimensions (N, 3, 300, 300)
        :return: the low-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))
        out = F.relu(self.conv1_2(out))
        out = self.pool1(out)

        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.pool2(out)

        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.pool3(out)

        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        conv4_3_feats = out  # (N, 512, 38, 38)
        out = self.pool4(out)

        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.pool5(out)

        out = F.relu(self.conv6(out))
        conv7_feats = F.relu(self.conv7(out))  # (N, 1024, 19, 19)

        return conv4_3_feats, conv7_feats

    def load_pretrained_layers(self):
        """
        As in the paper, use a VGG-16 pretrained on ImageNet as the base network.
        It is available in torchvision; its FC6/FC7 weights are converted into
        conv6/conv7 weights by subsampling (see `decimate`).
        """
        # Parameters of this (custom) base network
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())

        # Pretrained VGG-16 parameters
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())

        # Copy the pretrained conv parameters over, relying on matching order.
        # The last 4 entries (conv6/conv7 weight+bias) have no direct counterpart.
        # Note: although pool3 uses ceil_mode and changes the 2D output size,
        # it does not change any parameter shapes, so the 1:1 copy is valid.
        for i, param in enumerate(param_names[:-4]):
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]

        # Convert FC6 and FC7 into convolutional layers by subsampling.
        # (Moved out of the loop above: this conversion only needs to happen once.)
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)  # (4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']  # (4096)
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])  # (1024, 512, 3, 3)
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])  # (1024)

        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])  # (1024, 1024, 1, 1)
        # Bug fix: conv7's bias must come from FC7's bias, not FC6's
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])  # (1024)

        # Install the merged parameters into this module
        self.load_state_dict(state_dict)

        print("\nLoaded base model\n")


def decimate(tensor, m):
    """
    Downsample `tensor` by keeping every m-th value along each dimension.

    Used when converting fully-connected layers into equivalent (smaller)
    convolutional layers: the pruned tensors become the weights/biases of
    conv6 and conv7.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors, one per tensor dimension;
              None means "keep that dimension untouched"
    :return: the decimated tensor
    """
    # One factor per dimension is required
    assert tensor.dim() == len(m)

    for axis, step in enumerate(m):
        if step is None:
            continue
        keep = torch.arange(start=0, end=tensor.size(axis), step=step).long()
        tensor = tensor.index_select(dim=axis, index=keep)

    return tensor


class PredictionConvolutions(nn.Module):
    """
    Convolutions that predict class scores and bounding boxes from the low- and
    high-level feature maps.

    Box locations are predicted as encoded offsets with respect to each of the
    8732 prior (default) boxes (see 'cxcy_to_gcxgcy').
    Class scores represent the score of each object class in each of the 8732
    located boxes, i.e. the boxes obtained after applying the predicted offsets,
    not the priors themselves.
    """

    def __init__(self, n_classes):
        """
        :param n_classes: number of object classes
        """
        super(PredictionConvolutions, self).__init__()
        self.n_classes = n_classes

        # Number of prior boxes per spatial position of each feature map
        # (reflects how many aspect ratios are used on that map)
        n_boxes = {'conv4_3': 4,
                   'conv7': 6,
                   'conv8_2': 6,
                   'conv9_2': 6,
                   'conv10_2': 4,
                   'conv11_2': 4}

        # Localization convolutions: predict encoded offsets w.r.t. the priors
        self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)  # -> 16*38*38
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=3, padding=1)  # -> 24*19*19
        self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)

        # Class prediction convolutions: predict the class of the object in each box
        self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3,
                                    padding=1)  # -> (4*n_classes)*38*38
        self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)

        # Initialize convolution parameters
        self.init_conv2d()

    def init_conv2d(self):
        """
        Xavier-initialize the weights and zero the biases of every conv layer.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)

    def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
        """
        Forward propagation.
        :param conv4_3_feats: a tensor of dimensions (N, 512, 38, 38)
        :param conv7_feats: a tensor of dimensions (N, 1024, 19, 19)
        :param conv8_2_feats: a tensor of dimensions (N, 512, 10, 10)
        :param conv9_2_feats: a tensor of dimensions (N, 256, 5, 5)
        :param conv10_2_feats: a tensor of dimensions (N, 256, 3, 3)
        :param conv11_2_feats: a tensor of dimensions (N, 256, 1, 1)
        :return: 8732 box offsets (N, 8732, 4) and class scores (N, 8732, n_classes)
        """
        batch_size = conv4_3_feats.size(0)

        def _flatten(t, last_dim):
            # (N, C, H, W) -> (N, H*W*boxes, last_dim).
            # .contiguous() is required so that .view() is valid after .permute().
            return t.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, last_dim)

        # Feature maps and their matching prediction heads, in the canonical order
        # (must match the order in which the priors are generated).
        # Per-map box counts: 5776 + 2166 + 600 + 150 + 36 + 4 = 8732.
        feats = (conv4_3_feats, conv7_feats, conv8_2_feats,
                 conv9_2_feats, conv10_2_feats, conv11_2_feats)
        loc_convs = (self.loc_conv4_3, self.loc_conv7, self.loc_conv8_2,
                     self.loc_conv9_2, self.loc_conv10_2, self.loc_conv11_2)
        cl_convs = (self.cl_conv4_3, self.cl_conv7, self.cl_conv8_2,
                    self.cl_conv9_2, self.cl_conv10_2, self.cl_conv11_2)

        # Predicted offsets w.r.t. the prior boxes
        locs = torch.cat([_flatten(conv(f), 4)
                          for conv, f in zip(loc_convs, feats)], dim=1)  # (N, 8732, 4)

        # Predicted class scores for each box
        class_scores = torch.cat([_flatten(conv(f), self.n_classes)
                                  for conv, f in zip(cl_convs, feats)], dim=1)  # (N, 8732, n_classes)

        return locs, class_scores


class AuxiliaryConvolutions(nn.Module):
    """
    Auxiliary convolutions on top of conv7 that produce the higher-level
    feature maps conv8_2, conv9_2, conv10_2 and conv11_2.
    """

    def __init__(self):
        super(AuxiliaryConvolutions, self).__init__()

        # Note: stride defaults to 1 unless stated otherwise
        self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)  # 1024*19*19 -> 256*19*19
        self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)  # 256*19*19 -> 512*10*10

        self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)  # 512*10*10 -> 128*10*10
        self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)  # 128*10*10 -> 256*5*5

        self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)  # 256*5*5 -> 128*5*5
        self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # 128*5*5 -> 256*3*3

        self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)  # 256*3*3 -> 128*3*3
        self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # 128*3*3 -> 256*1*1

        # Initialize convolution parameters
        self.init_conv2d()

    def init_conv2d(self):
        """
        Xavier-initialize the weights and zero the biases of every conv layer.
        """
        for layer in self.children():
            if isinstance(layer, nn.Conv2d):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0.)

    def forward(self, conv7_feats):
        """
        Forward propagation.
        :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
        :return: higher-level feature maps conv8_2, conv9_2, conv10_2 and conv11_2
        """
        x = F.relu(self.conv8_1(conv7_feats))
        conv8_2_feats = F.relu(self.conv8_2(x))  # (N, 512, 10, 10)

        x = F.relu(self.conv9_1(conv8_2_feats))
        conv9_2_feats = F.relu(self.conv9_2(x))  # (N, 256, 5, 5)

        x = F.relu(self.conv10_1(conv9_2_feats))
        conv10_2_feats = F.relu(self.conv10_2(x))  # (N, 256, 3, 3)

        x = F.relu(self.conv11_1(conv10_2_feats))
        conv11_2_feats = F.relu(self.conv11_2(x))  # (N, 256, 1, 1)

        return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats


class SSD300(nn.Module):
    """
    The SSD300 network: encapsulates the VGG base, the auxiliary convolutions
    and the prediction convolutions, and owns the 8732 prior (default) boxes.
    """

    def __init__(self, n_classes):
        # :param n_classes: number of object classes (background included as class 0)
        super(SSD300, self).__init__()
        self.n_classes = n_classes

        self.base = VGGBase()  # instantiated base network
        self.aux_convs = AuxiliaryConvolutions()  # instantiated auxiliary network
        self.pred_convs = PredictionConvolutions(n_classes)  # instantiated prediction heads (plain convolutions)

        # The low-level feature map conv4_3 has a considerably larger scale, so it is
        # L2-normalised and rescaled in forward(). The rescale factor is initialised to 20
        # but is a learnable per-channel parameter, updated by back-propagation.
        self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))  # learnable, hence declared in __init__
        nn.init.constant_(self.rescale_factors, 20)

        # Create the prior boxes
        self.priors_cxcy = self.create_prior_boxes()

    def create_prior_boxes(self):
        """
        Create the 8732 prior (default) boxes for the model.
        :return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
        """
        # Spatial size of each feature map
        fmap_dims = {'conv4_3': 38,
                     'conv7': 19,
                     'conv8_2': 10,
                     'conv9_2': 5,
                     'conv10_2': 3,
                     'conv11_2': 1}

        # Prior-box scale (fraction of image size) per feature map
        obj_scales = {'conv4_3': 0.1,
                      'conv7': 0.2,
                      'conv8_2': 0.375,
                      'conv9_2': 0.55,
                      'conv10_2': 0.725,
                      'conv11_2': 0.9}

        # Aspect ratios used on each feature map
        aspect_ratios = {'conv4_3': [1., 2., 0.5],
                         'conv7': [1., 2., 3., 0.5, .333],
                         'conv8_2': [1., 2., 3., 0.5, .333],
                         'conv9_2': [1., 2., 3., 0.5, .333],
                         'conv10_2': [1., 2., 0.5],
                         'conv11_2': [1., 2., 0.5]}

        fmaps = list(fmap_dims.keys())

        prior_boxes = []

        for k, fmap in enumerate(fmaps):  # one feature map at a time
            for i in range(fmap_dims[fmap]):  # rows (height)
                for j in range(fmap_dims[fmap]):  # columns (width)

                    # Center coordinates of this cell;
                    # dividing by the fmap dimension yields fractional (0-1) coordinates
                    cx = (j + 0.5) / fmap_dims[fmap]
                    cy = (i + 0.5) / fmap_dims[fmap]

                    for ratio in aspect_ratios[fmap]:  # aspect ratios for this feature map
                        prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio),
                                            obj_scales[fmap] / sqrt(ratio)])  # w=s*sqrt(ratio), h=s/sqrt(ratio)
                        # Each map generates, left-to-right and top-to-bottom, several priors
                        # with different aspect ratios at every position — this order must
                        # match the order used by PredictionConvolutions.

                        # Extra prior for aspect ratio 1: scale is the geometric mean of
                        # this map's scale and the next map's scale
                        if ratio == 1:
                            try:
                                additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])
                            # The last feature map has no "next" map
                            except IndexError:
                                additional_scale = 1.
                            prior_boxes.append([cx, cy, additional_scale, additional_scale])

        prior_boxes = torch.FloatTensor(prior_boxes).to(device)

        prior_boxes.clamp_(0, 1)  # clamp to [0, 1], i.e. clip boxes exceeding the image boundary

        # Note: prior coordinates are fractional (center-size form) from the start
        return prior_boxes  # (8732, 4)

    def forward(self, image):
        """
        Forward propagation.
        :param image: tensor of dimensions (N, 3, 300, 300)
        :return: 8732 box offsets and class scores per image
        """

        # Run the VGG base to obtain the low-level feature maps
        conv4_3_feats, conv7_feats = self.base(image)

        # L2-normalise conv4_3 along the channel dimension, then rescale by the learned
        # per-channel factor (conv4_3 activations are much larger than the other maps')
        norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt()  # (n,512,38,38)-> (n,1,38,38)
        conv4_3_feats = conv4_3_feats / norm  # (n,512,38,38)
        conv4_3_feats = conv4_3_feats * self.rescale_factors  # (n,512,38,38), broadcast

        # Run the auxiliary convolutions to obtain the high-level feature maps
        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = self.aux_convs(conv7_feats)

        # Run the prediction convolutions
        # (predict offsets w.r.t. the priors and class scores for each resulting box)
        locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats,
                                               conv11_2_feats)

        return locs, classes_scores  # (n,8732,4) (n,8732,n_classes)

    def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
        """
        Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.

        For each class, perform Non-Maximum Suppression on boxes that are above a minimum threshold.

        :param predicted_locs: predicted offsets w.r.t. the priors, (N, 8732, 4)
        :param predicted_scores: predicted class scores, (N, 8732, n_classes)
        :param min_score: minimum threshold for keeping a box for a certain class
        :param max_overlap: maximum overlap two boxes may have before the lower-scored one is suppressed
        :param top_k: if there are too many detected objects, keep only the top k results
        :return: detections (bboxes, labels, scores), lists of length batch_size
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        predicted_scores = F.softmax(predicted_scores, dim = 2) # (N, 8732, n_classes)

        # Lists to store final results
        all_images_boxes = []
        all_images_labels = []
        all_images_scores = []

        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)

        for i in range(batch_size):
            # Decode object coordinates from offset form to fractional xy coordinates
            decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy))
            # Lists to store results for one image
            image_boxes = []
            image_labels = []
            image_scores = []

            max_scores, best_label = predicted_scores[i].max(dim = 1) # (8732)

            # Check each (non-background) class
            for c in range(1, self.n_classes):
                # Keep only results that are above the minimum score
                class_scores = predicted_scores[i][:, c] # (8732)
                score_above_min_score = class_scores > min_score # boolean mask for indexing
                n_above_min_score = score_above_min_score.sum().item()
                if n_above_min_score == 0:
                    continue
                class_scores = class_scores[score_above_min_score] # (n_qualified)
                class_decoded_locs = decoded_locs[score_above_min_score] # (n_qualified, 4)

                # Sort qualified results by decreasing score
                class_scores, sort_ind = class_scores.sort(dim = 0, descending = True)
                class_decoded_locs = class_decoded_locs[sort_ind]

                # Find the overlap between predicted boxes
                overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) # (n_qualified, n_qualified)

                # Non-Maximum Suppression

                # Boolean tensor indicating which predicted box is suppressed
                # (True implies suppressed)
                suppress = torch.zeros((n_above_min_score)).bool().to(device) # (n_qualified)

                for box in range(class_decoded_locs.size(0)):
                    # Already suppressed boxes cannot suppress others
                    if suppress[box] == 1:
                        continue

                    # Suppress boxes whose overlap with this one exceeds max_overlap
                    suppress = suppress | (overlap[box] > max_overlap)
                    suppress[box] = 0 # un-suppress this box itself (its self-overlap is 1)

                # Store the unsuppressed boxes for this class

                image_boxes.append(class_decoded_locs[~suppress])
                image_labels.append(torch.LongTensor((~suppress).sum().item() * [c]).to(device))
                image_scores.append(class_scores[~suppress])

            # If no object is found, store a placeholder for 'background'
            if len(image_boxes) == 0:
                image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
                image_labels.append(torch.LongTensor([0]).to(device))
                image_scores.append(torch.FloatTensor([0.]).to(device))

            # Concatenate result lists into tensors
            image_boxes = torch.cat(image_boxes, dim = 0) # (n_objects, 4)
            image_labels = torch.cat(image_labels, dim = 0) # (n_objects)
            image_scores = torch.cat(image_scores, dim = 0) # (n_objects)
            n_objects = image_scores.size(0)
            # Keep only the top k objects
            if n_objects > top_k:
                image_scores, sort_ind = image_scores.sort(dim = 0, descending = True)
                image_scores = image_scores[:top_k]
                image_boxes = image_boxes[sort_ind][:top_k]
                image_labels = image_labels[sort_ind][:top_k]

            # Store the final results
            all_images_boxes.append(image_boxes)
            all_images_labels.append(image_labels)
            all_images_scores.append(image_scores)

        return all_images_boxes, all_images_labels, all_images_scores


class MultiBoxLoss(nn.Module):
    """
    The MultiBox loss, a loss function for object detection.
    This is a combination of:
    (1) a localization loss for the predicted locations of the boxes, and
    (2) a confidence loss for the predicted class scores.
    """

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
        """
        :param priors_cxcy: prior boxes in center-size coordinates, (8732, 4)
        :param threshold: minimum Jaccard overlap for a prior to be matched to an object
        :param neg_pos_ratio: ratio of hard negatives to positives in the confidence loss
        :param alpha: weight of the localization loss in the total loss
        """
        super(MultiBoxLoss, self).__init__()
        self.priors_cxcy = priors_cxcy
        self.priors_xy = cxcy_to_xy(priors_cxcy)
        self.threshold = threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.alpha = alpha

        # NOTE(review): despite the attribute name this is plain L1, not the Smooth L1
        # of the SSD paper — kept as-is to preserve existing training behavior.
        self.smooth_l1 = nn.L1Loss()
        # Fix: the 'reduce' keyword is deprecated (and removed in recent PyTorch);
        # reduction='none' is the supported equivalent of reduce=False.
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')

    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        Forward propagation.
        :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
        :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
        :param boxes: true  object bounding boxes in boundary coordinates, a list of N tensors
        :param labels: true object labels, a list of N tensors
        :return: multibox loss, a scalar
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        n_classes = predicted_scores.size(2)

        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)

        true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device)  # (N, 8732, 4)
        true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device)  # (N, 8732)

        # For each image
        for i in range(batch_size):
            n_objects = boxes[i].size(0)

            overlap = find_jaccard_overlap(boxes[i],
                                           self.priors_xy)  # (n_objects, 8732)

            # For each prior, find the object that has the maximum overlap
            overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0)  # (8732)

            # We don't want a situation where an object is not represented in our positive (non-background) priors -
            # 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.
            # 2. All priors with the object may be assigned as background based on the threshold (0.5).

            # To remedy this -
            # First, find the prior that has the maximum overlap for each object.
            _, prior_for_each_object = overlap.max(dim=1)  # (N_o)

            # Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)
            object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(device)

            # To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
            overlap_for_each_prior[prior_for_each_object] = 1.

            # Labels for each prior
            label_for_each_prior = labels[i][object_for_each_prior]  # (8732)
            # Set priors whose overlaps with objects are less than the threshold to be background (no object)
            label_for_each_prior[overlap_for_each_prior < self.threshold] = 0  # (8732)

            # Store
            true_classes[i] = label_for_each_prior

            # Encode center-size object coordinates into the form we regressed predicted boxes to
            true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy)  # (8732, 4)

        # Identify priors that are positive (object/non-background)
        positive_priors = true_classes != 0  # (N, 8732)

        # LOCALIZATION LOSS

        # Localization loss is computed only over positive (non-background) priors
        loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors])  # (), scalar

        # Note: indexing with a boolean mask flattens the tensor when indexing is across multiple dimensions (N & 8732)
        # So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)

        # CONFIDENCE LOSS

        # Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
        # That is, FOR EACH IMAGE,
        # we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e where there is maximum loss
        # This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance

        # Number of positive and hard-negative priors per image
        n_positives = positive_priors.sum(dim=1)  # (N)
        n_hard_negatives = self.neg_pos_ratio * n_positives  # (N)

        # First, find the loss for all priors
        conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1))  # (N * 8732)
        conf_loss_all = conf_loss_all.view(batch_size, n_priors)  # (N, 8732)

        # We already know which priors are positive
        conf_loss_pos = conf_loss_all[positive_priors]  # (sum(n_positives))

        # Next, find which priors are hard-negative
        # To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
        conf_loss_neg = conf_loss_all.clone()  # (N, 8732)
        conf_loss_neg[positive_priors] = 0.  # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
        conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True)  # (N, 8732), sorted by decreasing hardness
        hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(device)  # (N, 8732)
        hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1)  # (N, 8732)
        conf_loss_hard_neg = conf_loss_neg[hard_negatives]  # (sum(n_hard_negatives))

        # As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
        conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float()  # (), scalar

        # TOTAL LOSS

        return conf_loss + self.alpha * loc_loss


if __name__ == '__main__':
    # Quick smoke test: build the model and inspect its parameters / optimizer.
    ssd300 = SSD300(20)
    print(ssd300)

    # Split trainable parameters into bias / non-bias groups
    # (some training recipes use a doubled learning rate for biases).
    biases = list()
    not_biases = list()
    for param_name, param in ssd300.named_parameters():
        if not param.requires_grad:
            continue
        if param_name.endswith('.bias'):
            biases.append(param)
        else:
            not_biases.append(param)

    lr = 1e-3
    momentum = 0.9
    weight_decay = 5e-4

    optimizer = torch.optim.SGD(params=ssd300.parameters(), lr=lr,
                                momentum=momentum, weight_decay=weight_decay)

    # The optimizer prints its parameter groups according to the configuration above
    print(optimizer)