'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: sover.py
@time: 2020-06-16 10:25:33
@desc: 
'''
import torch
from torch.utils.model_zoo import load_url
import os
import itertools
import math
import pickle
import numpy as np

from ELib.eval.tranEval import EvalObj
from ELib.model.model_zoo import get_model
from ELib.loss.loss_zoo import get_loss
from ELib.dataset.data_zoo import get_dataset

from ELib.utils.utils import decode, diounms, nms

from jjzhk.logger import Logger
from jjzhk.device import device
from jjzhk.drawseg import BaseDrawSeg
from jjzhk.progressbar import ProgressBar


class TyranrexSolver:
    """Training / evaluation / testing driver for the ELib detection models.

    A single config object (``cfg``) selects the dataset, model, loss,
    optimizer and LR schedule; progress, checkpoints and metrics are written
    through the jjzhk ``Logger``.
    """

    # Pretrained weights, keyed by backbone or full-model name.
    model_urls = {
        'vgg16' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/vgg16_SDC.pth',
        'resnet50' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/resnet50_SDC.pth',
        'resnet152' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/resnet152_SDC.pth',
        'darknet19' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/darknet19_SDC.pth',
        'darknet53' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/darknet53_SDC.pth',
        'mobilenetv1': 'https://github.com/JJZHK/AIModels/releases/download/1.0/mobilenetv1_SDC.pth',
        'mobilenetv2': 'https://github.com/JJZHK/AIModels/releases/download/1.0/mobilenetv2_SDC.pth',

        'yolov3' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/darknet53.conv.74',
        'yolov4' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/yolov4_SDC.pth',
        'mb2_v3_asff' : 'https://github.com/JJZHK/AIModels/releases/download/1.0/mnv2_asff_SDC.pth'
    }

    def __init__(self, cfg):
        """Build the model from *cfg* and set up logging paths.

        Loaders, optimizer, criterion and scheduler are created lazily by
        :meth:`train` / :meth:`_eval_epoch_` / :meth:`_test_epoch_`.
        """
        self.cfg = cfg
        self._train_loader_ = None
        self._eval_loader_ = None
        self._test_loader_ = None
        self.phase = None  # 'train' | 'eval' | 'test'; set by the public entry points
        self.logger = Logger(output="logger")
        # Logger layout:
        #   logger/train_logs, logger/eval_logs, logger/test_logs,
        #   logger/train_logs/checkpoint.txt, logger/train_logs/loss.txt,
        #   logger/eval_logs/eval.txt
        self.train_path, self.eval_path, self.test_path, self.checkpoint_file, \
        self.loss_file, self.eval_file = self.logger.get_path_files()
        self.model = get_model(self.cfg).to(device)

        self._init_others_()

    def train(self):
        """Run the full training loop, resuming from the latest checkpoint if any."""
        self.phase = "train"
        if self._train_loader_ is None:
            self.train_dataset = self._init_dataset_('train')
            if '_asff' in self.cfg.BASE.DATANAME:
                # ASFF-style datasets produce pre-batched samples, so the
                # DataLoader runs sample-by-sample with the default collate.
                self._train_loader_ = torch.utils.data.DataLoader(dataset=self.train_dataset,
                                                                  batch_size=1, shuffle=True,
                                                                  num_workers=0, pin_memory=True)
            else:
                train_sampler = self.make_data_sampler(self.train_dataset, False)
                train_batch_sampler = self.make_batch_data_sampler(train_sampler,
                                                                   images_per_batch=self.cfg.TRAIN.BATCH_SIZE,
                                                                   drop_last=False)
                self._train_loader_ = torch.utils.data.DataLoader(dataset=self.train_dataset,
                                                                  batch_sampler=train_batch_sampler,
                                                                  num_workers=0,
                                                                  pin_memory=True,
                                                                  collate_fn=self._init_detection_collate_)

            self.optimizer = self._get_optimizer_(self.model)
            self.criterion = get_loss(self.cfg, priors=self.priors if hasattr(self, 'priors') else None)
            if self.criterion is not None:
                self.criterion.to(device)
            self.lr_scheduler = self._get_scheduler_()

        max_epochs = self.cfg.TRAIN.MAX_EPOCHS
        previous = self._find_previous_()
        if previous:
            # Resume from the last (epoch, checkpoint) pair recorded in checkpoint.txt.
            start_epoch = previous[0][-1]
            self._resume_checkpoint_(previous[1][-1])
        else:
            start_epoch = self._init_weights_()
        bar = ProgressBar(self.cfg.TRAIN.MAX_EPOCHS, len(self._train_loader_), "Loss:%.3f;AvgLoss:%.3f;LR:%.6f")

        print("start training...")
        newir = self.cfg.TRAIN.SOLVER.LR
        for epoch in range(start_epoch + 1, max_epochs + 1):
            self.model.train()

            if epoch > self.cfg.TRAIN.SOLVER.WARM_UP_EPOCHS and self.lr_scheduler is not None:
                self.lr_scheduler.step(epoch - self.cfg.TRAIN.SOLVER.WARM_UP_EPOCHS)
                for param_group in self.optimizer.param_groups:
                    newir = param_group['lr']
            else:
                # During warm-up, set the LR manually from the configured
                # epoch->lr table (attributes of cfg.TRAIN.SOLVER.STEP); the
                # largest threshold <= epoch wins.
                if hasattr(self.cfg.TRAIN.SOLVER, 'STEP'):
                    steps = self.cfg.TRAIN.SOLVER.STEP
                    attributes = sorted([int(attr) for attr in steps.__dict__ if attr != '_content'], reverse=True)
                    flag = True
                    for s in attributes:
                        if epoch >= s and flag:
                            for param_group in self.optimizer.param_groups:
                                param_group['lr'] = float(steps.__dict__[str(s)])
                            newir = float(steps.__dict__[str(s)])
                            flag = False

            avg_loss_per_epoch, time = self._train_epoch_(epoch, bar, newir)

            # Fixed: `is not 0` compared object identity; `!= 0` compares value.
            if self.cfg.TRAIN.EVALITER is not None and self.cfg.TRAIN.EVALITER != 0 and epoch % self.cfg.TRAIN.EVALITER == 0:
                self._eval_epoch_(epoch, self.model)

            if self.cfg.TRAIN.TESTITER is not None and self.cfg.TRAIN.TESTITER != 0 and epoch % self.cfg.TRAIN.TESTITER == 0:
                self._test_epoch_(epoch, self.model)

            resume_checkpoints = {
                'state_dict': self.model.module.state_dict() if hasattr(self.model,
                                                                        'module') else self.model.state_dict(),
                'lr_scheduler': self.lr_scheduler.state_dict(),
                'optimizer': self.optimizer.state_dict()
            }
            self.logger.save_checkpoints_file(epoch, resume_checkpoints)
            self.logger.logger("epoch %d-loss:%.3f;lr:%.6f;time:%d;" % (epoch, avg_loss_per_epoch, newir, time), phase='l')

    def eval(self):
        """Evaluate the model using the weights configured in cfg.EVAL.WEIGHTS."""
        self.phase = "eval"
        # Fixed: the message previously printed cfg.TEST.WEIGHTS while the
        # code actually loaded cfg.EVAL.WEIGHTS.
        print("loading weights from %s" % self.cfg.EVAL.WEIGHTS)
        self._resume_checkpoint_(self.cfg.EVAL.WEIGHTS)
        self._eval_epoch_(0, self.model)

    def test(self):
        """Draw detections on the test split using cfg.TEST.WEIGHTS."""
        self.phase = "test"
        print("loading weights from %s" % self.cfg.TEST.WEIGHTS)
        self._resume_checkpoint_(self.cfg.TEST.WEIGHTS)
        self._test_epoch_(0, self.model)

    def _eval_epoch_(self, epoch, model):
        """Compute mAP for *epoch* and log a per-class AP table."""
        if self._eval_loader_ is None:
            self.eval_dataset = self._init_dataset_('eval')
            if '_asff' in self.cfg.BASE.DATANAME:
                self._eval_loader_ = torch.utils.data.DataLoader(dataset=self.eval_dataset,
                                                                  batch_size=1, shuffle=True,
                                                                  num_workers=0, pin_memory=True)
            else:
                eval_sampler = self.make_data_sampler(self.eval_dataset, False)
                eval_batch_sampler = self.make_batch_data_sampler(eval_sampler, images_per_batch=self.cfg.EVAL.BATCH_SIZE,
                                                                  drop_last=False)
                self._eval_loader_ = torch.utils.data.DataLoader(dataset=self.eval_dataset,
                                                                 batch_sampler=eval_batch_sampler,
                                                                 num_workers=0,
                                                                 pin_memory=True,
                                                                 collate_fn=self._init_detection_collate_)
        model.eval()

        if not os.path.exists(os.path.join(self.eval_path, str(epoch))):
            os.mkdir(os.path.join(self.eval_path, str(epoch)))
        eval_model = EvalObj(self.cfg, model)

        if hasattr(self, 'detector'):
            mAP, info = eval_model.calculateMAP(self._eval_loader_,
                                                os.path.join(self.eval_path, str(epoch)),
                                                detector=self.detector)
            self.logger.logger("epoch %d - mAP : %.3f" % (epoch, mAP), phase='e')

            headers = ['class name', 'AP']
            table = []
            for i, cls_name in enumerate(self.cfg.BASE.CLASSINFO.keys()):
                table.append([cls_name, info[cls_name]])

            self.logger.save_eval_txt_file(epoch, table, headers)
        else:
            # No detector configured (non-SSD models): nothing to evaluate here.
            pass

    def _test_epoch_(self, epoch, model):
        """Render detections for every test image; returns the last drawn image, or None."""
        if self._test_loader_ is None:
            self.test_dataset = self._init_dataset_('test')
            test_sampler = self.make_data_sampler(self.test_dataset, False)
            test_batch_sampler = self.make_batch_data_sampler(test_sampler, images_per_batch=self.cfg.TEST.BATCH_SIZE,
                                                              drop_last=False)

            self._test_loader_ = torch.utils.data.DataLoader(dataset=self.test_dataset,
                                                             batch_sampler=test_batch_sampler,
                                                             num_workers=0,
                                                             pin_memory=True,
                                                             collate_fn=self._init_detection_collate_
                                                             )
        model.eval()
        output = os.path.join(self.test_path, "%d" % epoch)
        if not os.path.exists(output):
            os.mkdir(output)

        bar = ProgressBar(1, len(self._test_loader_), "Detections %d")
        draw = BaseDrawSeg(cfg=self.cfg, output=output)
        image = None  # fixed: previously unbound when no batch produced boxes
        for i, sampler in enumerate(self._test_loader_):
            bar.show(1, i)
            images, info = sampler['img'], sampler['info']
            boxes = self.model.get_predict(images, info, eval=False, detector=self.detector)
            for j, box in enumerate(boxes):
                image_id = info[j]["img_id"]
                filename = "%s.jpg" % image_id
                image = draw.drawByImage(os.path.join(self.cfg.BASE.TEST_DATA_ROOT, "Images", filename),
                             boxes=box, mask=None, imagename=image_id)
        return image

    def _train_epoch_(self, epoch, bar, newir):
        """Run one optimization pass over the training loader.

        Returns ``(average_loss, time)`` where ``time`` is whatever the
        progress bar's ``show`` reports.
        """
        avg_loss = 0
        time = 0

        for i, inputs in enumerate(self._train_loader_):
            if '_asff' in self.cfg.BASE.DATANAME:
                # Samples arrive pre-batched with batch_size=1; drop that axis.
                inputs = [input if isinstance(
                    input, list) else input.squeeze(0) for input in inputs]
                img, _, _, *labels = inputs
                images = img.to(device)
                labels = [label.to(device) for label in labels]
                targets = labels
                pred = self.model(images)
            else:
                # Collater output: {'img': stacked images, 'annot': targets, 'info': infos}
                images, target, _ = inputs['img'], inputs['annot'], inputs['info']
                images = torch.autograd.Variable(torch.FloatTensor(images)).to(device)
                targets = [torch.autograd.Variable(torch.FloatTensor(anno)).to(device) for anno in target]
                pred = self.model(images, phase='train', target=target)

            # Fixed: `== None` -> `is None`. Some models return the loss
            # directly, in which case no external criterion is configured.
            if self.criterion is None:
                loss = pred
            else:
                loss = self.criterion(pred, targets)

            avg_loss += loss.item()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            time = bar.show(epoch, loss.item(), avg_loss / (i + 1), newir)

        return avg_loss / (i + 1), time

    def make_data_sampler(self, dataset, shuffle):
        """Return a Random- or Sequential- sampler over *dataset*."""
        if shuffle:
            sampler = torch.utils.data.sampler.RandomSampler(dataset)
        else:
            sampler = torch.utils.data.sampler.SequentialSampler(dataset)
        return sampler

    def make_batch_data_sampler(self, sampler, images_per_batch, num_iters=None, start_iter=0, drop_last=True):
        """Wrap *sampler* into a BatchSampler, optionally iteration-bounded."""
        batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, images_per_batch, drop_last=drop_last)
        if num_iters is not None:
            batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iters, start_iter)
        return batch_sampler

    def _set_batch_norm_attr_(self, named_modules, attr, value):
        """Set *attr* = *value* on every (Sync)BatchNorm2d in *named_modules*."""
        for m in named_modules:
            if isinstance(m[1], torch.nn.BatchNorm2d) or isinstance(m[1], torch.nn.SyncBatchNorm):
                setattr(m[1], attr, value)

    def _init_dataset_(self, phase):
        """Instantiate the dataset for *phase* ('train'/'eval'/'test')."""
        print('init %s dataset' % phase)
        return get_dataset(self.cfg, phase)

    def _find_previous_(self):
        """Parse checkpoint.txt; return (epochs, checkpoint_paths) or False.

        Each line is expected to look like ``... epoch N: <path>``.
        """
        if not os.path.exists(self.checkpoint_file):
            return False

        with open(self.checkpoint_file, 'r') as f:
            lineList = f.readlines()

        if lineList == []:
            return False

        epoches, resume_checkpoints = [list() for _ in range(2)]
        for line in lineList:
            epoch = int(line[line.find('epoch ') + len('epoch '): line.find(':')])
            checkpoint = line[line.find(':') + 2:-1]
            epoches.append(epoch)
            resume_checkpoints.append(checkpoint)
        return epoches, resume_checkpoints

    def _init_weights_(self):
        """Initialise from pretrained backbone weights; return the start epoch (0)."""
        self._resume_checkpoint_('', justInitBase=True)
        return 0

    def _resume_checkpoint_(self, weights, justInitBase=False):
        """Load model weights.

        Called in four situations:
        1. pretrained-backbone init: ``justInitBase=True``, weights downloaded;
        2. resuming training: ``justInitBase=False``, checkpoint file from the logger;
        3. eval: ``justInitBase=False``, *weights* may be a URL;
        4. test: ``justInitBase=False``, *weights* may be a URL.
        """
        print("loading backbone weights...")

        if justInitBase:  # case 1: only the backbone is initialised
            self._load_backbone_weights_()
        else:
            if 'https://' in weights:
                checkpoint = torch.utils.model_zoo.load_url(weights, map_location='cpu')
                self.model.load_init_weights(checkpoint)
            else:
                self.model.load_init_weights(torch.load(weights, map_location='cpu'))

    def _get_optimizer_(self, model):
        """Build the optimizer named by cfg.TRAIN.SOLVER.OPTIMIZER.

        Raises ValueError for an unknown optimizer name.
        """
        parameters = self._trainable_param_(model)
        opt_lower = self.cfg.TRAIN.SOLVER.OPTIMIZER.lower()

        if opt_lower == 'sgd':
            optimizer = torch.optim.SGD(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, momentum=self.cfg.TRAIN.SOLVER.MOMENTUM,
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'adam':
            optimizer = torch.optim.Adam(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                betas=(0.9, 0.999),
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'adadelta':
            optimizer = torch.optim.Adadelta(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'rmsprop':
            optimizer = torch.optim.RMSprop(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, alpha=0.9, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                momentum=self.cfg.TRAIN.SOLVER.MOMENTUM, weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'adamw':
            optimizer = torch.optim.AdamW(parameters, self.cfg.TRAIN.SOLVER.LR)
        else:
            # Fixed: the message previously omitted 'adamw' although it is supported.
            raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop, adamw], but received "
                             "{}".format(opt_lower))

        return optimizer

    def _trainable_param_(self, model):
        """Freeze everything, then re-enable the modules named in TRAINABLE_SCOPE.

        Returns the trainable parameters, or all parameters if no scope matched.
        """
        trainable_scope = self.cfg.TRAIN.TRAINABLE_SCOPE
        for param in model.parameters():
            param.requires_grad = False

        trainable_param = []
        for module in trainable_scope.split(','):
            if hasattr(model, module):
                for param in getattr(model, module).parameters():
                    param.requires_grad = True
                trainable_param.extend(getattr(model, module).parameters())

        return trainable_param if len(trainable_param) else model.parameters()

    def _get_scheduler_(self):
        """Build the LR scheduler named by cfg.TRAIN.SOLVER.SCHEDULER.

        Raises ValueError for an unknown scheduler name.
        """
        optimizer = self.optimizer

        if self.cfg.TRAIN.SOLVER.SCHEDULER == 'step':
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                        step_size=self.cfg.TRAIN.LR_SCHEDULER.STEPS[0],
                                                        gamma=self.cfg.TRAIN.LR_SCHEDULER.GAMMA)
        elif self.cfg.TRAIN.SOLVER.SCHEDULER == 'multi_step':
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                             milestones=self.cfg.TRAIN.LR_SCHEDULER.STEPS,
                                                             gamma=self.cfg.TRAIN.LR_SCHEDULER.GAMMA)
        elif self.cfg.TRAIN.SOLVER.SCHEDULER == 'exponential':
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                               gamma=self.cfg.TRAIN.LR_SCHEDULER.GAMMA)
        elif self.cfg.TRAIN.SOLVER.SCHEDULER == 'SGDR':
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                                   T_max=self.cfg.TRAIN.MAX_EPOCHS)
        elif self.cfg.TRAIN.SOLVER.SCHEDULER == 'LambdaLR':
            scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, self.burnin_schedule)
        elif self.cfg.TRAIN.SOLVER.SCHEDULER == 'reducelr':
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
        else:
            # Fixed: an AssertionError was constructed but never raised, after
            # which `return scheduler` crashed with NameError.
            raise ValueError('scheduler can not be recognized.')
        return scheduler

    def _load_backbone_weights_(self):
        """Download and load pretrained weights for the configured backbone, if known."""
        backbone_name = self.cfg.MODEL.BACKBONE
        if backbone_name in self.model_urls.keys():
            if hasattr(self.model, 'base'):
                self.model.base.load_state_dict(torch.utils.model_zoo.load_url(self.model_urls[backbone_name]))
            elif hasattr(self.model, 'backbone'):
                self.model.backbone.load_init_weights(torch.utils.model_zoo.load_url(self.model_urls[backbone_name],map_location='cpu'))

    def _init_others_(self):
        """Create priors and the Detect post-processor for SSD-style models."""
        self.priors = None
        self.detector = None

        if self.cfg.BASE.DATANAME.startswith('ssd_'):
            feature_maps = self._forward_features_size(self.model, self.cfg.BASE.IMAGE_SIZE)

            self.priorbox = PriorBox(image_size=self.cfg.BASE.IMAGE_SIZE, feature_maps=feature_maps,
                                     aspect_ratios=self.cfg.BASE.ASPECT_RATIOS,
                                     scale=self.cfg.BASE.SIZES, archor_stride=self.cfg.BASE.STEPS, clip=self.cfg.BASE.CLIP)

            self.priors = torch.autograd.Variable(self.priorbox.forward())
            self.detector = Detect(self.cfg, self.priors)

    def _forward_features_size(self, model, img_size):
        """Probe *model* with a dummy input; return each feature map's (H, W)."""
        model.eval()
        x = torch.rand(1, 3, img_size[0], img_size[1])
        if torch.cuda.is_available():
            x = x.cuda()
        with torch.no_grad():
            x = torch.autograd.Variable(x)
        feature_maps = model(x, phase='feature')
        return [(o.size()[2], o.size()[3]) for o in feature_maps]

    def _init_detection_collate_(self, batch):
        """Dispatch collating to the dataset matching the current phase."""
        if self.phase == 'train':
            return self.train_dataset.collater(batch)
        elif self.phase == 'eval':
            return self.eval_dataset.collater(batch)
        else:
            return self.test_dataset.collater(batch)

    def burnin_schedule(self, i):
        """LambdaLR factor: quartic burn-in for 1000 steps, then stepped decay."""
        if i < 1000:
            factor = pow(i / 1000, 4)
        elif i < 400000:
            factor = 1.0
        elif i < 450000:
            factor = 0.1
        else:
            factor = 0.01
        return factor


class IterationBasedBatchSampler(torch.utils.data.sampler.BatchSampler):
    """Repeat an underlying BatchSampler until a fixed iteration budget is spent.

    Batches are yielded in the wrapped sampler's order, cycling through it as
    many times as needed; ``len()`` reports the iteration budget itself.
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        step = self.start_iter
        while step <= self.num_iterations:
            inner = self.batch_sampler.sampler
            # Distributed samplers expose set_epoch so each pass reshuffles
            # consistently across processes; call it when available.
            if hasattr(inner, "set_epoch"):
                inner.set_epoch(step)
            for batch in self.batch_sampler:
                step += 1
                if step > self.num_iterations:
                    return
                yield batch

    def __len__(self):
        return self.num_iterations


class Detect(torch.autograd.Function):
    """At test time, Detect is the final layer of SSD.  Decode location preds,
    apply non-maximum suppression to location predictions based on conf
    scores and threshold to a top_k number of output predictions for both
    confidence score and locations.
    """

    def __init__(self, cfg, priors):
        self.num_classes = cfg.BASE.NUM_CLASSES
        self.background_label = cfg.BASE.BACKGROUND_LABEL
        self.conf_thresh = cfg.BASE.CONF_THRESHOLD
        self.nms_thresh = cfg.BASE.IOU_THRESHOLD
        self.top_k = cfg.BASE.MAX_DETECTIONS
        self.variance = cfg.BASE.VARIANCE
        self.cfg = cfg
        self.priors = priors

    def forward(self, predictions):
        """Decode raw predictions and run per-class NMS.

        Args:
            predictions: (loc, conf) pair.
                loc: loc preds from loc layers, shape [batch, num_priors*4].
                conf: conf preds from conf layers, shape [batch*num_priors, num_classes].

        Returns:
            Tensor of shape [batch, num_classes, top_k, 5] where the last axis
            is (score, x1, y1, x2, y2); unused slots remain zero.

        Raises:
            NotImplementedError: if cfg.MODEL.NMS is not 'nms' or 'diounms'
                (previously this left `ids`/`count` unbound and crashed with
                NameError, e.g. for 'soft_nms').
        """
        loc, conf = predictions

        loc_data = loc.data
        conf_data = conf.data
        prior_data = self.priors.data

        num = loc_data.size(0)  # batch size
        num_priors = prior_data.size(0)
        if num == 1:
            # size batch x num_classes x num_priors
            conf_preds = conf_data.transpose(0, 1).squeeze().contiguous().unsqueeze(0)
        else:
            conf_preds = conf_data.view(num, num_priors,
                                        self.num_classes).transpose(2, 1)
        output = torch.zeros(num, self.num_classes, self.top_k, 5)

        # Decode predictions into bboxes.
        for i in range(num):
            decoded_boxes = decode(loc_data[i], prior_data, self.variance)
            # For each class (skipping background at index 0), perform nms.
            conf_scores = conf_preds[i].clone()
            for cl in range(1, self.num_classes):
                c_mask = conf_scores[cl].gt(self.conf_thresh).nonzero(as_tuple=False).view(-1)
                # Fixed: the old `c_mask.dim() == 0` guard was dead code because
                # view(-1) always yields a 1-D tensor; check element count instead.
                if c_mask.numel() == 0:
                    continue
                scores = conf_scores[cl][c_mask]
                boxes = decoded_boxes[c_mask, :]

                if self.cfg.MODEL.NMS == 'diounms':
                    ids, count = diounms(boxes, scores, self.nms_thresh, self.top_k)
                elif self.cfg.MODEL.NMS == 'nms':
                    ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)
                else:
                    raise NotImplementedError(
                        "NMS type '%s' is not supported" % self.cfg.MODEL.NMS)

                output[i, cl, :count] = \
                    torch.cat((scores[ids[:count]].unsqueeze(1),
                               boxes[ids[:count]]), 1)
        return output


class PriorBox(object):
    """Compute prior (anchor) boxes in center-offset form for every location
    of every source feature map.

    The classic SSD300 layout yields 8732 priors
    (38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4); this project's
    default layout usually yields 11620
    (38*38*6 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4).
    """

    def __init__(self, image_size, feature_maps, aspect_ratios, scale, archor_stride=None, archor_offest=None,
                 clip=True):
        super(PriorBox, self).__init__()
        self.image_size = image_size      # input image size
        self.feature_maps = feature_maps  # (H, W) of each source feature map
        # e.g. aspect_ratios = [[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2], [1, 2]]
        self.aspect_ratios = aspect_ratios
        # number of priors per feature-map location (typically 4 or 6)
        self.num_priors = len(aspect_ratios)
        self.clip = clip

        # Per-layer box scales, relative to the image.
        # Either explicit pixel sizes, e.g.
        # scale = [[30, 30], [60, 60], [111, 111], [162, 162], [213, 213], [264, 264], [315, 315]]
        # or a (min_scale, max_scale) float pair interpolated across layers.
        if isinstance(scale[0], list):
            self.scales = [min(sz[0] / self.image_size[0], sz[1] / self.image_size[1]) for sz in scale]
        elif isinstance(scale[0], float) and len(scale) == 2:
            layer_count = len(feature_maps)
            lo, hi = scale
            self.scales = [lo + (hi - lo) * idx / (layer_count - 1) for idx in range(layer_count)] + [1.0]

        # Effective strides: how many feature-map cells span the image, per layer.
        # e.g. archor_stride = [[8, 8], [16, 16], [32, 32], [64, 64], [100, 100], [300, 300]]
        self.steps = [(self.image_size[0] / st[0], self.image_size[0] / st[1]) for st in archor_stride]

    def forward(self):
        """Return all priors as a Tensor of shape (N, 4): (cx, cy, w, h) in [0, 1]."""
        coords = []
        for level, fmap in enumerate(self.feature_maps):
            step_y, step_x = self.steps[level]
            base = self.scales[level]
            for row, col in itertools.product(range(fmap[0]), range(fmap[1])):
                # Center of the current feature-map cell, normalized to the image.
                # Note row -> cy and col -> cx.
                cx = (col + 0.5) / step_x
                cy = (row + 0.5) / step_y

                for ratio in self.aspect_ratios[level]:
                    if isinstance(ratio, int):
                        if ratio == 1:
                            # ratio 1: one box at this layer's scale ...
                            coords.extend([cx, cy, base, base])
                            # ... plus the extra box at sqrt(s_k * s_{k+1}).
                            between = math.sqrt(base * self.scales[level + 1])
                            coords.extend([cx, cy, between, between])
                        else:
                            # Remaining integer ratios add a pair of boxes,
                            # stretched along each axis in turn.
                            root = math.sqrt(ratio)
                            coords.extend([cx, cy, base * root, base / root])
                            coords.extend([cx, cy, base / root, base * root])
                    elif isinstance(ratio, list):
                        # Explicit (w, h) multipliers of the base scale.
                        coords.extend([cx, cy, base * ratio[0], base * ratio[1]])

        priors = torch.Tensor(coords).view(-1, 4)
        if self.clip:
            priors.clamp_(max=1, min=0)
        return priors
