import os
import cv2
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers

#! 导入自定义模块
from utils import get_now
from data import PascalVOCDatasetV1 as PascalVOCDataset
from data_utils import normalize #! 方便预测
from model_utils import VGG16Base_feats, SSD300AuxConv, SSD300PredConv, SSD512AuxConv, SSD512PredConv
from metric_utils import gcxcywh_to_cxcywh, cxcywh_to_xyxy, find_jaccard_overlap, coco_eval, AverageMeter


class Rescale(layers.Layer):
    """Learnable per-channel rescaling layer.

    Multiplies its input by a trainable (1,1,1,C) factor. The factor is
    initialized to 20 — presumably following the SSD paper's conv4_3 L2-norm
    rescaling trick (TODO confirm).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def build(self, input_shape):
        channels = input_shape[-1]
        initializer = keras.initializers.constant(20.0)
        self.w = tf.Variable(initializer(shape=[1, 1, 1, channels]), trainable=True)

    def call(self, inputs):
        #! broadcast multiply: scales every channel by its learned factor
        return inputs * self.w

class SSD:
    """Base class shared by SSD300 / SSD512.

    Subclasses must provide `create_model` and `create_prior_boxes`.
    """

    def __init__(self, n_classes, input_shape, model_path=None, weights_path=None):
        """Build or load the detector.

        n_classes:    number of object classes (class 0 is background).
        input_shape:  (H, W, C) of the network input.
        model_path:   if given, load a full saved model from this path.
        weights_path: if given (and no model_path), load weights into a fresh model.
                      If neither is given, initialize from ImageNet-pretrained VGG16.
        """
        self.n_classes   = n_classes
        self.input_shape = input_shape

        if model_path is not None:
            self.load_model(model_path)
        else:
            self.model = self.create_model()
            if weights_path is not None:
                self.load_weights(weights_path)
            else:
                self.load_pretrained()

        self.priors_cxcy = self.create_prior_boxes() #! prior boxes in `cx,cy,w,h` format

    def save_model(self, model_path):
        """Save the underlying Keras model to `model_path` via `keras.Model.save`."""
        self.model.save(model_path)

    def load_model(self, model_path):
        """Load a saved SSD model, adapting it when the class count differs.

        If the saved model predicts the same number of classes, it is used
        directly. Otherwise a fresh model is built and all class-count-independent
        layer weights (and trainable flags) are copied over by name.
        """
        base = keras.models.load_model(model_path, custom_objects={"Rescale": Rescale})
        locs, classes_scores = base.output
        _, prior_num, n_classes = classes_scores.shape
        if n_classes == self.n_classes: #! same class count: assume same classes
            #! reuse the already-deserialized model instead of reading it from disk a second time
            self.model = base
        else: #! different class count: rebuild and transfer compatible weights
            self.model = self.create_model()
            for layer in base.layers:
                if "cl" in layer.name: continue #! skip class-prediction layers (shape depends on n_classes) -- presumed naming convention
                if "math" in layer.name or "nn" in layer.name: continue #! skip auto-generated op layers (no weights to copy)
                self.model.get_layer(name=layer.name).set_weights(layer.get_weights())
                self.model.get_layer(name=layer.name).trainable = layer.trainable
        del base #! drop the local reference (self.model keeps the object alive when reused)

    def load_weights(self, weights_path):
        """Load weights into the already-built model (architecture must match the checkpoint)."""
        self.model.load_weights(weights_path)

    def load_pretrained(self):
        """Initialize the VGG16 backbone from ImageNet-pretrained weights.

        Copies weights and trainable flags layer-by-layer, matching by name.
        The input layer is skipped because its name differs between the models.
        """
        backbone = keras.applications.VGG16(include_top=False, weights='imagenet')
        for src in backbone.layers:
            if "input" in src.name:
                continue  #! input layers are named differently; nothing to copy anyway
            dst = self.model.get_layer(name=src.name)
            dst.set_weights(src.get_weights())
            dst.trainable = src.trainable
        del backbone #! drop the donor model once weights are copied

    def layer_freeze(self, freeze):
        """Freeze leading modules of the network by layer-name prefix.

        freeze == None     : keep the trainable flags already on the model.
        freeze is an int k : freeze modules 1..k; the prediction heads are never
                             frozen in that branch. k == max index freezes
                             everything; k > max index makes everything trainable.
        anything else      : make every layer trainable.
                             NOTE(review): the original comment said "leave as
                             loaded", but the code forces trainable=True — confirm intent.
        """
        #! pick the module table for the concrete subclass
        if isinstance(self, SSD300):
            all_layers = {1: 'block1', 2: 'block2', 3: 'block3', 4: 'block4', 5: 'block5',            #! feature-extraction layers
                            6: 'conv6', 7: 'conv7', 8: 'conv8', 9: 'conv9', 10: 'conv10', 11: 'conv11', #! auxiliary convolutions
                            12: ['c_conv', 'l_conv']}                                                   #! prediction heads
        elif isinstance(self, SSD512):
            all_layers = {1: 'block1', 2: 'block2', 3: 'block3', 4: 'block4', 5: 'block5', #! feature-extraction layers
                            6: 'conv6', 7: 'conv7', 8: 'conv8', 9: 'conv9',                  #! auxiliary convolutions
                            10: 'conv10', 11: 'conv11', 12: 'conv12',                        #! auxiliary convolutions
                            13: ['c_conv', 'l_conv']}                                        #! prediction heads
        else:
            return

        #! apply the freezing policy
        if freeze is not None: #! None: respect whatever trainable flags the loaded model carries

            if isinstance(freeze, int): #! integer: freeze whole modules

                max_node = max(all_layers.keys())

                if freeze < max_node: #! freeze modules 1..freeze
                    freeze_layers = [all_layers[l] for l in range(1, freeze+1)]
                    for layer in self.model.layers:
                        for fl in freeze_layers:
                            if all_layers[max_node][0] in layer.name or all_layers[max_node][1] in layer.name: #! prediction heads stay trainable
                                pass
                            elif fl in layer.name: #! any other matching layer is frozen
                                self.model.get_layer(name=layer.name).trainable = False
                elif freeze == max_node: #! in theory freeze never equals 12/13; this freezes everything
                    for layer in self.model.layers:
                        self.model.get_layer(name=layer.name).trainable = False
                else: #! freeze beyond 12/13 means: freeze nothing
                    for layer in self.model.layers:
                        self.model.get_layer(name=layer.name).trainable = True

            else: #! non-int freeze: force every layer trainable (see NOTE in docstring)
                for layer in self.model.layers:
                    self.model.get_layer(name=layer.name).trainable = True

    def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
        """Decode raw model outputs into per-image boxes, labels and scores.

        For every non-background class: (1) filter by minimum score, then
        (2) greedy non-maximum suppression on the survivors.
        """
        #! predicted_locs:   (N,8732|24564,4)         offsets in `gcx,gcy,gw,gh` format
        #! predicted_scores: (N,8732|24564,n_classes) raw logits
        #! max_overlap: overlap ratio above which the lower-scoring box is suppressed
        #! top_k: if many boxes survive, only the top_k highest-scoring are returned
        #! returned boxes are fractional coordinates, values in [0,1]

        batch_size = predicted_locs.shape[0]
        n_priors = self.priors_cxcy.shape[0]
        predicted_scores = tf.nn.softmax(predicted_scores,axis=2) #! (N,8732|24564,n_classes) must match the model's output layout
        
        all_images_bboxes = list()
        all_images_labels = list()
        all_images_scores = list()

        assert n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]

        #! decode predicted locs from `gcx,gcy,gw,gh` offsets to absolute `x1,y1,x2,y2`
        decoded_locs = cxcywh_to_xyxy(gcxcywh_to_cxcywh(predicted_locs, self.priors_cxcy)) #! (N,8732|24564,4)

        for i in range(batch_size): #! process one image at a time
            
            #! (per-image decoding kept for reference; now done batched above)
            # decoded_locs = cxcywh_to_xyxy(gcxcywh_to_cxcywh(predicted_locs[i], self.priors_cxcy)) #! (8732|24564,4)

            image_bboxes = list()
            image_labels = list()
            image_scores = list()

            max_scores = tf.reduce_max(predicted_scores[i],axis=-1) #! NOTE(review): unused
            best_label = tf.argmax(predicted_scores[i],axis=-1)     #! NOTE(review): unused

            for c in range(1, self.n_classes): #! class 0 is background by convention
                class_scores = predicted_scores[i][:, c] #! (8732|24564)
                score_above_min_score = class_scores > min_score
                n_above_min_score = tf.reduce_sum(tf.cast(score_above_min_score,tf.int64)) #! scalar
                if n_above_min_score == 0: continue

                indices = tf.where(score_above_min_score)[:,0] #! 2-D (No,1) -> 1-D (No)
                class_scores = tf.gather(class_scores, indices)       #! (No)
                # class_decoded_locs = tf.gather(decoded_locs, indices) #! (No, 4)
                class_decoded_locs = tf.gather(decoded_locs[i], indices) #! (No, 4)

                #! sort surviving boxes by descending score
                sort_ind = tf.argsort(class_scores, direction="DESCENDING")  #! 1-D
                class_scores = tf.gather(class_scores, sort_ind)             #! (No)
                class_decoded_locs = tf.gather(class_decoded_locs, sort_ind) #! (No,4)

                overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) #! (No,No) pairwise IoU

                suppress = tf.zeros([n_above_min_score],dtype=tf.int64) #! 1 marks a suppressed box

                for box in range(class_decoded_locs.shape[0]):
                    if suppress[box] == 1:
                        continue
                    
                    #! suppress everything overlapping this box too much, then un-suppress the box itself
                    suppress = tf.maximum(suppress, tf.cast(overlap[box] > max_overlap,suppress.dtype))
                    suppress = tf.tensor_scatter_nd_update(suppress,[[box]],tf.zeros(shape=(1,),dtype=suppress.dtype))
                
                indices = tf.where(tf.cast(1 - suppress,tf.bool))[:,0] #! 2-D (No,1) -> 1-D (No)
                image_bboxes.append(tf.gather(class_decoded_locs,indices))
                image_labels.append(tf.gather(1-suppress,indices) * c) #! kept entries are 1, so this yields label c
                image_scores.append(tf.gather(class_scores,indices))
            
            if len(image_bboxes) == 0: #! nothing detected: emit a single background placeholder box
                image_bboxes.append([0.,0.,1.,1.])
                image_labels.append([0])
                image_scores.append([0.])

            image_bboxes = tf.concat(image_bboxes,axis=0) #! image_bboxes may be 1-D (placeholder case)
            image_labels = tf.concat(image_labels,axis=0) #! image_labels is 1-D
            image_scores = tf.concat(image_scores,axis=0) #! image_scores is 1-D
            if len(image_bboxes.shape) == 1: image_bboxes = image_bboxes[None] #! promote to 2-D
            n_objects = image_scores.shape[0]

            if n_objects > top_k: #! keep only the top_k highest-scoring detections
                sort_ind = tf.argsort(image_scores, direction="DESCENDING") #! 1-D
                image_bboxes = tf.gather(image_bboxes,sort_ind)[:top_k]
                image_labels = tf.gather(image_labels,sort_ind)[:top_k]
                image_scores = tf.gather(image_scores,sort_ind)[:top_k]
            
            all_images_bboxes.append(image_bboxes.numpy())
            all_images_labels.append(image_labels.numpy())
            all_images_scores.append(image_scores.numpy())
        
        return all_images_bboxes, all_images_labels, all_images_scores

    def train(self, train_loader, optimizer, criterion, epochs, freeze=None, grad_clip=True, last_tune=0.1, save_epochs=100, valid_loader=None, eval_epochs=100):
        """Train the detector.

        train_loader: iterable dataset yielding (images, boxes, labels, _) batches.
        optimizer:    keras optimizer the model is compiled with.
        criterion:    MultiBox-style loss called as criterion(locs, scores, boxes, labels),
                      exposing an `alpha` weight for the localization term.
        epochs:       total epochs to train (resumes from the optimizer's iteration count).
        freeze:       None = keep loaded trainable flags; int = freeze modules 1..freeze
                      (see `layer_freeze`).
        grad_clip:    if True, clip gradients in `train_step`.
        last_tune:    fraction of the final epochs that fine-tunes only the prediction
                      heads (at least the last 10 epochs).
        save_epochs:  checkpoint period in epochs ("last" checkpoint saved every epoch).
        valid_loader: optional validation set; when None, evaluation runs on the training set.
        eval_epochs:  evaluation period in epochs.
        """
        self.layer_freeze(freeze) #! optionally freeze layers

        self.model.compile(optimizer=optimizer) #! attach the optimizer

        #! === training setup ===
        #! per-run log / checkpoint directories, keyed by timestamp
        ymdHMS = get_now()
        FILEPATH = os.path.abspath(__file__)
        ROOTDIR  = os.path.dirname(FILEPATH)
        LOG_DIR  = os.path.join(ROOTDIR,"logs"  ,ymdHMS)
        MODELDIR = os.path.join(ROOTDIR,"models",ymdHMS)
        os.makedirs(MODELDIR, exist_ok=True) #! model.save does not create parent directories
        summary_writer = tf.summary.create_file_writer(LOG_DIR)

        #! resume epoch derived from the optimizer's iteration counter
        start_iteration = int(self.model.optimizer.iterations.numpy())
        start_epoch = start_iteration // len(train_loader)
        digit_epoch = str(len(str(epochs)))
        format_epoch = "epoch [{:0" + digit_epoch + "}][{}]"

        digit_step = str(len(str(len(train_loader))))
        format_step = "step [{:0" + digit_step + "}][{}]"

        #! batch size
        BS = train_loader.batch_size

        #! first epoch at which evaluation may run
        EVAL_START_EPOCH = 100

        #! last `last_tune` fraction of the epochs (at least 10) fine-tunes only the heads
        TUNE_START_EPOCH = epochs - max(10, int(last_tune * epochs / 10) * 10)

        #! running (smoothed) loss trackers
        loss_item      = AverageMeter()
        loc_loss_item  = AverageMeter()
        conf_loss_item = AverageMeter()

        for epoch in range(start_epoch, epochs):

            if (epoch+1) >= TUNE_START_EPOCH: #! fine-tune phase: freeze everything but the heads
                if isinstance(self, SSD300): self.layer_freeze(11)
                elif isinstance(self, SSD512): self.layer_freeze(12)
            
            #! COCO-style ground-truth accumulators
            gt_categories_ = list()
            gt_annotations = list()
            gt_images = list()
            #! detection accumulator
            dt_annotations = list()
            #! running annotation id
            box_id = 0

            #! per-step training: loss and backprop
            for step, (images, boxes, labels, _) in enumerate(train_loader):

                string_epoch = format_epoch.format(epoch+1, epochs)
                string_step = format_step.format(step+1, len(train_loader))

                data = images, boxes, labels

                #! same condition as the training-set evaluation below: collect detections this epoch
                if (epoch+1) > (EVAL_START_EPOCH-1) and valid_loader is None and (epoch+1) % eval_epochs == 0:
                    batch_images_bboxes, batch_images_labels, batch_images_scores = \
                        self.train_step(data, criterion, grad_clip, summary_writer, \
                                        string_epoch, string_step, [loss_item, loc_loss_item, conf_loss_item], \
                                        return_objects=True)
                    
                    N = images.shape[0] #! images in this batch

                    #! ground truth: convert `x1,y1,x2,y2` boxes to `x,y,w,h`
                    boxes_xywh = [np.concatenate([box[:, :2], box[:, 2:] - box[:, :2]], axis=-1) for box in boxes]
                    #! FIX: area is height * width; the original multiplied the height by itself
                    areas = [(box[:, 3] - box[:, 1]) * (box[:, 2] - box[:, 0]) for box in boxes]
                    batch_images = [{'id': step * BS + id} for id in range(N)]
                    batch_gt_categories_ = [{'id': label} for id in range(N) for label in labels[id]] #! de-duplicated when appended
                    batch_gt_annotations = [{'id': box_id + x, 'image_id': step * BS + id, 'category_id': label, \
                                             'iscrowd': 0, 'bbox': box.tolist(), 'area': area} \
                                             for id in range(N) for x,(box,label,area) in enumerate(zip(boxes_xywh[id], labels[id], areas[id]))]
                    box_id += len(batch_gt_annotations)

                    gt_images.extend(batch_images)
                    [gt_categories_.append(category) for category in batch_gt_categories_ if category not in gt_categories_]
                    gt_annotations.extend(batch_gt_annotations)

                    #! detections, likewise converted to `x,y,w,h`
                    batch_images_bboxes_xywh = [np.concatenate([BIB[:, :2], BIB[:, 2:] - BIB[:, :2]], axis=-1) for BIB in batch_images_bboxes]
                    batch_dt_annotations = [{"image_id": step * BS + id, "bbox": box.tolist(), "category_id": label, "score": score} \
                                            for id in range(N) for box,label,score in zip(batch_images_bboxes_xywh[id], batch_images_labels[id], batch_images_scores[id])]
                    
                    dt_annotations.extend(batch_dt_annotations)
                    
                else:
                    self.train_step(data, criterion, grad_clip, summary_writer, \
                        string_epoch, string_step, [loss_item, loc_loss_item, conf_loss_item])

            #! checkpointing: periodic snapshot plus an always-updated "last"
            if (epoch+1) % save_epochs == 0:
                self.save_model(os.path.join(MODELDIR,"%s_%d.h5"%(self.model.name, epoch+1)))
            self.save_model(os.path.join(MODELDIR,"%s_last.h5"%self.model.name))

            #! evaluation on the training set (no validation set), every eval_epochs after EVAL_START_EPOCH
            if (epoch+1) > (EVAL_START_EPOCH-1) and valid_loader is None and (epoch+1) % eval_epochs == 0:
                gt_dataset = {'images': gt_images, 'annotations': gt_annotations, 'categories': gt_categories_}
                coco_eval(gt_dataset, dt_annotations)

            #! evaluation on the validation set, every eval_epochs after EVAL_START_EPOCH
            if (epoch+1) > (EVAL_START_EPOCH-1) and valid_loader is not None and (epoch+1) % eval_epochs == 0:
                self.evaluate(valid_loader)

            #! reset the smoothed losses each epoch
            loss_item.reset()
            loc_loss_item.reset()
            conf_loss_item.reset()

        #! final evaluation on the training set when no validation set exists
        if valid_loader is None:
            self.evaluate(train_loader)

        #! final evaluation on the validation set unless the last epoch already ran one
        if valid_loader is not None and epochs % eval_epochs != 0:
            self.evaluate(valid_loader)

    def train_step(self, data, criterion, grad_clip, summary_writer, epoch, step, loss_item_list, return_objects=False):
        """Run one optimization step on a single batch.

        data:            (images, boxes, labels) batch.
        criterion:       MultiBox-style loss exposing `alpha`.
        grad_clip:       if True, clip gradients (global-norm then per-value, both 0.5).
        summary_writer:  tf.summary writer for scalar logs.
        epoch, step:     pre-formatted progress strings for the console log.
        loss_item_list:  [loss, loc_loss, conf_loss] AverageMeters, updated in place.
        return_objects:  if True, also decode detections from this batch.

        Returns `detect_objects(...)` output when return_objects is True, else None.
        """
        images, boxes, labels = data

        loss_item, loc_loss_item, conf_loss_item = loss_item_list

        #! forward pass and loss under the tape; gradient work happens OUTSIDE the
        #! tape context so the backward computation itself is not recorded
        with tf.GradientTape() as tape:
            predicted_locs, predicted_scores = self.model(images)
            conf_loss, loc_loss = criterion(predicted_locs, predicted_scores, boxes, labels) #! two scalars
            loss = conf_loss + criterion.alpha * loc_loss

        #! loss bookkeeping (no gradients needed here)
        bs = images.shape[0]
        loss_item.update(loss.numpy(), n=bs)
        loc_loss_item.update(loc_loss.numpy() * criterion.alpha, n=bs)
        conf_loss_item.update(conf_loss.numpy(), n=bs)

        #! gradient computation
        trainable_vars = self.model.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        #! clipping and application
        if grad_clip is True: #! clip by global norm, then clamp individual values
            gradients, _ = tf.clip_by_global_norm(gradients, 0.5)
            capped_gradients = [(tf.clip_by_value(gradient,-0.5,0.5),variable) \
                for gradient,variable in zip(gradients, trainable_vars) if gradient is not None]
            self.model.optimizer.apply_gradients(capped_gradients)
        else: #! apply unclipped
            self.model.optimizer.apply_gradients(zip(gradients, trainable_vars))

        #! current learning rate (schedules are evaluated at the iteration counter)
        lr = self.model.optimizer.lr(self.model.optimizer.iterations) \
            if isinstance(self.model.optimizer.lr, keras.optimizers.schedules.LearningRateSchedule) \
            else self.model.optimizer.lr
        
        #! tensorboard scalars
        with summary_writer.as_default():
            tf.summary.scalar('conf_loss',conf_loss_item.avg,step=self.model.optimizer.iterations)
            tf.summary.scalar('loc_loss',loc_loss_item.avg,step=self.model.optimizer.iterations)
            tf.summary.scalar('loss',loss_item.avg,step=self.model.optimizer.iterations)
            tf.summary.scalar('lr',float(lr),step=self.model.optimizer.iterations)

        #! console progress line
        tf.print("%s %s loss: %.4f conf_loss: %.4f loc_loss: %.4f"%(epoch, step, \
                loss_item.avg, conf_loss_item.avg, loc_loss_item.avg))
        
        #! optionally decode detections from this batch
        if return_objects: return self.detect_objects(predicted_locs, predicted_scores, 0.05, 0.60, 1000)
    
    def fit(self, train_loader, optimizer, criterion, epochs, freeze=None, grad_clip=True, save_epochs=100, valid_loader=None, eval_epochs=100, last_tune=0.1):
        """Keras-style alias for `train`; see `train` for parameter semantics.

        `last_tune` is now exposed and forwarded (previously `train`'s default was
        always used); it is appended after the original parameters with the same
        default, so existing callers are unaffected.
        """
        self.train(train_loader, optimizer, criterion, epochs, freeze=freeze, grad_clip=grad_clip, last_tune=last_tune, save_epochs=save_epochs, valid_loader=valid_loader, eval_epochs=eval_epochs)

    def predict(self, min_score, max_overlap, top_k, data_loader=None, data_dir=None, image_path_list=None):
        """Run detection over a data loader, a directory of images, or an explicit path list.

        Exactly one input source should be given: `data_loader` alone, or
        `data_dir` / `image_path_list` without a loader; any other combination
        results in empty outputs.

        Returns (paths, bboxes, labels, scores), one entry per image; boxes are
        fractional `x1,y1,x2,y2` as produced by `detect_objects`.
        """
        #! values to return
        all_image_paths  = list() #! optional
        all_image_bboxes = list()
        all_image_labels = list()
        all_image_scores = list()
        #! working variables
        h, w, c = self.input_shape

        if data_loader is not None and data_dir is None and image_path_list is None:

            if issubclass(type(data_loader), PascalVOCDataset):
                shuffle = data_loader.shuffle
                data_loader.shuffle = False #! disable shuffling so the path list stays aligned with the batches
                
                for i, (images, batch_image_bboxes, batch_image_labels, _) in enumerate(data_loader):
                    #! forward pass and decoding
                    predicted_locs, predicted_scores = self.model(images)
                    batch_image_bboxes, batch_image_labels, batch_image_scores =\
                        self.detect_objects(predicted_locs, predicted_scores, min_score, max_overlap, top_k)
                    all_image_bboxes.extend(batch_image_bboxes)
                    all_image_labels.extend(batch_image_labels)
                    all_image_scores.extend(batch_image_scores)
                    all_image_paths.extend(data_loader.images[i * data_loader.batch_size: (i+1) * data_loader.batch_size])
                
                data_loader.shuffle = shuffle #! restore the original shuffle setting
                            
        elif data_loader is None and (data_dir is not None or image_path_list is not None):

            if data_dir is not None and image_path_list is None:
                #! collect the image paths from the directory
                image_path_list = [os.path.join(data_dir, image_path) for image_path in os.listdir(data_dir)]
            elif data_dir is None and image_path_list is not None:
                pass
            else:
                image_path_list = []
            
            BS = 16 #! batch size for file-based prediction
            for j in range(0,len(image_path_list),BS): #! walk the path list in steps of BS
                
                #! one batch of image paths, in order
                batch_image_list = image_path_list[j: j + BS]
        
                #! load and preprocess the batch (BGR->RGB, resize to network input, scale, ImageNet normalize)
                images = []
                for ip in batch_image_list:
                    image = cv2.imread(ip)
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    image = cv2.resize(image, (w, h))
                    image = image / 255.
                    image = normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                    images.append(image)
                images = np.stack(images)

                #! forward pass and decoding
                predicted_locs, predicted_scores = self.model(images)
                batch_image_bboxes, batch_image_labels, batch_image_scores =\
                    self.detect_objects(predicted_locs, predicted_scores, min_score, max_overlap, top_k)
                all_image_bboxes.extend(batch_image_bboxes)
                all_image_labels.extend(batch_image_labels)
                all_image_scores.extend(batch_image_scores)
                all_image_paths.extend(batch_image_list)

        return all_image_paths, all_image_bboxes, all_image_labels, all_image_scores

    def evaluate(self, data_loader, rev_label_map=None):
        """COCO-style evaluation of the model on `data_loader`.

        Temporarily disables shuffling and augmentation so predictions and
        ground truth line up per image, assembles COCO-format ground-truth and
        detection structures, and runs `coco_eval` on them.

        rev_label_map: optional {label_id: name} mapping for category names.
        """
        shuffle = data_loader.shuffle
        transform = data_loader.transform
        data_loader.shuffle = False    #! keep the image order fixed
        data_loader.transform = False  #! no augmentation while evaluating

        #! predictions
        tf.print(get_now(), "预测结果")
        dt_image_paths, dt_image_bboxes, dt_image_labels, dt_image_scores = \
            self.predict(min_score=0.05, max_overlap=0.6, top_k=1000, data_loader=data_loader)
        
        #! ground truth
        tf.print(get_now(), "处理真值")
        gt_image_paths = list()
        gt_image_bboxes = list()
        gt_image_labels = list()
        for i, (_, batch_image_boxes, batch_image_labels, _) in enumerate(data_loader):
            gt_image_bboxes.extend(batch_image_boxes)
            gt_image_labels.extend(batch_image_labels)
            gt_image_paths.extend(data_loader.images[i * data_loader.batch_size: (i+1) * data_loader.batch_size])
        
        data_loader.shuffle = shuffle     #! restore shuffle
        data_loader.transform = transform #! restore transform

        #! assemble COCO-format ground truth and detections
        tf.print(get_now(), "整理数据")
        gt_dataset = dict()
        # dt_dataset = dict()
        images     = []
        gt_annotations = []
        gt_categories_ = []
        dt_annotations = []
        # dt_categories_ = []
        bid = 0 #! short for bounding box id

        for image_id, (dt_image_path, dt_image_bbox, dt_image_label, dt_image_score, gt_image_path, gt_image_bbox, gt_image_label) in \
            enumerate(zip(dt_image_paths, dt_image_bboxes, dt_image_labels, dt_image_scores, gt_image_paths, gt_image_bboxes, gt_image_labels)):
            
            assert dt_image_path == gt_image_path

            #! no need to read the image, only the path and the image_id
            #! {"file_name": gt_image_path, "height": h, "width": w, "id": image_id}
            image_dict = {"file_name": gt_image_path, "id": image_id}
            images.append(image_dict)
            
            #! float bboxes work fine; categories do not require a name
            gt_image_bbox_xywh = np.concatenate([gt_image_bbox[:, :2], gt_image_bbox[:, 2:] - gt_image_bbox[:, :2]], axis=-1)
            gt_image_area = (gt_image_bbox[:,3] - gt_image_bbox[:,1]) * (gt_image_bbox[:,2] - gt_image_bbox[:,0])
            for j, (bbox, label, area) in enumerate(zip(gt_image_bbox_xywh, gt_image_label, gt_image_area)):
                
                #! {"image_id": image_id, "area": area, "iscrowd": 0, "bbox": bbox.tolist(), "category_id": label, "id": bid}
                annotation_dict = {"image_id": image_id, "area": area, "iscrowd": 0, "bbox": bbox.tolist(), "category_id": label, "id": bid}
                gt_annotations.append(annotation_dict)

                #! {"id": label, "name": label if not rev_label_map else rev_label_map[label]}
                category_dict = {"id": label, "name": label if not rev_label_map else rev_label_map[label]}
                if category_dict not in gt_categories_: gt_categories_.append(category_dict)

                bid += 1

            #! detections only need annotation entries
            dt_image_bbox_xywh = np.concatenate([dt_image_bbox[:, :2], dt_image_bbox[:, 2:] - dt_image_bbox[:, :2]], axis=-1)
            for k, (bbox, label, score) in enumerate(zip(dt_image_bbox_xywh, dt_image_label, dt_image_score)):

                annotation_dict = {"image_id": image_id, "bbox": bbox.tolist(), "category_id": label, "score": score}
                dt_annotations.append(annotation_dict)

                # category_dict = {"id": label, "name": labeboxl if not rev_label_map else rev_label_map[label]}
                # if category_dict not in dt_categories_: dt_categories_.append(category_dict)

        gt_dataset["images"] = images
        gt_dataset["annotations"] = gt_annotations
        gt_dataset["categories" ] = gt_categories_

        #! run the COCO metrics
        tf.print(get_now(), "评估计算")
        coco_eval(gt_dataset, dt_annotations)

class SSD300(SSD):
    """SSD detector with 300x300 inputs (8732 priors over 6 feature maps)."""

    def __init__(self, n_classes, model_path=None, weights_path=None):
        super().__init__(n_classes, (300,300,3), model_path=model_path, weights_path=weights_path)

    def create_model(self):
        """Assemble the SSD300 graph: VGG16 base -> L2-rescaled conv4_3 -> aux convs -> prediction heads."""
        inputs = keras.Input(self.input_shape, name="input")
        conv4_3_feats, conv7_feats = VGG16Base_feats(inputs)

        #! L2-normalize conv4_3 over channels, then apply a learned per-channel scale
        l2_norm = tf.sqrt(tf.reduce_sum(tf.pow(conv4_3_feats, 2), axis=-1, keepdims=True)) #! (N,38,38,1)
        conv4_3_feats = Rescale(name="rescale_factors")(conv4_3_feats / l2_norm)           #! (N,38,38,512)

        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = SSD300AuxConv(conv7_feats)
        locs, classes_scores = SSD300PredConv(self.n_classes,
                                              conv4_3_feats,
                                              conv7_feats,
                                              conv8_2_feats,
                                              conv9_2_feats,
                                              conv10_2_feats,
                                              conv11_2_feats)
        return keras.Model(inputs=inputs, outputs=[locs, classes_scores], name="SSD300")

    def create_prior_boxes(self):
        """Build the 8732 SSD300 prior boxes in fractional `cx,cy,w,h` coordinates."""
        fmap_dims  = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10,
                      'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1}
        obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375,
                      'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9}
        aspect_ratios = {
            'conv4_3' : [1., 2., 0.5],
            'conv7'   : [1., 2., 3., 0.5, 0.333],
            'conv8_2' : [1., 2., 3., 0.5, 0.333],
            'conv9_2' : [1., 2., 3., 0.5, 0.333],
            'conv10_2': [1., 2., 0.5],
            'conv11_2': [1., 2., 0.5],
        }

        fmaps = list(fmap_dims.keys()) #! feature-map names, in prediction order
        prior_boxes = []

        for k, fmap in enumerate(fmaps):
            dim, scale = fmap_dims[fmap], obj_scales[fmap]
            for row in range(dim):
                for col in range(dim):
                    cx = (col + 0.5) / dim
                    cy = (row + 0.5) / dim
                    for ratio in aspect_ratios[fmap]:
                        root = math.sqrt(ratio)
                        prior_boxes.append([cx, cy, scale * root, scale / root])
                        #! for ratio 1, add one extra prior whose side is the geometric
                        #! mean of this map's scale and the next map's (1.0 for the last map);
                        #! per-map prior counts become [4,6,6,6,4,4], matching PredConv's n_boxes
                        if ratio == 1:
                            if k + 1 < len(fmaps):
                                extra = math.sqrt(scale * obj_scales[fmaps[k + 1]])
                            else:
                                extra = 1.
                            prior_boxes.append([cx, cy, extra, extra])

        priors = tf.convert_to_tensor(prior_boxes) #! (8732,4)
        return tf.clip_by_value(priors, 0, 1)      #! clamp to [0,1] (no-op in practice)

class SSD512(SSD):
    """SSD detector with 512x512 inputs (24564 priors over 7 feature maps)."""

    def __init__(self, n_classes, model_path=None, weights_path=None):
        super().__init__(n_classes, (512,512,3), model_path=model_path, weights_path=weights_path)

    def create_model(self):
        """Assemble the SSD512 graph: VGG16 base -> L2-rescaled conv4_3 -> aux convs -> prediction heads."""
        inputs = keras.Input(self.input_shape, name="input")
        conv4_3_feats, conv7_feats = VGG16Base_feats(inputs)

        #! L2-normalize conv4_3 over channels, then apply a learned per-channel scale
        l2_norm = tf.sqrt(tf.reduce_sum(tf.pow(conv4_3_feats, 2), axis=-1, keepdims=True)) #! (N,64,64,1)
        conv4_3_feats = Rescale(name="rescale_factors")(conv4_3_feats / l2_norm)           #! (N,64,64,512)

        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats = SSD512AuxConv(conv7_feats)
        locs, classes_scores = SSD512PredConv(self.n_classes,
                                              conv4_3_feats,
                                              conv7_feats,
                                              conv8_2_feats,
                                              conv9_2_feats,
                                              conv10_2_feats,
                                              conv11_2_feats,
                                              conv12_2_feats)
        return keras.Model(inputs=inputs, outputs=[locs, classes_scores], name="SSD512")

    def create_prior_boxes(self):
        """Build the 24564 SSD512 prior boxes in fractional `cx,cy,w,h` coordinates."""
        fmap_dims  = {'conv4_3': 64, 'conv7': 32, 'conv8_2': 16, 'conv9_2': 8,
                      'conv10_2': 4, 'conv11_2': 2, 'conv12_2': 1}
        obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.34, 'conv9_2': 0.48,
                      'conv10_2': 0.62, 'conv11_2': 0.76, 'conv12_2': 0.9}
        aspect_ratios = {
            'conv4_3' : [1., 2., 0.5],
            'conv7'   : [1., 2., 3., 0.5, 0.333],
            'conv8_2' : [1., 2., 3., 0.5, 0.333],
            'conv9_2' : [1., 2., 3., 0.5, 0.333],
            'conv10_2': [1., 2., 3., 0.5, 0.333],
            'conv11_2': [1., 2., 0.5],
            'conv12_2': [1., 2., 0.5],
        }

        fmaps = list(fmap_dims.keys()) #! feature-map names, in prediction order
        prior_boxes = []

        for k, fmap in enumerate(fmaps):
            dim, scale = fmap_dims[fmap], obj_scales[fmap]
            for row in range(dim):
                for col in range(dim):
                    cx = (col + 0.5) / dim
                    cy = (row + 0.5) / dim
                    for ratio in aspect_ratios[fmap]:
                        root = math.sqrt(ratio)
                        prior_boxes.append([cx, cy, scale * root, scale / root])
                        #! for ratio 1, add one extra prior whose side is the geometric
                        #! mean of this map's scale and the next map's (1.0 for the last map);
                        #! per-map prior counts become [4,6,6,6,6,4,4], matching PredConv's n_boxes
                        if ratio == 1:
                            if k + 1 < len(fmaps):
                                extra = math.sqrt(scale * obj_scales[fmaps[k + 1]])
                            else:
                                extra = 1.
                            prior_boxes.append([cx, cy, extra, extra])

        priors = tf.convert_to_tensor(prior_boxes) #! (24564,4)
        return tf.clip_by_value(priors, 0, 1)      #! clamp to [0,1] (no-op in practice)