import os
import time
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import UpSampling2D as UpSample, Concatenate as Concat
from model_utils import (DWConv, Conv, TransformerLayer, TransformerBlock,
                         Bottleneck, BottleneckCSP, C3, C3TR,
                         SPP, Focus, Detect)

def backbone(x00, gw=2, gd=1):
    """Build the YOLOv5 backbone and return the three multi-scale features.

    Args:
        x00: input image tensor.
        gw, gd: width / depth multipliers (gw=2, gd=1 corresponds to yolov5s).

    Returns:
        (p3, p4, p5) feature maps; shape comments assume a 640x640 input.
    """
    stem = Focus(x00, 3, 16 * gw, 3, node=0)                   # (160,160) stem
    down = Conv(stem, 16 * gw, 32 * gw, 3, 2, node=1)          # (160,160)
    csp1 = C3(down, 32 * gw, 32 * gw, 1 * gd, node=2)          # (160,160)
    down = Conv(csp1, 32 * gw, 64 * gw, 3, 2, node=3)          # ( 80, 80)
    p3 = C3(down, 64 * gw, 64 * gw, 3 * gd, node=4)            # ( 80, 80) P3
    down = Conv(p3, 64 * gw, 128 * gw, 3, 2, node=5)           # ( 40, 40)
    p4 = C3(down, 128 * gw, 128 * gw, 3 * gd, node=6)          # ( 40, 40) P4
    down = Conv(p4, 128 * gw, 256 * gw, 3, 2, node=7)          # ( 20, 20)
    pooled = SPP(down, 256 * gw, 256 * gw, node=8)             # ( 20, 20)
    p5 = C3(pooled, 256 * gw, 256 * gw, 1 * gd, False, node=9) # ( 20, 20) P5
    return p3, p4, p5

def head(y05, y07, y10, gw=2, gd=1,
         nc=80, imgsz=(640,640), strides=(8,16,32), anchors=(), ch=(128,256,512)):
    """Build the YOLOv5 FPN/PAN head and the final Detect layer.

    Args:
        y05, y07, y10: P3/P4/P5 feature maps produced by backbone().
        gw, gd: width / depth multipliers (gw=2, gd=1 corresponds to yolov5s).
        nc: number of classes.
        imgsz: input image size as (w, h).
        strides: output strides of the three detection scales.
        anchors: anchor boxes forwarded to Detect.
        ch: per-scale channel counts forwarded to Detect.

    Returns:
        Output of the Detect layer over the P3/P4/P5 head features.

    FIX: `strides` and `ch` defaults were mutable lists; changed to tuples
    with the same values so the defaults cannot be mutated across calls.
    """
    # Top-down path: upsample P5 and fuse with P4, then with P3.
    y11 = Conv    (y10, 256*gw, 128*gw, 1,        1, node=10) #   11 (20,20,256) P5
    y12 = UpSample(name="11.UpSample")(y11)                   #!  12 (40,40,256) P5_UpSample
    y13 = Concat  (name='12.Concat')([y12, y07])              #   13 (40,40,512) P5_UpSample
    y14 = C3      (y13, 256*gw, 128*gw, 1*gd, False, node=13) #   14 (40,40,256) P5_UpSample

    y15 = Conv    (y14, 128*gw,  64*gw, 1,        1, node=14) #   15 (40,40,128) P4
    y16 = UpSample(name='15.UpSample')(y15)                   #!  16 (80,80,128) P4_UpSample
    y17 = Concat  (name='16.Concat')([y16, y05])              #   17 (80,80,256) P4_UpSample
    y18 = C3      (y17, 128*gw,  64*gw, 1*gd, False, node=17) #!  18 (80,80,128) P3_out

    # Bottom-up path: downsample and fuse back toward P5.
    y19 = Conv    (y18,  64*gw,  64*gw, 3,        2, node=18) #   19 (40,40,128) P3_downsample
    y20 = Concat  (name='19.Concat')([y19, y15])              #!  20 (40,40,256) P3_downsample
    y21 = C3      (y20, 128*gw, 128*gw, 1*gd, False, node=20) #   21 (40,40,256) P4_out

    y22 = Conv    (y21, 128*gw, 128*gw, 3,        2, node=21) #!  22 (20,20,256) P4_downsample
    y23 = Concat  (name='22.Concat')([y22, y11])              #   23 (20,20,512) P4_downsample
    y24 = C3      (y23, 256*gw, 256*gw, 1*gd, False, node=23) #!  24 (20,20,512) P5_out

    return Detect([y18, y21, y24], imgsz=imgsz, nc=nc, ch=ch, strides=strides, anchors=anchors, node=24)

def yolov5(gw=2, gd=1, ch=(128,256,512), nc=80, imgsz=(640,640), strides=(8,16,32), anchors=()):
    """Assemble a full YOLOv5 keras.Model (backbone + head).

    Args:
        gw, gd: width / depth multipliers selecting the model size.
        ch: per-scale channel counts forwarded to Detect.
        nc: number of classes.
        imgsz: input image size as (w, h).
        strides: detection strides.
        anchors: anchor boxes.

    Returns:
        keras.Model mapping an (h, w, 3) image batch to Detect outputs.

    FIX: `ch` and `strides` defaults were mutable lists; changed to tuples.
    """
    x = keras.Input([imgsz[1], imgsz[0], 3])  # keras expects (h, w, c)
    feats = backbone(x, gw=gw, gd=gd)
    outs = head(*feats, gw=gw, gd=gd, ch=ch,
                imgsz=imgsz, nc=nc, strides=strides, anchors=anchors)
    return keras.Model(inputs=x, outputs=outs)

def yolov5s(nc=80, imgsz=(640,640), strides=(8,16,32), anchors=(), background=False):
    """YOLOv5-small (gw=2, gd=1). FIX: tuple default instead of mutable list."""
    if background:
        nc += 1  #! reserve one extra class slot for a background category
    return yolov5(gw=2, gd=1, nc=nc, imgsz=imgsz, strides=strides, anchors=anchors)

def yolov5m(nc=80, imgsz=(640,640), strides=(8,16,32), anchors=(), background=False):
    """YOLOv5-medium (gw=3, gd=2). FIX: tuple default instead of mutable list."""
    if background:
        nc += 1  #! reserve one extra class slot for a background category
    return yolov5(gw=3, gd=2, nc=nc, imgsz=imgsz, strides=strides, anchors=anchors)

def yolov5l(nc=80, imgsz=(640,640), strides=(8,16,32), anchors=(), background=False):
    """YOLOv5-large (gw=4, gd=3). FIX: tuple default instead of mutable list."""
    if background:
        nc += 1  #! reserve one extra class slot for a background category
    return yolov5(gw=4, gd=3, nc=nc, imgsz=imgsz, strides=strides, anchors=anchors)

def yolov5x(nc=80, imgsz=(640,640), strides=(8,16,32), anchors=(), background=False):
    """YOLOv5-xlarge (gw=5, gd=4). FIX: tuple default instead of mutable list."""
    if background:
        nc += 1  #! reserve one extra class slot for a background category
    return yolov5(gw=5, gd=4, nc=nc, imgsz=imgsz, strides=strides, anchors=anchors)

def convert_model(model_dir=None, flag='s'):
    """Transfer weights from an exported ONNX yolov5 model into the Keras
    model and save the result as "<name>.h5".

    Args:
        model_dir: directory containing "<name>.onnx"; defaults to the
            "weights" folder next to this file. The .h5 is written back there.
        flag: model size, one of 's'/'m'/'l'/'x'.
    """
    #! local import: onnx is only needed for the conversion path
    import onnx
    from onnx import numpy_helper

    #! model selection — direct function lookup instead of eval() on a string
    model_zoo = {'s': 'yolov5s', 'm': 'yolov5m', 'l': 'yolov5l', 'x': 'yolov5x'}
    builders  = {'s': yolov5s,   'm': yolov5m,   'l': yolov5l,   'x': yolov5x}
    model = builders[flag]()  #! build the Keras model with default settings

    #! resolve the weights directory
    if model_dir is None:
        model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "weights")

    #! load the onnx model and its initializers (the raw weight tensors)
    onnx_model_path = os.path.join(os.path.abspath(model_dir), "%s.onnx"%model_zoo[flag])
    onnx_model = onnx.load(onnx_model_path)
    onnx_weights = onnx_model.graph.initializer

    #! index onnx initializers by name; the position of the name field in
    #! ListFields() depends on which optional proto fields are populated
    weights = {}
    for weight in onnx_weights:
        try:
            if len(weight.ListFields()) < 4:
                onnx_extracted_weights_name = weight.ListFields()[1][1]
            else:
                onnx_extracted_weights_name = weight.ListFields()[2][1]
        except IndexError:  #! FIX: was a bare except; only indexing can fail here
            onnx_extracted_weights_name = weight.ListFields()[3][1]
        weights[onnx_extracted_weights_name] = numpy_helper.to_array(weight)

    #! copy weights layer by layer
    for layer in model.layers:
        #! skip layers that carry no transferable weights
        if layer.name == "input_1":
            continue
        elif "__operators__" in layer.name or "transpose" in layer.name or "math" in layer.name:
            continue
        elif "Concat" in layer.name or "Reshape" in layer.name or "UpSample" in layer.name or "Pad" in layer.name:
            continue
        elif "Sigmoid" in layer.name or "SiLU" in layer.name or "MaxPool" in layer.name or "BN" in layer.name:
            #! BN skipped because its parameters do not align; needs retraining
            continue

        #! node index and normalized (lower-cased, prefix-stripped) layer name
        node = layer.name.strip().split(".")[0]
        layer_name = layer.name.replace("Focus.","").replace("C3.","").replace("Bottleneck.","").replace("SPP.","").replace("Detect.","").replace(".Detect","").lower()
        #! skip BN layers here too (weights missing or misaligned)
        if "bn" in layer_name:
            continue

        layer_weights = []
        name_keyword = "model.%s."%(node)
        for onnx_layer_name, weight in weights.items(): #! scan all onnx weights
            if name_keyword in onnx_layer_name:         #! node keyword matches
                name = "model.%s."%layer_name           #! partial layer name
                if name in onnx_layer_name:             #! partial name matches
                    if "weight" in onnx_layer_name:
                        #! kernel goes first; axes transposed ([3,2,1,0]) to the
                        #! Keras kernel layout expected by set_weights
                        layer_weights.insert(0, weight.transpose([3,2,1,0]))
                    else:
                        layer_weights.append(weight)
        #! load the collected weights into the layer
        layer.set_weights(layer_weights)

    #! save as h5
    h5_model_path = os.path.join(os.path.abspath(model_dir), "%s.h5"%model_zoo[flag])
    model.save(h5_model_path)


class YOLOv5:
    """High-level wrapper around the functional yolov5* builders.

    Handles model creation, pretrained / saved-model weight loading, a
    custom training loop with TensorBoard logging, and raw inference.
    """

    def __init__(self, n_classes, input_shape, strides=(), anchors=(), background=False, flag='s', model_path=None, weights_path=None):
        """
        Args:
            n_classes: number of object classes.
            input_shape: (w, h) of the network input.
            strides: detection strides; defaults to [8, 16, 32].
            anchors: flat anchor list per scale; defaults to the COCO anchors.
            background: whether an extra background class slot is used.
            flag: model size 's'/'m'/'l'/'x'.
            model_path: full saved model to load (takes precedence).
            weights_path: weights file to load into a freshly built model.
        """
        self.n_classes = n_classes
        self.input_shape = input_shape
        self.strides = [8,16,32] if not strides else strides
        #! BUGFIX: iterate self.strides — the raw `strides` argument is empty
        #! when the default () is used, which left wh_list empty.
        self.wh_list = [(input_shape[0]//s, input_shape[1]//s) for s in self.strides]
        self.anchors = [[ 10,13,  16, 30,  33, 23],[ 30,61,  62, 45,  59,119],[116,90, 156,198, 373,326]] \
            if not anchors else anchors
        self.nl = len(self.anchors)          # number of detection layers
        self.na = len(self.anchors[0]) // 2  # anchors per layer
        self.anchors = np.array(self.anchors, dtype=np.float32).reshape([self.nl, self.na, 2])
        self.background = background
        self.flag = flag

        if model_path is not None:
            self.load_model(model_path)
        else:
            self.model = self.create_model()
            if weights_path is not None:
                self.load_weights(weights_path)
            else:
                self.load_pretrained(True)

    def create_model(self):
        """Build a fresh YOLOv5 model of the configured size."""
        flag_mapping = {'s': yolov5s, 'm': yolov5m, 'l': yolov5l, 'x': yolov5x}
        # NOTE(review): input_shape is documented as (w, h) but is swapped
        # before being passed as imgsz, which is also documented as (w, h);
        # equivalent only for square inputs — confirm intent for non-square.
        imgsz = (self.input_shape[1], self.input_shape[0])
        return flag_mapping[self.flag](self.n_classes, imgsz, self.strides, self.anchors, self.background)

    def save_model(self, model_path):
        """Save the full model (architecture + weights)."""
        self.model.save(model_path)

    def load_weights(self, weights_path):
        """Load weights into the already-built model."""
        self.model.load_weights(weights_path)

    def load_model(self, model_path):
        """Load a saved model; if its class count differs from the configured
        one, rebuild and transfer all non-Detect layer weights instead."""
        BaseModel = keras.models.load_model(model_path)
        out1, _, _ = BaseModel.output

        #! each prediction has 4 box + 1 obj (+ 1 background) + nc class slots
        n_classes = out1.shape[-1] - 6 if self.background else out1.shape[-1] - 5

        if n_classes == self.n_classes:
            #! FIX: reuse the already-loaded model instead of reading the
            #! same file from disk a second time
            self.model = BaseModel
            return

        #! class counts differ: rebuild and copy transferable layers
        self.model = self.create_model()
        for layer in BaseModel.layers:
            if "Detect" in layer.name: continue
            if "__operators__" in layer.name: continue
            if "transpose" in layer.name: continue
            if "math" in layer.name: continue
            self.model.get_layer(name=layer.name).set_weights(layer.get_weights())
            self.model.get_layer(name=layer.name).trainable = layer.trainable
        del BaseModel

    def load_pretrained(self, trainable=False):
        """Load the bundled pretrained weights (./weights/yolov5<flag>.h5),
        copying every layer except the Detect head."""
        WEIGHT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "weights")
        flag_mapping = {'s': 'yolov5s.h5', 'm': 'yolov5m.h5', 'l': 'yolov5l.h5', 'x': 'yolov5x.h5'}
        model_path = os.path.join(WEIGHT_DIR, flag_mapping[self.flag])

        BaseModel = keras.models.load_model(model_path)
        for layer in BaseModel.layers:
            if "Detect" in layer.name: continue
            self.model.get_layer(name=layer.name).set_weights(layer.get_weights())
            self.model.get_layer(name=layer.name).trainable = bool(trainable)
        del BaseModel

    def fit(self, train_loader, epochs, optimizer, loss_fn, metric_fn, validation_loader=None):
        """Keras-style alias for train()."""
        return self.train(train_loader, epochs, optimizer, loss_fn, metric_fn, validation_loader=validation_loader)

    def train(self, train_loader, epochs, optimizer, loss_fn, metric_fn, validation_loader=None):
        """Custom training loop: iterates the loader, logs scalars to
        TensorBoard under ./logs/<timestamp>, checkpoints under
        ./models/<timestamp>.

        NOTE(review): metric_fn and validation_loader are currently unused.
        """
        #! timestamped log / checkpoint directories
        ty,tm,td,tH,tM,tS,_,_,_ = time.localtime(time.time())
        timestamp = "{:04}-{:02}-{:02}_{:02}-{:02}-{:02}".format(ty,tm,td,tH,tM,tS)
        ROOTDIR   = os.path.dirname(os.path.abspath(__file__))
        LOG_DIR   = os.path.join(ROOTDIR,"logs"  ,timestamp)
        MODEL_DIR = os.path.join(ROOTDIR,"models",timestamp)
        #! FIX: model.save does not create parent directories
        os.makedirs(MODEL_DIR, exist_ok=True)
        summary_writer = tf.summary.create_file_writer(LOG_DIR)

        #! compile so the optimizer is attached to the model
        self.model.compile(optimizer=optimizer)

        for j in range(epochs):
            epoch_string = "epoch [%d][%d]"%(j+1, epochs)
            steps = len(train_loader)

            for i, (images, labels) in enumerate(train_loader):
                step_string = "step [%d][%d]"%(i+1, steps)
                self.train_step((images, labels), loss_fn, summary_writer, epoch_string, step_string)

            #! always keep the latest checkpoint; every 100 epochs also keep a numbered one
            self.save_model(os.path.join(MODEL_DIR,"Yolov5%s_last.h5"%self.flag))
            if (j+1) % 100 == 0:
                self.save_model(os.path.join(MODEL_DIR,"Yolov5%s_%d.h5"%(self.flag, j)))

    def train_step(self, data, loss_fn, summary_writer, epoch, step):
        """Run one optimization step and log loss components and lr."""
        images, labels = data

        #! forward pass + loss under the tape; gradient work is moved outside
        #! the `with` block so the tape does not record clipping/update ops
        with tf.GradientTape() as tape:
            predictions = self.model(images)  #! three detection-scale outputs
            loss, loss_items = loss_fn(labels, predictions, self.anchors, self.strides, self.wh_list)

        trainable_vars = self.model.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        #! gradient clipping: global-norm first, then element-wise
        gradients, _ = tf.clip_by_global_norm(gradients, 0.5)
        capped_gradients = [(tf.clip_by_value(grad, -0.5, 0.5), var)
                            for grad, var in zip(gradients, trainable_vars) if grad is not None]

        #! apply the clipped gradients
        self.model.optimizer.apply_gradients(capped_gradients)

        #! current learning rate (schedules need the iteration count)
        lr = self.model.optimizer.lr(self.model.optimizer.iterations) \
            if isinstance(self.model.optimizer.lr, keras.optimizers.schedules.LearningRateSchedule) \
            else self.model.optimizer.lr

        #! TensorBoard scalars
        items = loss_items.numpy()
        with summary_writer.as_default():
            tf.summary.scalar('loss', items.sum(), step=self.model.optimizer.iterations)
            tf.summary.scalar('lbox', items[0], step=self.model.optimizer.iterations)
            tf.summary.scalar('lobj', items[1], step=self.model.optimizer.iterations)
            tf.summary.scalar('lcls', items[2], step=self.model.optimizer.iterations)
            tf.summary.scalar('lr', float(lr), step=self.model.optimizer.iterations)

        #! console progress line
        tf.print("%s %s loss: %.4f, lbox: %.4f, lobj: %.4f, lcls: %.4f"%(
            epoch, step, items.sum(), items[0], items[1], items[2]))

    def predict(self, images):
        """Run raw inference and return the flattened prediction tensor.

        NOTE(review): post-processing (confidence thresholding / NMS) is
        unfinished — the per-image loop below is debug scaffolding that only
        inspects the first image and prints the count of confident boxes.
        """
        predictions = self.model(images)  #! list of (batch, na, wh, no) tensors

        predictions = tf.concat(predictions, axis=2)  #! merge the three scales
        bs, na, _, no = predictions.shape
        predictions = tf.reshape(predictions, [bs, -1, no])

        #! split into box coords, objectness, class logits
        prob_boxes, prob_conf, prob_classes = tf.split(predictions, [4, 1, -1], axis=-1)
        prob_boxes = prob_boxes.numpy()
        prob_conf = tf.squeeze(tf.sigmoid(prob_conf)).numpy()
        prob_classes = tf.nn.softmax(prob_classes, axis=-1).numpy()

        for i in range(bs):
            boxes = prob_boxes[i]
            conf = prob_conf[i]
            classes = prob_classes[i]
            indices, = np.where(conf > 0.05)
            print(indices.shape)
            break

        return predictions.numpy()
