from functools import reduce, partial
import os
import tqdm
import time
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras import mixed_precision
#? The two lines below enable float16 (mixed-precision) training, but it frequently produced NaNs for me
# policy = mixed_precision.Policy('mixed_float16')
# mixed_precision.set_global_policy(policy)

from common import _make_grid
from losses import yolo_loss
from cylib.dota import dota_voc_eval
from metrics_utils import calc_iou_thresh, get_evaluate_format

# @tf.function
def train_step(model,data,optimizer,loss_fn,box_ratio=0.05,cls_ratio=0.5,obj_ratio=1.0):
    """Run a single optimization step on one batch.

    Args:
        model: keras model producing per-level YOLO predictions.
        data: (images, labels) batch.
        optimizer: keras optimizer used to apply the (clipped) gradients.
        loss_fn: callable(labels, predns) -> (lbox, lbxy, lbwh, lcls, lobj);
            falls back to model.compiled_loss when None.
        box_ratio/cls_ratio/obj_ratio: weights combining the loss terms.

    Returns:
        dict of scalar tensors: lbox, lbxy, lbwh, lcls, lobj, loss.
    """
    if loss_fn is None: loss_fn = model.compiled_loss
    images, labels = data
    with tf.GradientTape() as gt:
        # training=True so BatchNorm/Dropout run in training mode (the
        # original called model(images), which uses inference behaviour).
        predns = model(images, training=True)
        lbox, lbxy, lbwh, lcls, lobj = loss_fn(labels, predns)
        # lbxy/lbwh are reported separately; only the combined lbox term
        # participates in the optimized loss.
        loss = lbox * box_ratio + lcls * cls_ratio + lobj * obj_ratio
    # Compute gradients OUTSIDE the tape context — calling gt.gradient inside
    # the `with` block records the backward pass onto the tape as well, which
    # wastes memory/time for no benefit.
    trainable_vars = model.trainable_variables
    gradients = gt.gradient(loss, trainable_vars)
    #! Gradient clipping stage 1: global-norm clipping (NaN mitigation)
    gradients, _ = tf.clip_by_global_norm(gradients, 0.99)
    #! Gradient clipping stage 2: element-wise value clipping
    capped_gradients = [(tf.clip_by_value(grad, -0.99, 0.99), var)
                        for grad, var in zip(gradients, trainable_vars)
                        if grad is not None]
    optimizer.apply_gradients(capped_gradients)
    return {
        "lbox": lbox,
        "lbxy": lbxy,
        "lbwh": lbwh,
        "lcls": lcls,
        "lobj": lobj,
        "loss": loss,
    }

def train_epoch(model,train_dataloader,optimizer,lr_schedule,loss_fn,summary_writer):
    """Run one full training epoch over `train_dataloader`.

    Per step: resolves the current learning rate (from a keras schedule, or
    from the user-supplied `lr_schedule` callable otherwise), calls
    `train_step`, updates the tqdm postfix, and logs losses/lr to TensorBoard
    via `summary_writer`. Prints the epoch-mean losses at the end.

    NOTE(review): assumes the loader yields at least one batch — the loss
    accumulators start as Python floats and only gain a `.dtype` (become
    tensors) after the first `+=`; an empty loader would crash in the mean
    computation at the bottom.
    """
    bar_format='{n_fmt}/{total_fmt} {percentage:3.2f}% |{bar:50}| ETA: {elapsed}<{remaining}{postfix}'
    pbar = tqdm.tqdm(train_dataloader,total=len(train_dataloader),bar_format=bar_format) # {remaining} already contains a comma

    imgsz_h, imgsz_w = 0, 0
    obj_ratio = 1.0
    cls_ratio = 0.5
    nc = 1
    step = 0
    # Mlbox, Mlcls, Mlobj, Mloss = 0.0, 0.0, 0.0, 0.0
    mlbox, mlbxy, mlbwh, mlcls, mlobj, mloss = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    for step,(images,labels) in enumerate(pbar):
        if step==0:
            # Infer input size and class count from the first batch, then
            # rescale the obj/cls loss weights relative to the 640x640 /
            # 80-class baseline (YOLOv5 convention).
            _, imgsz_h, imgsz_w, _ = images.shape
            _, _, _, nc = labels[0].shape
            nc -= 5
            obj_ratio *= (imgsz_h * imgsz_w) / (640 * 640)
            cls_ratio *= nc / 80 #? review: may this under-weight cls when nc is small?

        iteration = optimizer.iterations # current global iteration count
        if isinstance(optimizer.lr,keras.optimizers.schedules.LearningRateSchedule):
        # if isinstance(optimizer.lr,keras.optimizers.schedules.ExponentialDecay) \
        # or isinstance(optimizer.lr,keras.optimizers.schedules.CosineDecay): #! keras-provided schedule types
            lr = optimizer.lr(iteration)     # current lr; dtype may limit decimal precision
        else: #! user-supplied decay function: compute lr and write it back to the optimizer
            lr = optimizer.lr
            lr = lr_schedule(iteration)
            K.set_value(optimizer.lr,lr)

        losses = train_step(model,(images,labels),optimizer,loss_fn,cls_ratio=cls_ratio,obj_ratio=obj_ratio)
        lbox = losses["lbox"]
        lbxy = losses["lbxy"]
        lbwh = losses["lbwh"]
        lcls = losses["lcls"]
        lobj = losses["lobj"]
        loss = losses["loss"]
        postfix_str = "lbox: %.4f, lbxy: %.4f, lbwh: %.4f, lcls: %.4f, lobj: %.4f, loss: %.4f, lr: %.8f"%\
            (float(lbox),float(lbxy),float(lbwh),float(lcls),float(lobj),float(loss),float(lr))
        pbar.set_postfix_str(postfix_str)

        # #! track the maximum losses within this epoch (disabled)
        # Mlbox = lbox if lbox > Mlbox else Mlbox
        # Mlcls = lcls if lcls > Mlcls else Mlcls
        # Mlobj = lobj if lobj > Mlobj else Mlobj
        # Mloss = loss if loss > Mloss else Mloss

        # #! track the minimum losses within this epoch (disabled)
        # if tf.equal(step,0): mlbox, mlcls, mlobj, mloss = Mlbox, Mlcls, Mlobj, Mloss
        # mlbox = lbox if lbox < mlbox else mlbox
        # mlcls = lcls if lcls < mlcls else mlcls
        # mlobj = lobj if lobj < mlobj else mlobj
        # mloss = loss if loss < mloss else mloss        

        # Accumulate per-step losses for the epoch means reported below.
        mlbox += lbox
        mlbxy += lbxy
        mlbwh += lbwh
        mlcls += lcls
        mlobj += lobj
        mloss += loss

        with summary_writer.as_default():
            tf.summary.scalar('lbox',lbox,step=iteration)
            tf.summary.scalar('lbxy',lbxy,step=iteration)
            tf.summary.scalar('lbwh',lbwh,step=iteration)
            tf.summary.scalar('lcls',lcls,step=iteration)
            tf.summary.scalar('lobj',lobj,step=iteration)
            tf.summary.scalar('loss',loss,step=iteration)
            tf.summary.scalar('lr',lr,step=iteration)

    # Epoch means: `step` holds the last enumerate index, hence step + 1 batches.
    mlbox = mlbox / (tf.cast(step,mlbox.dtype) + 1.0)
    mlbxy = mlbxy / (tf.cast(step,mlbxy.dtype) + 1.0)
    mlbwh = mlbwh / (tf.cast(step,mlbwh.dtype) + 1.0)
    mlcls = mlcls / (tf.cast(step,mlcls.dtype) + 1.0)
    mlobj = mlobj / (tf.cast(step,mlobj.dtype) + 1.0)
    mloss = mloss / (tf.cast(step,mloss.dtype) + 1.0)

    ty,tm,td,tH,tM,tS,_,_,_ = time.localtime(time.time())
    timestamp = "{:04}-{:02}-{:02}_{:02}-{:02}-{:02}".format(ty,tm,td,tH,tM,tS)      
    # tf.print("\nMAX IN EPOCH(%s): {lbox: %.4f, lcls: %.4f, lobj: %.4f, loss: %.4f}"%(timestamp,float(Mlbox),float(Mlcls),float(Mlobj),float(Mloss)))
    tf.print("\nMEAN IN EPOCH(%s): {lbox: %.4f, lbxy: %.4f, lbwh: %.4f, lcls: %.4f, lobj: %.4f, loss: %.4f}"\
        %(timestamp,float(mlbox),float(mlbxy),float(mlbwh),float(mlcls),float(mlobj),float(mloss)))

def train(model,train_dataloader,valid_dataloader,optimizer,lr_schedule=None,epochs=5,
          imgsz=(640,640),grids=None,anchors=None,strides=[8,16,32],gt=True,shuffle=True,
          loss_fn=None,metric_fn=dota_voc_eval,
          amp=True,log_dir=None,model_dir=None):
    """Train `model` for `epochs` epochs, saving and evaluating after each one.

    Args:
        model: keras model producing per-level YOLO predictions.
        train_dataloader / valid_dataloader: keras-Sequence-like loaders.
        optimizer: keras optimizer.
        lr_schedule: optional callable(iteration) -> lr; used by train_epoch
            when optimizer.lr is not a keras LearningRateSchedule.
        epochs: number of epochs to run.
        imgsz, grids, anchors, strides, gt: detection geometry forwarded to
            `evaluate`; anchors default to the YOLOv5 COCO anchors.
        shuffle: call `on_epoch_end()` on the train loader after each epoch.
        loss_fn: loss callable; train_step falls back to model.compiled_loss.
        metric_fn: VOC-style AP metric (default dota_voc_eval).
        amp: currently unused — the mixed-precision wrapper is disabled below.
        log_dir / model_dir: output directories; default to
            ./logs/<timestamp> and ./models/<timestamp>.
    """
    # if amp: optimizer = mixed_precision.LossScaleOptimizer(optimizer)
    timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
    if log_dir is None: log_dir = os.path.join(os.path.abspath(os.getcwd()), "logs", timestamp)
    os.makedirs(log_dir, exist_ok=True)
    # FIX: was os.path.join(os.path.join(os.getcwd()), ...) — the inner
    # single-argument join was a no-op; use abspath like the log_dir line.
    if model_dir is None: model_dir = os.path.join(os.path.abspath(os.getcwd()), "models", timestamp)
    os.makedirs(model_dir, exist_ok=True)
    summary_writer = tf.summary.create_file_writer(log_dir)

    if anchors is None:
        anchors = [
            [ 10,13,  16, 30,  33, 23],
            [ 30,61,  62, 45,  59,119],
            [116,90, 156,198, 373,326],
        ]

    for i in range(1, 1 + epochs):
        #! train one epoch
        tf.print("EPOCH %d/%d %s"%(i, epochs, time.strftime("%Y-%m-%d_%H-%M-%S")))
        train_epoch(model, train_dataloader, optimizer, lr_schedule, loss_fn, summary_writer)
        if shuffle: train_dataloader.on_epoch_end() # reshuffle training data
        model.save(os.path.join(model_dir, "%d.h5" % i))
        #! evaluate on the validation set
        rec, prec, ap = evaluate(model, valid_dataloader, metric_fn, summary_writer, imgsz, grids, anchors, strides, gt)
        with summary_writer.as_default():
            tf.summary.scalar('ap', float(ap), step=optimizer.iterations)
            tf.summary.scalar('rec', float(rec), step=optimizer.iterations)
            tf.summary.scalar('prec', float(prec), step=optimizer.iterations)

def evaluate(model,valid_dataloader,metric_fn,summary_writer,imgsz,grids,anchors,strides,gt=True):
    """Run inference over `valid_dataloader` and compute recall/precision/AP.

    Labels and predictions are decoded back to absolute pixel boxes
    (`decode_labels` / `decode_predns`), accumulated across the whole loader,
    and fed to the VOC-style `metric_fn(values, recs, classname)`.

    NOTE(review): when `gt` is True, class id 0 is dropped from the evaluated
    class list — presumably a background/ignored class; confirm against
    `metric_fn`'s expectations. `summary_writer` and `imgsz` are accepted but
    not used inside this function.

    Returns:
        (rec, prec, ap) as produced by `metric_fn`.
    """
    if strides is None: strides = [8,16,32]
    nc = 1 #! default: 1 class (overwritten from the first batch's labels)
    nl = len(anchors)
    na = len(anchors[0]) // 2
    assert nl == len(strides)
    anchors = np.array(anchors).reshape([nl,na,2])

    # ! accumulators over the whole validation set
    image_id = 0
    recs = {}
    image_ids = []
    categorys = []
    BBs = []
    confidences = []

    # ! iterate validation batches
    for idx,(_images,_labels) in enumerate(valid_dataloader):
        if idx==0: #! infer class count from the label channels (5 + nc)
            b,_,_,nc = _labels[0].shape
            nc -= 5
        else:
            b,_,_,_ = _labels[0].shape
                
        #! predict
        predns = model(_images) #! list of per-level outputs, same layout as labels

        #! decode predictions and labels: rescale to input size, deduplicate
        #! ground-truth side
        labels,indices = decode_labels(_labels,strides,anchors,grids,image_id=image_id)
        recs.update(labels) #! merge this batch's records

        #! prediction side: sigmoid activation, NMS and deduplication
        predns = [tf.nn.sigmoid(_predns) for _predns in predns]
        predns = [_predns.numpy() if isinstance(_predns,tf.Tensor) else _predns for _predns in predns]
        # predns = decode_predns(predns,strides,anchors,grids,image_id=image_id,xyxy=False,max_output=None)
        predns = decode_predns(predns,strides,anchors,grids,image_id=image_id,xyxy=False)
        image_ids.append(predns[:,0].astype('int'))
        BBs.append(predns[:,1:5])
        confidences.append(predns[:,5])
        categorys.append(predns[:,6].astype('int'))

        #! advance the global image id by this batch's size
        image_id += b
    
    image_ids = np.concatenate(image_ids,axis=0)
    categorys = np.concatenate(categorys,axis=0)
    BBs = np.concatenate(BBs,axis=0)
    confidences = np.concatenate(confidences,axis=0)

    # tf.print("recs: ",[x["bbox"] for x in recs[0]],summarize=-1)
    # tf.print(tf.convert_to_tensor(BBs[ind]),summarize=-1)

    # Optional confidence pre-filter (disabled):
    # filter_ind = np.where(confidences>=0.25)
    # image_ids = image_ids[filter_ind]
    # categorys = categorys[filter_ind]
    # BBs = BBs[filter_ind]
    # confidences = confidences[filter_ind]

    values = image_ids,categorys,confidences,BBs
    classname = [c for c in range(nc)]
    if gt: classname.remove(0)

    instances = BBs.shape[0]
    rec, prec, ap = metric_fn(values,recs,classname)

    ty,tm,td,tH,tM,tS,_,_,_ = time.localtime(time.time())
    timestamp = "{:04}-{:02}-{:02}_{:02}-{:02}-{:02}".format(ty,tm,td,tH,tM,tS)
    tf.print(get_evaluate_format(rec,prec,ap)%(timestamp,instances,rec,prec,ap))
    return rec, prec, ap
        
def decode_labels(labels_list,strides,anchors,grids,image_id=0):
    """Decode grid-encoded ground-truth tensors back into absolute boxes.

    Args:
        labels_list: per-level label arrays/tensors of shape
            (b, ny*nx, na, 5+nc), xy stored as in-cell offsets, xywh in grid
            units, channel 4 == 1.0 marking positive cells.
        strides: per-level stride factors (grid units -> pixels).
        anchors: unused here; kept for signature symmetry with decode_predns.
        grids: per-level grid coordinate arrays added to the xy offsets.
        image_id: global id of the first image in this batch.

    Returns:
        (recs, indices): `recs` maps global image id -> list of dicts with
        'name' (class id), 'difficult' (always 0) and 'bbox' as a DOTA-style
        8-value polygon. `indices` are the np.where masks of label entries
        equal to 1, one per level.
        BUG FIX: `indices` used to be shadowed by the per-image objectness
        mask inside the loop, so the returned value was whatever the last
        inner np.where produced instead of the per-level masks.
    """
    labels_list = [labels.numpy() if isinstance(labels,tf.Tensor) else labels for labels in labels_list]
    indices = [np.where(np.equal(labels,1)) for labels in labels_list]

    recs = {}
    for i, _labels in enumerate(zip(*labels_list)):
        labels_bbox_c = [] # ! decoded boxes for the i-th image
        for l in range(len(_labels)): # ! the l-th detection level
            data = _labels[l].copy()  # ! (ny*nx,na,5+nc); copy so the input is not mutated
            data[:,:,0:2] += grids[l] # ! in-cell offsets -> absolute grid coords
            pos_mask = np.where(np.equal(data[:,:,4],1.0)) # positive (objectness == 1) cells
            data = data[pos_mask]
            data[:,0:4] *= strides[l] # grid units -> pixels
            data_c = np.argmax(data[:,5:],axis=1)
            data = np.concatenate([data[:,0:4],data_c[:,None]],axis=1) #! bbox, c
            data = np.unique(data,axis=0) # ! dedupe within the level
            labels_bbox_c.append(data)
        labels_bbox_c = np.concatenate(labels_bbox_c,axis=0)
        labels_bbox_c = np.unique(labels_bbox_c,axis=0) # ! dedupe across levels
        labels_i = np.ones(shape=(labels_bbox_c.shape[0],1)) * (image_id+i)
        labels = np.concatenate([labels_i,labels_bbox_c],axis=1) #! image_id, bbox, c

        objects = []
        for label in labels:
            object_struct = {}
            _img_id,xc,yc,w,h,c = label
            object_struct['name'] = int(c)
            object_struct['difficult'] = 0
            x1 = xc - w / 2
            y1 = yc - h / 2
            x2 = xc + w / 2
            y2 = yc + h / 2
            # DOTA-style polygon corner order: TL, BL, BR, TR
            object_struct['bbox'] = [x1,y1,x1,y2,x2,y2,x2,y1]
            objects.append(object_struct)
        recs[int(image_id+i)] = objects
    return recs, indices

def decode_predns(_predns_list,strides,anchors,grids,image_id=0,xyxy=False,max_output=1000):
    """Decode raw per-level predictions into NMS-filtered detections.

    Args:
        _predns_list: per-level sigmoid outputs of shape (b, ny*nx, na, 5+nc).
        strides / anchors / grids: per-level geometry for the xywh decode.
        image_id: global id of the first image in this batch.
        xyxy: when True, boxes are taken as-is; otherwise centers/sizes are
            converted to corner (x1,y1,x2,y2) format before NMS.
        max_output: NMS output cap; None means "keep up to all candidates".

    Returns:
        (M, 7) array of [image_id, x1, y1, x2, y2, conf, class] rows,
        confidence-sorted per image.

    NOTE(review): `data = _predns[l]` below aliases the caller's array, so the
    xywh decode mutates the input IN PLACE — safe only if each array is decoded
    once. Also, `nc` here is the FULL channel count (5 + num_classes), unlike
    the `nc` elsewhere in this file.
    """
    _predns_list = [rawdata.numpy() if isinstance(rawdata,tf.Tensor) else rawdata for rawdata in _predns_list]
    
    predns_list = []
    nc = 1

    for i, _predns in enumerate(zip(*_predns_list)):
        predns_bbox_c_c = [] # ! detections for the i-th image
        for l in range(len(_predns)):
            if i==0 and l==0: 
                _,_,nc = _predns[l].shape # !(ny*nx,na,5+nc)
            data = _predns[l]
            data[:,:,0:2] = (data[:,:,0:2] * 2 - 0.5 + grids[l]) * strides[l] # xy decode (YOLOv5 form)
            data[:,:,2:4] = (data[:,:,2:4] * 2) ** 2 * anchors[l]             # wh decode (YOLOv5 form)
            data = np.reshape(_predns[l],(-1,nc))
            data_c = np.argmax(data[:,5:],axis=1)
            data = np.concatenate([data[:,0:5],data_c[:,None]],axis=1) #! bbox + confidence + class
            data = np.unique(data,axis=0) #! dedupe within the level
            predns_bbox_c_c.append(data)
        predns_bbox_c_c = np.concatenate(predns_bbox_c_c,axis=0)   #! xc,yc,w,h,conf,c
        predns_bbox_c_c = np.unique(predns_bbox_c_c,axis=0) #! dedupe across levels
        predns_i = np.ones(shape=(predns_bbox_c_c.shape[0],1)) * (image_id+i)
        predns = np.concatenate([predns_i,predns_bbox_c_c],axis=1) #! image_id,xc,yc,w,h,conf,c

        #! sort by confidence, descending
        sorted_ind = np.argsort(predns[:,5])[::-1]
        predns = predns[sorted_ind]

        #! bbox coordinate conversion
        if xyxy:
            bboxes = predns[:,1:5]
        else:
            x1y1 = predns[:,1:3] - predns[:,3:5] / 2
            x2y2 = predns[:,1:3] + predns[:,3:5] / 2
            bboxes = np.concatenate([x1y1,x2y2],axis=-1)
            # predns[:,1:5] = bboxes # ! convert to xyxy format
            predns = np.concatenate([predns[:,0,None],bboxes,predns[:,5:]],axis=1)

        confidences = predns[:,5]
        thr = calc_iou_thresh()
        if max_output is None: max_output = predns.shape[0]

        # ! non-max suppression
        indices = tf.image.non_max_suppression(bboxes,confidences,max_output_size=max_output,iou_threshold=thr)
        predns = tf.gather(predns,indices)
        if isinstance(predns,tf.Tensor): predns = predns.numpy()

        predns_list.append(predns)
    return np.concatenate(predns_list,axis=0)



class Trainer(object):
    """Bundles model, optimizer, loss and detection geometry for training.

    Calling the instance trains for `epochs` epochs, saving `last.h5` after
    every epoch and `best.h5` whenever validation AP improves.
    """

    def __init__(self,model,optimizer=None,lr_schedule=None,loss_fn=None,metric_fn=None,
                 imgsz=(640,640),strides=[8,16,32],anchors=None,grids=None,dtype=tf.float32,
                 gt=True,shuffle=True,amp=True,log_dir=None,model_dir=None
                 ):
        """Set up training components, detection geometry and output dirs.

        Args:
            model: keras model; also supplies optimizer/loss/metrics defaults.
            optimizer / loss_fn / metric_fn: fall back to the model's compiled
                counterparts when None.
            lr_schedule: optional callable(iteration) -> lr for manual decay.
            imgsz, strides, anchors, grids, dtype: detection-head geometry;
                anchors default to the YOLOv5 COCO set, grids are built via
                `_make_grid` when not supplied.
            gt, shuffle: forwarded to `evaluate` / epoch shuffling.
            amp: currently unused (mixed precision is disabled file-wide).
            log_dir / model_dir: default to ./logs/<ts> and ./models/<ts>.
        """
        if optimizer is None: optimizer = model.optimizer
        if loss_fn is None: loss_fn = model.compiled_loss
        if metric_fn is None: metric_fn = model.compiled_metrics
        self.model = model
        self.optimizer = optimizer
        self.lr_schedule = lr_schedule
        self.loss_fn = loss_fn
        self.metric_fn = metric_fn

        if anchors is None: anchors = [[ 10,13,  16, 30,  33, 23],[ 30,61,  62, 45,  59,119],[116,90, 156,198, 373,326]]
        # BUG FIX: was len(anchors[0]//2) — `list // int` raises TypeError.
        # The intent is the number of anchors per level: len(anchors[0]) // 2.
        if grids is None: grids = _make_grid(imgsz, len(anchors[0]) // 2, strides, dtype=dtype)
        self.imgsz = imgsz
        self.strides = strides
        self.anchors = anchors
        self.grids = grids

        self.gt = gt
        self.shuffle = shuffle
        self.amp = amp

        _timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
        if log_dir is None: log_dir = os.path.join(os.path.abspath(os.getcwd()), "logs", _timestamp)
        os.makedirs(log_dir, exist_ok=True)
        # FIX: was os.path.join(os.path.join(os.getcwd()), ...) — the inner
        # single-argument join was a no-op; use abspath like log_dir.
        if model_dir is None: model_dir = os.path.join(os.path.abspath(os.getcwd()), "models", _timestamp)
        os.makedirs(model_dir, exist_ok=True)
        self.log_dir = log_dir
        self.model_dir = model_dir
        self.summary_writer = tf.summary.create_file_writer(log_dir)

    def __call__(self,train_dataloader,valid_dataloader,epochs=5,valid_epoch=10):
        """Train for `epochs` epochs; validate every `valid_epoch` epochs.

        Saves models/last.h5 each epoch and models/best.h5 on AP improvement.
        """
        best_ap = 0.0
        for e in range(1, 1 + epochs):
            tf.print("EPOCH %d/%d %s"%(e, epochs, time.strftime("%Y-%m-%d_%H-%M-%S")))

            # Rotate the per-level objectness balance every 5 epochs on a
            # 15-epoch cycle, so each detection level is emphasised in turn.
            obj_balances = [
                [0.4, 1.0, 4.0],
                [4.0, 0.4, 1.0],
                [1.0, 4.0, 0.4],
            ]
            self.loss_fn.obj_balance = obj_balances[((e - 1) % 15) // 5]

            train_epoch(self.model,train_dataloader,self.optimizer,self.lr_schedule,self.loss_fn,self.summary_writer)
            if self.shuffle: train_dataloader.on_epoch_end() # reshuffle training data
            self.model.save(os.path.join(self.model_dir, "last.h5"))
            #! validate only every `valid_epoch` epochs to keep training fast
            if e % valid_epoch == 0:
                rec,prec,ap = evaluate(self.model,valid_dataloader,self.metric_fn,self.summary_writer,self.imgsz,self.grids,self.anchors,self.strides,self.gt)
                with self.summary_writer.as_default():
                    tf.summary.scalar('ap',float(ap),step=self.optimizer.iterations)
                    tf.summary.scalar('rec',float(rec),step=self.optimizer.iterations)
                    tf.summary.scalar('prec',float(prec),step=self.optimizer.iterations)
                if ap > best_ap:
                    best_ap = ap
                    self.model.save(os.path.join(self.model_dir, "best.h5"))