from functools import reduce
import os, glob
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import time
import random
import cv2
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers

from dl.tf_keras.utils_v2 import FileSequence
from yolo import yolov5s
from losses_v2 import yolo_loss
from cylib.dota import dota_voc_eval
from train import train
from train import lr_schedule as _lr_schedule
from utils import preprocess_batch_labels, preprocess_batch_images, preprocess_batch
from utils import parse_xml_annotation
from utils import _make_grid, _make_grid_anchors
from anchors import autoanchors

def parse_train(train_image_dir, train_label_dir):
    """Pair image files with their label files by shared base name.

    Scans both directories, keeps only the base names present in both,
    and resolves each image path via glob so any image extension is
    accepted. Names are sorted so the pairing order is deterministic
    across runs (bare ``set`` iteration order depends on string hash
    randomization).

    Args:
        train_image_dir: directory containing the image files.
        train_label_dir: directory containing the ``.txt`` label files.

    Returns:
        (image_paths, label_paths): two parallel lists of file paths.
    """
    image_names = {os.path.splitext(f)[0] for f in os.listdir(train_image_dir)}
    label_names = {os.path.splitext(f)[0] for f in os.listdir(train_label_dir)}
    names = sorted(image_names & label_names)
    image_paths = []
    label_paths = []
    for name in names:
        matches = glob.glob(os.path.join(train_image_dir, "%s.*" % name))
        if not matches:
            # Defensive: directory changed between listdir and glob, or the
            # name contains glob metacharacters — skip instead of IndexError.
            continue
        image_paths.append(matches[0])
        label_paths.append(os.path.join(train_label_dir, "%s.txt" % name))
    return image_paths, label_paths

def split_filenames_labels(filenames, labels=None, split_rate=0.8, shuffle=True, random_state=None):
    """Split samples (and optional parallel labels) into train/test parts.

    Args:
        filenames: list of samples to split.
        labels: optional list parallel to ``filenames``.
        split_rate: fraction of samples assigned to the *train* partition.
        shuffle: shuffle before splitting when True.
        random_state: seed for the shuffle; ``None`` gives a random shuffle.

    Returns:
        ``(train, test)`` when ``labels`` is None, otherwise
        ``((train, train_labels), (test, test_labels))``.
    """
    if labels is not None:
        assert len(filenames) == len(labels)
    if shuffle:
        # Use a private RNG so the process-wide `random` state is not
        # clobbered. Random(seed).shuffle produces the same permutation
        # as random.seed(seed); random.shuffle, so seeded results match
        # the previous behavior.
        rng = random.Random(random_state)
        indices = list(range(len(filenames)))
        rng.shuffle(indices)
        filenames = [filenames[i] for i in indices]
        if labels is not None:
            labels = [labels[i] for i in indices]

    # Size of the train partition (the old name `test_size` was misleading).
    n_train = int(len(filenames) * split_rate)
    if labels is None:
        return filenames[:n_train], filenames[n_train:]
    return (filenames[:n_train], labels[:n_train]), (filenames[n_train:], labels[n_train:])
    
#! Base configuration
# Default YOLOv5 anchors: one row per detection scale, each a flattened
# sequence of (w, h) pairs in input-image pixels.
anchors = [
    [10, 13, 16, 30, 33, 23],        # finest scale
    [30, 61, 62, 45, 59, 119],       # middle scale
    [116, 90, 156, 198, 373, 326],   # coarsest scale
]
nl = len(anchors)           # number of detection scales
na = len(anchors[0]) // 2   # anchors per scale (each anchor is a w,h pair)
nc = 5                      # number of object classes
gt = False                  # flag forwarded to model/label/loss code; semantics defined in project modules
strides = [8, 16, 32]       # output stride of each detection scale
imgsz = (640, 640)          # network input size
batchsize = 16
epochs = 300

#! Data preparation
print("\033[31m开始准备数据\033[0m")
st = time.time()
# train_image_dir = 'pengcheng/images/train'
# train_label_dir = 'pengcheng/labels/train'
# train_image_dir = '/home/lxin49/Datasets/hit-uav/images/train'
# train_label_dir = '/home/lxin49/Datasets/hit-uav/labels/train'
train_image_dir = "/home/lxin49/Second/DataSets/hit-uav/images/train"
train_label_dir = "/home/lxin49/Second/DataSets/hit-uav/labels/train"
image_paths, label_paths = parse_train(train_image_dir,train_label_dir)
# Pair each image with its label file, then split 90/10 into train/validation.
train_datas, testa_datas = split_filenames_labels([(ip,lp) for (ip,lp) in zip(image_paths,label_paths)],split_rate=0.9,shuffle=True)
train_datas = train_datas[:25] #! pipeline smoke test only: keep just 25 samples
testa_datas = testa_datas[:25] #! pipeline smoke test only: keep just 25 samples
#! Re-fit the anchors to this dataset's labels
_label_paths = [td[1] for td in train_datas]
anchors = autoanchors(_label_paths,9,input_shape=imgsz)  # fits 9 anchors; exact algorithm lives in the project's anchors module
anchors = anchors.reshape([-1,6])  # one flattened row of (w,h) pairs per detection scale
print("anchors: ", anchors)
#! Build the prediction grids and per-cell anchor grids
grids = _make_grid(imgsz,na,strides)
# NOTE(review): [3,3,2] hardcodes nl=na=3 instead of using the nl/na
# variables above — breaks silently if the anchor config changes; confirm.
grid_anchors = _make_grid_anchors(np.array(anchors).reshape([3,3,2]),strides)
print("grid_anchors: ",grid_anchors)
#! Per-batch preprocessing callables
# NOTE(review): cv2.resize expects (width, height); imgsz is square here so
# the argument order is moot, but it matters for non-square inputs.
image_func = lambda x: preprocess_batch_images(x,lambda y:cv2.resize(y,imgsz))
label_func = lambda x: preprocess_batch_labels(x,nc=nc,imgsz=imgsz,grid_anchors=grid_anchors,gt=gt)
batch_func = lambda x: preprocess_batch(x,image_func,label_func,input_shape=imgsz,iM=False)
#! Dataset sequences (batched file loaders)
trainset = FileSequence(batchsize,train_datas,batch_func,shuffle=False,XYT=True)
validset = FileSequence(batchsize,testa_datas,batch_func,shuffle=False,XYT=True)
print("\033[31m数据准备完毕, 一共花费%.4f秒\033[0m"%(time.time()-st))

#! Baseline model (source of the transferable pretrained weights)
model_path = "weights/yolov5s.h5"
base_model = keras.models.load_model(model_path)
#! Build the target model for this dataset (nc classes, refitted anchors)
model = yolov5s(nc=nc,imgsz=imgsz,anchors=anchors,gt=gt)
#! Transfer weights layer-by-layer from the baseline model
# NOTE(review): this relies on base_model and model having their layers in
# identical order; input and "Detect" (head) layers are skipped, presumably
# because their shapes depend on nc/anchors — confirm the two architectures
# stay aligned layer-for-layer.
for b_layer,layer in zip(base_model.layers,model.layers):
    if "input" in layer.name or "Detect" in layer.name: continue
    # print(b_layer.name, layer.name)
    w = [x.numpy() for x in b_layer.weights]
    # print(len(w))
    # print(len(layer.weights))
    layer.set_weights(w)
#! Inspect and persist the assembled model
model.summary()
model.save("./5.h5")
keras.utils.plot_model(model,'model.png',show_shapes=True,expand_nested=True)

# #! First pass over the data (disabled debug code: prints batch/label shapes)
# tf.print("\033[31m")
# for images, labels in trainset:
#     tf.print("in for")
#     tf.print(images.shape)
#     if isinstance(labels,list):
#         for label in labels:
#             if isinstance(label,list):
#                 for _label in label:
#                     print(_label.shape)
#             else:
#                 print(label.shape)
#                 print(label[0,17,0,:])
#         break
#     else:
#         tf.print(labels.shape)
#         print(labels[0,17,0,:])
#     break
# tf.print("\033[0m")

#! Training parameters
# #todo default setup, simple
# initial_learning_rate = 1e-3
# # decay_steps =  300               #! adjust the learning rate every 100 steps
# decay_steps = len(trainset) * 10 #! adjust the learning rate every 10 epochs
# decay_rate = 1.0 - 5e-4          #! decay rate
# lr_schedule = keras.optimizers.schedules.ExponentialDecay( #! learning-rate schedule
#       initial_learning_rate,
#       decay_steps=decay_steps,
#       decay_rate=decay_rate,
#       staircase=True,
# )
# optimizer = keras.optimizers.Adam(learning_rate=lr_schedule)

#todo custom setup, complex
nbs = 64           # nominal batch size the base learning rates are scaled against
_init_lr = 1e-2
_mini_lr = _init_lr * 0.01
LR_MAX = 1e-3 #! 5e-2
LR_MIN = 3e-4 #! 5e-4
# Scale the LR linearly with batch size, then clamp into [LR_MIN, LR_MAX]
# (the minimum LR uses bounds two orders of magnitude smaller).
init_lr = min(max(batchsize/nbs*_init_lr,LR_MIN     ),LR_MAX     )
mini_lr = min(max(batchsize/nbs*_mini_lr,LR_MIN*1e-2),LR_MAX*1e-2)
lr = init_lr
# decay_type = 'step'
decay_type = 'cos'
lr_schedule = _lr_schedule(
    decay_type=decay_type,
    learning_rate=lr,
    min_lr = mini_lr,
    total_iters = len(trainset) * epochs,
    warmup_iters_ratio=0.1,
)
# NOTE(review): the optimizer is created with the constant initial lr while
# lr_schedule is passed separately to train(); presumably train() applies the
# schedule each step — confirm, otherwise the schedule is never used.
optimizer = keras.optimizers.Adam(learning_rate=lr,beta_1=0.937,beta_2=0.9995)
#! When gt is not used, class weights should not be used, since no cls slot corresponds to background
#! When gt is used, weights may be used, since a default class corresponds to it
# if gt:
#     loss_fn = lambda labels,predns: yolo_loss(labels,predns,CIoU=True,na=na,loop=False,filter=False,weight=True)
# else:
#     loss_fn = lambda labels,predns: yolo_loss(labels,predns,CIoU=True,na=na,loop=True,filter=True,weight=False)
loss_fn = lambda labels,predns: yolo_loss(
    labels,predns,grids,grid_anchors.reshape([-1,na,2]),xyxy=False,GIoU=False,DIoU=False,CIoU=True,nc=nc,na=na,label_smoothing=0.0001,filter=True,weight=False)
# NOTE(review): gt=True here contradicts gt=False used to build the model and
# the labels above; also strides=[8,16,32] duplicates the `strides` variable.
# Confirm both are intentional.
train(model,trainset,validset,optimizer,lr_schedule=lr_schedule,epochs=epochs,
      imgsz=imgsz,grids=grids,anchors=anchors,strides=[8,16,32],gt=True,shuffle=False,
      loss_fn=loss_fn,metric_fn=dota_voc_eval,amp=True,log_dir=None,model_dir=None)

# model.compile(optimizer=optimizer,loss=loss_fn)
# model.fit(trainset,epochs=epochs)