from functools import reduce
import os, glob
# os.environ["CUDA_VISIBLE_DEVICES"] = ""   
import time
import random
import cv2
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers

from dl.tf_keras.utils_v2 import FileSequence
from yolo import yolov5s
from losses_v2 import YoloLoss
from cylib.dota import dota_voc_eval
from train import Trainer
from train import lr_schedule as _lr_schedule
from utils import preprocess_batch_labels, preprocess_batch_images, preprocess_batch
from utils import _make_grid, _make_grid_anchors, get_positives
from anchors import autoanchors

def parse_train(train_image_dir, train_label_dir):
    """Pair image files with their label files by shared basename stem.

    Only stems present in BOTH directories are kept. The image path is
    resolved via glob (any extension); the label is assumed to be a .txt
    file with the same stem.

    Returns:
        (image_paths, label_paths): two parallel lists.
    """
    image_stems = {os.path.splitext(f)[0] for f in os.listdir(train_image_dir)}
    label_stems = {os.path.splitext(f)[0] for f in os.listdir(train_label_dir)}
    image_paths, label_paths = [], []
    for stem in image_stems & label_stems:
        # First glob hit wins — stems are known to exist in the image dir.
        image_paths.append(glob.glob(os.path.join(train_image_dir, "%s.*" % stem))[0])
        label_paths.append(os.path.join(train_label_dir, "%s.txt" % stem))
    return image_paths, label_paths

def split_filenames_labels(filenames, labels=None, split_rate=0.8, shuffle=True, random_state=None):
    """Split filenames (and optional parallel labels) into train/test parts.

    Args:
        filenames: list of items to split.
        labels: optional list parallel to `filenames`.
        split_rate: fraction assigned to the TRAIN part (first split).
        shuffle: shuffle before splitting.
        random_state: seed for reproducible shuffling.

    Returns:
        Without labels: (train_items, test_items).
        With labels: ((train_items, train_labels), (test_items, test_labels)).
    """
    if labels is not None:
        assert len(filenames) == len(labels)
    if shuffle:
        # Local RNG instance: same stream as random.seed()+random.shuffle()
        # for a given seed, but without reseeding the global random module.
        rng = random.Random(random_state)
        idx_list = list(range(len(filenames)))
        rng.shuffle(idx_list)
        filenames = [filenames[i] for i in idx_list]
        if labels is not None:
            labels = [labels[i] for i in idx_list]

    # `split_rate` is the train fraction (was misleadingly named test_size).
    train_size = int(len(filenames) * split_rate)
    if labels is not None:
        return ((filenames[:train_size], labels[:train_size]),
                (filenames[train_size:], labels[train_size:]))
    return filenames[:train_size], filenames[train_size:]
    
#! Basic hyper-parameters
# Default COCO anchors (w,h pairs), one row per detection level; these are
# recomputed from the training labels by autoanchors() further below.
anchors = [
    [ 10,13,  16, 30,  33, 23],
    [ 30,61,  62, 45,  59,119],
    [116,90, 156,198, 373,326],
]
nl = len(anchors)     # number of detection levels
nc = 5                # number of object classes
gt = False            # NOTE(review): presumably toggles an extra default/background class channel — confirm against yolov5s()/YoloLoss
shuffle = True        # shuffle training batches each epoch
strides = [8,16,32]   # feature-map stride per detection level
imgsz = (640,640)     # network input size
batchsize = 16
epochs = 300          # also drives total_iters in the LR schedule below

#! Data preparation
print("\033[31m开始准备数据\033[0m")
st = time.time()
# train_image_dir = 'pengcheng/images/train'
# train_label_dir = 'pengcheng/labels/train'
train_image_dir = '/home/lxin49/Datasets/hit-uav/images/train'
train_label_dir = '/home/lxin49/Datasets/hit-uav/labels/train'
# train_image_dir = "/home/lxin49/Second/DataSets/hit-uav/images/train"
# train_label_dir = "/home/lxin49/Second/DataSets/hit-uav/labels/train"
image_paths, label_paths = parse_train(train_image_dir,train_label_dir)
# Pair (image_path, label_path) tuples, then split 90/10 into train/validation.
train_datas, testa_datas = split_filenames_labels(list(zip(image_paths,label_paths)),split_rate=0.9,shuffle=True)
# train_datas = train_datas[:25] #! debug: tiny subset to smoke-test the pipeline
# testa_datas = testa_datas[:25] #! debug: tiny subset to smoke-test the pipeline
#! Recompute anchors from the training labels (replaces the COCO defaults above).
_label_paths = [td[1] for td in train_datas]
anchors = autoanchors(_label_paths,9,input_shape=imgsz)
print(anchors.shape)
print(anchors)
na = len(anchors[0]) // 2   # anchors per detection level
#! Build prediction grids and per-cell anchor grids
grids = _make_grid(imgsz,na,strides)
grid_anchors = _make_grid_anchors(np.array(anchors).reshape([nl,na,2]),strides)
print("grid_anchors: ",grid_anchors)

#! Batch preprocessing functions
# NOTE: cv2.resize takes dsize as (width, height); imgsz is square so the
# (w,h) vs (h,w) ambiguity is harmless here.
image_func = lambda x: preprocess_batch_images(x,lambda y:cv2.resize(y,imgsz))
label_func = lambda x: preprocess_batch_labels(x,nc=nc,imgsz=imgsz,grid_anchors=grid_anchors,gt=gt)
train_batch_func = lambda x: preprocess_batch(x,image_func,label_func,input_shape=imgsz,iM=True)
testa_batch_func = lambda x: preprocess_batch(x,image_func,label_func,input_shape=imgsz,iM=False,iL=False)

#! Dataset sequences
trainset = FileSequence(batchsize,train_datas,train_batch_func,shuffle=shuffle,XYT=True)
# BUGFIX: validset previously reused train_datas, so validation metrics were
# computed on training data while the held-out testa_datas split went unused.
validset = FileSequence(batchsize,testa_datas,testa_batch_func,shuffle=False,XYT=True)
print("\033[31m数据准备完毕, 一共花费%.4f秒\033[0m"%(time.time()-st))

#! Positive-sample ratio (feeds the loss weighting)
positives = get_positives(trainset)
print("positives: ",positives)

#! Baseline model: pretrained-weight donor
model_path = "weights/yolov5s.h5"
base_model = keras.models.load_model(model_path)
#! Build the model to train
model = yolov5s(nc=nc,imgsz=imgsz,anchors=anchors,gt=gt)
#! Copy pretrained weights layer-by-layer
# zip() pairs layers by position, so this assumes base_model and model list
# their layers in the same order — TODO confirm the two architectures align.
for b_layer,layer in zip(base_model.layers,model.layers):
    # Skip input layers and the Detect head (its shape depends on nc/anchors).
    if "input" in layer.name or "Detect" in layer.name: continue
    # print(b_layer.name, layer.name)
    
    # Skip auto-generated operator/math layers, which carry no weights.
    if "__operators__" in layer.name or "math" in layer.name: continue

    # Layer names are assumed to start with "<node>." — the module index.
    node = int(layer.name.strip().split(".")[0])
    
    w = [x.numpy() for x in b_layer.weights]
    # print(len(layer.weights))
    layer.set_weights(w)
    # print(len(w))
    # Freeze the first 10 modules (backbone) except BatchNorm layers.
    if node < 10 and "bn" not in layer.name.lower():
            layer.trainable = False


#! Model summary and artifacts
model.summary()
model.save("./5.h5")
keras.utils.plot_model(model,'model.png',show_shapes=True,expand_nested=True)

#todo custom LR configuration (scales with batch size) — somewhat involved
nbs = 64          # nominal batch size the base learning rates were tuned for
_init_lr = 1e-3
_mini_lr = _init_lr * 0.01
LR_MAX = 1e-3 #! 5e-2
LR_MIN = 3e-4 #! 5e-4
# Scale the LR linearly with batchsize/nbs, clamped to [LR_MIN, LR_MAX]
# (and a 100x-smaller band for the minimum LR).
init_lr = min(max(batchsize/nbs*_init_lr,LR_MIN     ),LR_MAX     )
mini_lr = min(max(batchsize/nbs*_mini_lr,LR_MIN*1e-2),LR_MAX*1e-2)
lr = init_lr
# decay_type = 'step'
decay_type = 'cos'
lr_schedule = _lr_schedule(
    decay_type=decay_type,
    learning_rate=lr,
    min_lr = mini_lr,
    total_iters = len(trainset) * epochs,
    warmup_iters_ratio=0.1,
)
optimizer = keras.optimizers.Adam(learning_rate=lr,beta_1=0.937)
#! Without gt, class weights should NOT be used: cls has no background class.
#! With gt, weights may be used: cls has a default class for the background.

loss_fn = YoloLoss(grids,grid_anchors.reshape([nl,na,2]),nc,na,positives=positives,
                   xyxy=False,GIoU=False,DIoU=False,CIoU=True)
trainer = Trainer(model,optimizer,lr_schedule,loss_fn,dota_voc_eval,
                  imgsz=imgsz,strides=strides,anchors=anchors,grids=grids,gt=gt,shuffle=shuffle)
# Use the shared `epochs` constant so the run length always matches the LR
# schedule's total_iters (previously hard-coded to 300, risking divergence).
trainer(trainset,validset,epochs=epochs)