from functools import reduce
import os, glob
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" #! ERROR级别
import cv2
import time
import math
import random
import warnings
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers

from yolo import yolov5s
from losses_v2 import YoloLoss
from cylib.dota import dota_voc_eval
from train import Trainer
from optimizer_utils import lr_schedule as _lr_schedule
from datasets import YoloDataLoader
from datasets import preprocess_batch
from datasets import preprocess_batch_images
from datasets import preprocess_batch_labels, parse_txt_annotation, _make_grid
from losses_utils import get_positive_weight
from anchors_utils import autoanchors, checkanchors, _make_grid_anchors

warnings.filterwarnings('ignore')

def parse_train(train_image_dir, train_label_dir):
    """Pair .jpg images with same-named .txt label files from two directories.

    Args:
        train_image_dir: directory containing the *.jpg training images.
        train_label_dir: directory containing the *.txt YOLO label files.

    Returns:
        (image_paths, label_paths): two parallel lists; entry i of each list
        refers to the same sample. Only basenames present in BOTH directories
        are kept. Order is unspecified (derived from a set intersection).
    """
    # endswith() fixes the original substring test (".jpg" in f), which also
    # matched names like "x.jpg.bak".
    image_names = {os.path.splitext(f)[0] for f in os.listdir(train_image_dir) if f.endswith(".jpg")}
    label_names = {os.path.splitext(f)[0] for f in os.listdir(train_label_dir) if f.endswith(".txt")}
    names = list(image_names & label_names)
    # Join the exact path directly: the original glob.glob(...)[0] raised
    # IndexError for names containing glob metacharacters ([, ?, *) and did
    # a needless directory scan per file.
    image_paths = [os.path.join(train_image_dir, "%s.jpg" % name) for name in names]
    label_paths = [os.path.join(train_label_dir, "%s.txt" % name) for name in names]
    return image_paths, label_paths

def split_filenames_labels(filenames,labels=None,split_rate=0.8,shuffle=True,random_state=None):
    """Split a dataset into a first (train) part and a second (test) part.

    Args:
        filenames: list of samples.
        labels: optional parallel list of labels; must match len(filenames).
        split_rate: fraction of samples placed in the first split.
        shuffle: permute samples before splitting.
        random_state: seed for the shuffle (seeds the *global* random module).

    Returns:
        (train, test) when labels is None, otherwise
        ((train_files, train_labels), (test_files, test_labels)).
    """
    has_labels = labels is not None
    if has_labels:
        assert len(filenames) == len(labels)

    if shuffle:
        # Shuffle an index permutation so filenames and labels stay aligned.
        random.seed(random_state)
        order = list(range(len(filenames)))
        random.shuffle(order)
        filenames = [filenames[i] for i in order]
        if has_labels:
            labels = [labels[i] for i in order]

    cut = int(len(filenames) * split_rate)  # size of the first split
    if has_labels:
        return (filenames[:cut], labels[:cut]), (filenames[cut:], labels[cut:])
    return filenames[:cut], filenames[cut:]
    
#! Basic hyper-parameters
anchors = [
    [ 10,13,  16, 30,  33, 23],
    [ 30,61,  62, 45,  59,119],
    [116,90, 156,198, 373,326],
]  # default YOLOv5 anchor (w,h) pairs, one row per detection level
nl = len(anchors)  # number of detection levels
nc = 5  # number of object classes
gt = False  # flag forwarded to preprocess_batch_labels / yolov5s / Trainer -- semantics defined in those modules; TODO confirm
shuffle = True  # forwarded to Trainer
strides = [8,16,32]  # feature-map downsampling stride of each detection level
imgsz = (640,640)  # network input size, also passed to cv2.resize below
batchsize = 16
epochs = 300

#! Data preparation
print("\033[31m开始准备数据\033[0m")
st = time.time()  # wall-clock start, reported after the loaders are built
# Previously used dataset locations, kept for quick switching:
# train_image_dir = 'pengcheng/images/train'
# train_label_dir = 'pengcheng/labels/train'
# train_image_dir = '/home/lxin49/Datasets/coco128/images/train2017'
# train_label_dir = '/home/lxin49/Datasets/coco128/labels/train2017'
# train_image_dir = '/home/lxin49/Datasets/hit-uav/images/train'
# train_label_dir = '/home/lxin49/Datasets/hit-uav/labels/train'
train_image_dir = "/home/lxin49/Second/DataSets/hit-uav/images/train"
train_label_dir = "/home/lxin49/Second/DataSets/hit-uav/labels/train"
# train_image_dir = "/home/lxin49/Second/DataSets/coco128/images/train2017"
# train_label_dir = "/home/lxin49/Second/DataSets/coco128/labels/train2017"
image_paths, label_paths = parse_train(train_image_dir,train_label_dir)
# train_datas, testa_datas = split_filenames_labels([(ip,lp) for (ip,lp) in zip(image_paths,label_paths)],split_rate=1.0,shuffle=True)
train_datas = [(ip,lp) for (ip,lp) in zip(image_paths,label_paths)]  # list of (image_path, label_path) pairs
# train_datas = train_datas[:25] #! pipeline smoke test only
# testa_datas = testa_datas[:25] #! pipeline smoke test only
# #! Anchor adjustment
labels = np.row_stack([parse_txt_annotation(td[1]) for td in train_datas])  # all annotations stacked into one array
print(labels.shape)
# anchors = autoanchors(_label_paths,9,input_shape=imgsz)
anchors = checkanchors(labels,anchors,input_shape=imgsz)  # validate/adjust anchors against the dataset -- see anchors_utils
# # anchors = np.array(anchors)
# print(anchors.shape)
# print(anchors)
na = len(anchors[0]) // 2  # anchors per level (each anchor is a w,h pair)
#! Build grids and anchor grids
grids = _make_grid(imgsz,na,strides)
grid_anchors = _make_grid_anchors(np.array(anchors).reshape([nl,na,2]),strides)
print("grid_anchors: ",grid_anchors)

#! Batch preprocessing functions
image_func = lambda x: preprocess_batch_images(x,lambda y:cv2.resize(y,imgsz))
label_func = lambda x: preprocess_batch_labels(x,nc=nc,imgsz=imgsz,grid_anchors=grid_anchors,gt=gt)
batch_func = lambda x,**kwargs: preprocess_batch(x,image_func,label_func,input_shape=imgsz,**kwargs)

#! Dataset loaders
# NOTE(review): validset reuses train_datas -- validation runs on the training
# data (no held-out split); confirm this is intentional.
trainset = YoloDataLoader(batchsize,train_datas,batch_func,
    together=True,letterbox=True,mosaic=True,makeup=True,shuffle=True)
validset = YoloDataLoader(batchsize,train_datas,batch_func,
    together=True,letterbox=True,mosaic=False,makeup=False,shuffle=False)
print("\033[31m数据准备完毕, 一共花费%.4f秒\033[0m"%(time.time()-st))

# Debug visualization (kept for reference): decodes the encoded batch labels
# back to pixel-space boxes and draws them on the batch images.
# for i,(batch_images,batch_labels) in enumerate(trainset):
#     print(batch_images.shape)
#     print(len(batch_labels))
#     anchors = np.array(anchors).reshape([nl,na,2])

#     for b,image in enumerate(batch_images):
#         write = image.copy()
#         write = (write * 255.0).astype(np.uint8)
#         cv2.imshow("image",write)
#         cv2.waitKey(0)
#         bbox = []
#         for ldx in range(nl):
#             targets = batch_labels[ldx][b] 
#             stride = strides[ldx]
#             anchor = anchors[ldx]
#             t = targets.copy()
#             t = t[np.where(np.equal(t[...,4],1))] #! keep rows that have a box
#             t = np.unique(t,axis=0)               #! deduplicate
#             t[:,0:4] = t[:,0:4] * stride
#             print("t: \n",t[:,0:4])
#             bbox.append(t)
#             t = t[np.where(np.equal(t[...,4],1))]
#         bbox = np.row_stack(bbox)
#         bbox = np.unique(bbox,axis=0)

#         height, width, c = write.shape
#         for box in bbox:
#             xc, yc, w, h = box[0:4]
#             # print(xc,yc,w,h)  
#             x1 = int(round(xc - w / 2))
#             x2 = int(round(xc + w / 2))
#             y1 = int(round(yc - h / 2))
#             y2 = int(round(yc + h / 2))
#             # print(x1,y1,x2,y2)
#             write = cv2.rectangle(write,(x1,y1),(x2,y2),color=(255,0,0),thickness=2)
#         cv2.imwrite("%d_%d.png"%(i,b),write)
#         cv2.imshow("image",write)
#         cv2.waitKey(0)

#! Positive-sample ratio -- computed over the train set by get_positive_weight
#! and passed to YoloLoss; presumably the fraction of positive targets (TODO
#! confirm exact definition in losses_utils).
positive_weight = get_positive_weight(trainset)
# positive_weight = (1-positive_weight)/positive_weight
# positive_weight = 1 - positive_weight
print("\033[31m")
print("positive_weight: ",positive_weight)
print("\033[0m")

# Resume from a previously trained checkpoint if it exists; otherwise build a
# fresh model and (optionally) transfer weights from the pretrained baseline.
model_path = "models/2023-09-06_20-59-00/best.h5"
if not os.path.exists(model_path):
    #! Baseline (pretrained) model
    model_path = "weights/yolov5s.h5"
    base_model = keras.models.load_model(model_path)
    #! Build a fresh model for this task
    model = yolov5s(nc=nc,imgsz=imgsz,anchors=anchors,gt=gt)
    #! Transfer weights layer-by-layer from the baseline
    for b_layer,layer in zip(base_model.layers,model.layers):
        # Skip input and detection-head layers (their shapes depend on nc/anchors).
        if "input" in layer.name or "Detect" in layer.name: continue
        # print(b_layer.name, layer.name)
        
        # Skip TF auto-generated operator layers that carry no weights.
        if "__operators__" in layer.name or "math" in layer.name: continue

        # Layer names are expected to begin with "<node-index>." -- TODO
        # confirm the naming scheme in yolo.py.
        node = int(layer.name.strip().split(".")[0])
        
        # NOTE(review): the actual weight copy below is commented out, so this
        # loop currently has no effect and the model trains from scratch.
        # if node < 24:
        #     w = [x.numpy() for x in b_layer.weights]
        #     layer.set_weights(w)
        #     if "bn" not in layer.name.lower():
        #         layer.trainable = False
else:
    model = keras.models.load_model(model_path)

#! Model summary / export
model.summary()
model.save("./5.h5")
keras.utils.plot_model(model,'model.png',show_shapes=True,expand_nested=True)

#todo Custom LR setup (more complex, kept for reference)
# nbs = 64
# _init_lr = 1e-2
# _mini_lr = _init_lr * 0.01
# LR_MAX = 1e-3 #! 5e-2
# LR_MIN = 3e-4 #! 5e-4
# init_lr = min(max(batchsize/nbs*_init_lr,LR_MIN     ),LR_MAX     )
# mini_lr = min(max(batchsize/nbs*_mini_lr,LR_MIN*1e-2),LR_MAX*1e-2)
# lr = init_lr
# # decay_type = 'step'
# decay_type = 'cos'
# lr_schedule = _lr_schedule(
#     decay_type=decay_type,
#     learning_rate=lr,
#     min_lr = mini_lr,
#     total_iters = len(trainset) * epochs,
#     warmup_iters_ratio=0.1,
# )
#! When gt is not used, class weights should NOT be applied: cls has no class
#! corresponding to the background.
#! When gt is used, weights MAY be applied: cls has a default class that
#! corresponds to the background.
# initial_learning_rate = 1e-3
# decay_steps = epochs * len(trainset)
# alpha = 1e-5
# lr_schedule = keras.optimizers.schedules.CosineDecay(
#     initial_learning_rate,
#     decay_steps,
#     alpha = 1e-2,
# )

# Cosine annealing with warm restarts: the first cycle covers a third of the
# total training steps; each subsequent cycle is t_mul times longer and peaks
# at m_mul times the previous peak LR; alpha floors the LR within a cycle.
initial_learning_rate = 1e-3
first_decay_steps = len(trainset) * epochs // 3
t_mul=2.0  #! next cycle length = previous cycle steps * t_mul
m_mul=0.9995  #! next cycle peak LR = previous peak LR * m_mul
alpha=1e-2
lr_schedule = keras.optimizers.schedules.CosineDecayRestarts(
    initial_learning_rate,
    first_decay_steps,
    t_mul,
    m_mul,
    alpha,
)

# optimizer = keras.optimizers.Adam(learning_rate=lr,beta_1=0.937,beta_2=0.9995)
optimizer = keras.optimizers.Adam(learning_rate=lr_schedule,beta_1=0.937)

# CIoU variant of the box-regression loss; xyxy=False presumably means boxes
# are in (xc,yc,w,h) form -- confirm in losses_v2.
loss_fn = YoloLoss(grids,grid_anchors.reshape([nl,na,2]),nc,na,positive_weight=positive_weight,
                   xyxy=False,GIoU=False,DIoU=False,CIoU=True)
model.compile(
    optimizer=optimizer,
    # loss=loss_fn,
    # metrics=dota_voc_eval,
)
# NOTE(review): Trainer receives optimizer=None here and presumably falls back
# to the optimizer compiled into the model above -- confirm in train.py.
# trainer = Trainer(model,optimizer,lr_schedule,None,dota_voc_eval,
#                   imgsz=imgsz,strides=strides,anchors=anchors,grids=grids,gt=gt,shuffle=shuffle)
trainer = Trainer(model,None,lr_schedule,loss_fn,dota_voc_eval,
                  imgsz=imgsz,strides=strides,anchors=anchors,grids=grids,gt=gt,shuffle=shuffle)
# trainer = Trainer(model,optimizer,None,loss_fn,dota_voc_eval,
#                   imgsz=imgsz,strides=strides,anchors=anchors,grids=grids,gt=gt,shuffle=shuffle)
trainer(trainset,validset,epochs=epochs)