import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import os
from tqdm import tqdm
from torch.utils.data import DataLoader

from pr_yolo.Models.yolo_body import YoloBody
from pr_yolo.Utils.yolo_loss import YOLOLoss, weights_init
from pr_yolo.Utils.dataloader_new import YoloDataset, yolo_dataset_collate
from pr_yolo.Utils.utils import get_classes,get_lr
import configuration as cfg

# Device selector driven by the config flag.
# NOTE(review): `device` is not referenced anywhere in this file — the code below
# moves tensors with `cfg.cuda` checks and `.cuda()` calls instead; confirm whether
# this is used by other modules before removing.
device = torch.device('cuda' if cfg.cuda else 'cpu')

def save_ckpt(net,current_epoch,optimizer,scheduler,ckpt_name):
    """Save a training checkpoint.

    Persists the model, optimizer and scheduler state plus the current epoch,
    writing the same dict twice: once under ``ckpt_name``.pth and once as the
    rolling ``last_ckpt.pth`` used for resuming.
    """
    state = {
        'net': net.state_dict(),
        'epoch': current_epoch,
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }
    for filename in (ckpt_name + ".pth", "last_ckpt.pth"):
        torch.save(state, os.path.join(cfg.checkpoint_path, filename))

def _batch_to_tensors(images, targets):
    """Convert one numpy batch to float tensors, moving them to GPU when cfg.cuda is set.

    `targets` is a per-image list of annotation arrays (variable length per image),
    so it stays a Python list of tensors rather than one stacked tensor.
    """
    images = torch.from_numpy(images).type(torch.FloatTensor)
    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
    if cfg.cuda:
        images = images.cuda()
        targets = [ann.cuda() for ann in targets]
    return images, targets

def fit_one_epoch(model, yolo_loss, optimizer, scheduler, epoch, epoch_step, epoch_step_val, gen, gen_val):
    """Run one training epoch and one validation epoch.

    Appends the averaged losses to the cfg.train_infor text log, steps the LR
    scheduler (feeding the mean train loss when the 'Adaptive' /
    ReduceLROnPlateau method is selected) and saves a checkpoint whose name
    encodes epoch, learning rate and both losses.

    Args:
        model:          the YOLO network being trained.
        yolo_loss:      YOLOLoss instance; called once per output scale.
        optimizer:      optimizer whose gradients are stepped per batch.
        scheduler:      LR scheduler, stepped once per epoch.
        epoch:          current epoch index (0-based).
        epoch_step:     number of training batches per epoch.
        epoch_step_val: number of validation batches per epoch.
        gen:            training DataLoader (yields numpy batches).
        gen_val:        validation DataLoader.
    """
    # ---------------- training pass ----------------
    train_loss = 0
    model.train()
    with tqdm(total=epoch_step, desc=f'Epoch {epoch}/{cfg.total_epoch} Train', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break

            images, targets = batch[0], batch[1]
            with torch.no_grad():
                # Conversion only; gradients start from the forward pass below.
                images, targets = _batch_to_tensors(images, targets)

            optimizer.zero_grad()
            outputs = model(images)

            # Sum the loss over every YOLO output scale.
            train_loss_value_all = 0
            for l in range(len(outputs)):
                loss_item, _ = yolo_loss(l, outputs[l], targets)
                train_loss_value_all += loss_item
            train_loss_value_all.backward()
            optimizer.step()

            train_loss += train_loss_value_all.item()

            pbar.set_postfix(**{'lr': get_lr(optimizer), 'loss': train_loss / (iteration + 1)})
            pbar.update(1)
    train_line = ("Epoch:%-3d,lr:%-15.15f,tra|loss:%-7.7f|" % (epoch, get_lr(optimizer), train_loss / (epoch_step)))

    # ---------------- validation pass ----------------
    val_loss = 0
    model.eval()
    with tqdm(total=epoch_step_val, desc=f'Val', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            images, targets = batch[0], batch[1]
            with torch.no_grad():
                images, targets = _batch_to_tensors(images, targets)
                outputs = model(images)
                val_loss_value_all = 0
                for l in range(len(outputs)):
                    val_loss_item, _ = yolo_loss(l, outputs[l], targets)
                    val_loss_value_all += val_loss_item

            val_loss += val_loss_value_all.item()
            pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1)})
            pbar.update(1)

    val_line = (",val|loss:%-7.7f|\n" % (val_loss / (epoch_step_val)))

    # ---------------- log losses to txt ----------------
    # Context manager guarantees the log file is closed even if a write fails.
    with open(cfg.train_infor, 'a') as txt_file:
        if epoch == 0:
            # One-off header recording the run configuration.
            txt_file.write(f"----shape:{cfg.input_img_size[0]}-total_epoch:{cfg.total_epoch}-backbone:{cfg.backbone}-mosaic:{cfg.mosaic}"
                           f"-label_smoothing:{cfg.label_smoothing}-lr:{cfg.lr_descend_method}"
                           f"-use_pretrained:{cfg.use_pretrain}-freeze:{cfg.freeze}----\n")
        txt_file.write(train_line + val_line)

    # ---------------- scheduler step + checkpoint ----------------
    if cfg.lr_descend_method == 'Adaptive':
        # ReduceLROnPlateau needs the monitored metric.
        scheduler.step(train_loss / (epoch_step))
    else:
        scheduler.step()
    save_ckpt(net=model, current_epoch=epoch, optimizer=optimizer, scheduler=scheduler,
              ckpt_name=f"Epoch{epoch}_lr{round(optimizer.param_groups[0]['lr'],8)}"
                        f"_traLoss{round(train_loss/(epoch_step),2)}_"
                        f"valLoss{round(val_loss/(epoch_step_val),2)}")

if __name__ == "__main__":
    #-------------创建一些文件夹---------------------
    if not os.path.isdir(cfg.checkpoint_path):
        os.mkdir(cfg.checkpoint_path)
    # -------------获取classes和anchor-----------------
    class_names, num_classes = get_classes(cfg.classes_path)
    #------------------创建yolo模型-------------------
    model = YoloBody(cfg.anchors_mask, num_classes, backbone = cfg.backbone, pretrained = cfg.use_pretrain)
    #------------放入CUDA-----------------
    if cfg.cuda:
        cudnn.benchmark = True
        model = model.cuda()

    #--------建立loss函数---------------------
    yolo_loss    = YOLOLoss(cfg.yolo_anchors, num_classes, cfg.input_img_size, cfg.cuda, cfg.anchors_mask, cfg.label_smoothing)
    #------------优化器设置-------------------
    optimizer = optim.Adam([{'params':model.parameters(),'initial_lr':cfg.lr}],lr=cfg.lr)
    # optimizer = optim.SGD([{'params':model.parameters(),'initial_lr':cfg.lr}],lr=cfg.lr)
    #----------学习率调整方法------------------
    if cfg.lr_descend_method == 'CosineAnnealingWarmRestarts':
        lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,T_0=10,T_mult=2,
                                                                      eta_min=1e-7)
    elif cfg.lr_descend_method == "WarmUp":
        lr_scheduler = optim.lr_scheduler.ChainedScheduler
    elif cfg.lr_descend_method == "StepLR":
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)

    elif cfg.lr_descend_method == "CyclicLR":
        lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,1e-8, cfg.lr, cfg.total_epoch//30,cfg.total_epoch//30,
                                                         'triangular2',scale_mode='cycle',
                                                         cycle_momentum=False, base_momentum=0.8, max_momentum=0.9)
    elif cfg.lr_descend_method == "Adaptive":
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,'min',0.9,patience=2,threshold=0.1,
                                                                  cooldown=0,min_lr=1e-6)

    #-------------------读取数据集对应的txt-------------
    with open(cfg.train_annotation_path) as f:
        train_lines = f.readlines()
    with open(cfg.val_annotation_path) as f:
        val_lines   = f.readlines()
    num_train   = len(train_lines)
    num_val     = len(val_lines)

    #----------------数据集加载--------------------
    train_dataset   = YoloDataset(train_lines, cfg.input_img_size, num_classes, mosaic=cfg.mosaic, train = True)
    val_dataset     = YoloDataset(val_lines,  cfg.input_img_size, num_classes, mosaic=False, train = False)
    gen             = DataLoader(train_dataset, shuffle = True, batch_size=cfg.train_batch_size, num_workers =4, pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)
    gen_val         = DataLoader(val_dataset  , shuffle = True, batch_size=cfg.val_batch_size, num_workers =4, pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)

    epoch_step = num_train // cfg.train_batch_size
    epoch_step_val = num_val // cfg.val_batch_size
    if epoch_step == 0 or epoch_step_val == 0:
        raise ValueError("数据集过小，无法进行训练，请扩充数据集。")

    #-------------预训练和断点续训-----------------
    if not os.path.isfile(cfg.ckpt_resume_path):
        weights_init(model)
    if os.path.isfile(cfg.ckpt_resume_path):
        import numpy as np
        checkpoint = torch.load(cfg.ckpt_resume_path)
        model_dict = model.state_dict()
        pretrained_dict = checkpoint['net']
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        print(f"Load pth as pretrain:{cfg.ckpt_resume_path}")
        if cfg.continue_train:
            optimizer.load_state_dict(checkpoint["optimizer"])
            cfg.start_epoch = checkpoint['epoch']+1
            lr_scheduler.load_state_dict(checkpoint['scheduler'])
            print(f"Load pth as resume pth:{cfg.ckpt_resume_path}")
        del checkpoint  # free memory

    print(f"==========================打印信息================================\n"
          f"类别数：{num_classes}，训练集大小：{num_train}，验证集大小：{num_val}，选择的模型：{cfg.backbone}\n"
          f"选择的下降方式：{cfg.lr_descend_method}，输入图像分辨率{cfg.input_img_size}")

    #主干特征提取网络特征通用，冻结训练可以加快训练速度
    if cfg.freeze:
        if cfg.start_epoch<=50:        #   冻结backbone部分训练
            for param in model.backbone.parameters():
                param.requires_grad = False
            for epoch in range(cfg.start_epoch, cfg.total_epoch):
                fit_one_epoch(model, yolo_loss, optimizer, lr_scheduler,epoch,
                        epoch_step, epoch_step_val, gen, gen_val)
        if cfg.start_epoch > 50:        #解冻后训练
            for param in model.backbone.parameters():
                param.requires_grad = True
            for epoch in range(cfg.start_epoch, cfg.total_epoch):
                fit_one_epoch(model, yolo_loss, optimizer, lr_scheduler,epoch,
                        epoch_step, epoch_step_val, gen, gen_val)
    else:
        for epoch in range(cfg.start_epoch,cfg.total_epoch):
            fit_one_epoch(model,yolo_loss,optimizer,lr_scheduler,epoch,epoch_step,
                          epoch_step_val,gen,gen_val)