import config.Yolov4 as cfg
from model.loss.yolo_training import YOLOLoss
from dataset.dataset.Yolo_Dataset import create_dataloader
from tools.logger import init_logger
from model.optimize.optimize import AdamW,Sdiffer_weight_decay_SGD
from model.scheduler.scheduler import CosineAnnealingLR
from model.other.Moving_Average import ModelEMA
from models.yolo import Model
from model.loss.yolo5s_loss import compute_loss

import numpy as np
import torch
import os
import torch.backends.cudnn as cudnn
import time
from torch.autograd import Variable
from collections import deque
from tqdm import tqdm
from torch.cuda.amp import autocast as autocast

import warnings
warnings.filterwarnings("ignore", category=Warning)


def load_model():
    """Load pretrained weights from ``cfg.model_path`` into the global ``model``.

    Supports two checkpoint layouts:
      * a plain ``state_dict`` (as saved by this training script), and
      * a full YOLOv5-style checkpoint where an ``nn.Module`` is stored
        under the ``'model'`` key.

    Only tensors whose key exists in the current model AND whose shape
    matches are transferred; everything else keeps the freshly
    initialized weights.

    Returns:
        The global ``model`` with the matching weights loaded.
    """
    logging.info('Loading weights into state dict...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Force the model to float so dtypes line up with the checkpoint.
    model_dict = model.float().state_dict()
    pretrained_dict = torch.load(cfg.model_path, map_location=device)
    # Official YOLOv5 checkpoints wrap the module under the 'model' key.
    # Detect that layout instead of comparing against one hard-coded
    # absolute path (the original only unwrapped a single Windows path,
    # breaking for any other full checkpoint).
    if isinstance(pretrained_dict, dict) and 'model' in pretrained_dict \
            and hasattr(pretrained_dict['model'], 'state_dict'):
        pretrained_dict = pretrained_dict['model'].float().state_dict()
    # Keep only keys that exist in the current model with matching shapes.
    # The `k in model_dict` guard fixes a KeyError the original raised
    # for any checkpoint-only key.
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    logging.info('Transferred %g/%g items from %s' % (len(pretrained_dict), len(model.state_dict()), cfg.model_path))  # report
    logging.info('Finished!')
    return model

def train(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):
    """Run one epoch of training, then validation on the EMA weights,
    then (optionally) save and rotate a checkpoint.

    Relies on module-level globals: ``optimizer``, ``ema``, ``scaler``
    (fp16 path only), ``model``, ``Batch_size``, ``saved_models``,
    ``logging`` and ``cfg``.

    Args:
        net: model being trained (same object as the global ``model``).
        yolo_losses: per-scale YOLOLoss instances (used only when cfg.f16).
        epoch: zero-based index of the current epoch.
        epoch_size: number of training batches per epoch.
        epoch_size_val: number of validation batches per epoch.
        gen: training data loader yielding (images, targets) numpy batches.
        genval: validation data loader with the same batch layout.
        Epoch: total number of epochs (progress display only).
        cuda: move input images to GPU when True.
    """
    total_loss = 0.0
    val_loss = 0.0

    def get_lr(optimizer):
        # All param groups share one lr here; report the first.
        for param_group in optimizer.param_groups:
            return param_group['lr']

    with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        net.train()
        for iteration, batch in enumerate(gen):
            start_time = time.time()
            images, targets = batch[0], batch[1]
            with torch.no_grad():
                # numpy -> float tensors; targets stay on CPU (the loss
                # moves/uses them as needed), only images go to GPU.
                images = torch.from_numpy(images).type(torch.FloatTensor)
                targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
                if cuda:
                    images = images.cuda()
            optimizer.zero_grad()

            if cfg.f16 is True:
                # Mixed-precision path: forward + loss under autocast,
                # scaled backward/step through the global GradScaler.
                with autocast():
                    outputs = net(images)
                    loss = sum(yolo_losses[i](outputs[i], targets)[0] for i in range(3))
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
            else:
                outputs = net(images)
                losses, loss_items = compute_loss(outputs, targets, model)  # loss scaled by batch_size
                loss = sum(losses)
                loss.backward()
                optimizer.step()

            # Keep the EMA step counter consistent with an effective
            # batch size of 64, then fold the new weights in.
            ema.updates = epoch*epoch_size//(max(round(64/Batch_size),1))
            ema.update(net)
            # .item() detaches the scalar: accumulating the raw tensor
            # (as the original did) keeps every iteration's autograd
            # graph alive and leaks memory across the epoch.
            total_loss += loss.item()
            waste_time = time.time() - start_time

            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'lr': get_lr(optimizer),
                                'step/s': waste_time})
            pbar.update(1)

    print('\nStart Validation')
    with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(genval):
            images_val, targets_val = batch[0], batch[1]

            with torch.no_grad():
                images_val = torch.from_numpy(images_val).type(torch.FloatTensor)
                targets_val = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets_val]
                if cuda:
                    images_val = images_val.cuda()
                # Validate with the EMA copy of the weights. outputs[1]
                # selects the raw per-scale maps that compute_loss expects
                # (the EMA model is in eval mode and returns a pair).
                outputs = ema.ema(images_val)
                # BUG FIX: the original passed the stale training-batch
                # `targets` here instead of `targets_val`, so validation
                # loss was computed against the wrong labels.
                losses, loss_items = compute_loss(outputs[1], targets_val, model)  # loss scaled by batch_size
                val_loss += sum(losses).item()
                pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
                pbar.update(1)


    print('\nFinish Validation')
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / (epoch_size + 1), val_loss / (epoch_size_val + 1)))
    logging.info('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    logging.info(f'lr:{get_lr(optimizer)}')
    logging.info('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / (epoch_size + 1), val_loss / (epoch_size_val + 1)))

    if cfg.save_bool:
        if not os.path.exists(cfg.save_model_dir):
            # makedirs handles nested paths (mkdir would fail on them).
            os.makedirs(cfg.save_model_dir, exist_ok=True)
            logging.info('Created save_model_path')
        save_path = os.path.join(cfg.save_model_dir, f'Yolov4_epoch{epoch + 1}.pt')
        torch.save(model.state_dict(),save_path)
        logging.info(f'Checkpoint {epoch + 1} saved !')
        saved_models.append(save_path)
        # Rotate old checkpoints so at most keep_checkpoint_max remain.
        if len(saved_models) > cfg.keep_checkpoint_max > 0:
            model_to_remove = saved_models.popleft()
            try:
                os.remove(model_to_remove)
            except OSError:
                # Narrowed from a bare except: only swallow filesystem
                # errors, never KeyboardInterrupt/SystemExit.
                logging.info(f'failed to remove {model_to_remove}')

if __name__ == '__main__':
    # NOTE: `logging` here is a custom logger object returned by
    # init_logger (it shadows the stdlib module name); load_model() and
    # train() rely on it as a module-level global.
    logging = init_logger(log_dir='logs',stdout=False)

    # Per-scale YOLO losses (only used on the fp16 path inside train()).
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(YOLOLoss())

    # model = YoloBody()
    model = Model().to(torch.device('cpu'))
    if os.path.exists(cfg.model_path):
        model = load_model()
    if cfg.Cuda:
        # model = torch.nn.DataParallel(model)  (this is for multi-GPU training)
        cudnn.benchmark = True
        cudnn.deterministic = False
    device = 'cuda' if cfg.Cuda is True else 'cpu'
    model.to(device)

    # Exponential moving average of the weights; validation in train()
    # runs on ema.ema rather than the live model.
    ema = ModelEMA(model)

    torch.manual_seed(1)
    # Rotating queue of checkpoint paths consumed by train() for cleanup.
    saved_models = deque()
    Init_Epoch = 0
    Unfreeze_Epoch = 10
    Freeze_Epoch = 60
    for stage in range(2):
        # NOTE(review): incrementing the loop variable makes `stage` take
        # the values 1 then 2, so the `stage == 0` branch below is dead
        # code — only the frozen-backbone stage ever runs, and the second
        # iteration hits the `else: break`. Confirm whether skipping
        # stage 0 (which references model.backbone) is intentional.
        stage +=1
        if stage == 0:
            # Stage 0 (currently unreachable): train everything,
            # backbone included, with AdamW.
            lr = 1e-3
            Batch_size = 2
            optimizer = AdamW(model.parameters(),lr=lr)()
            lr_scheduler = CosineAnnealingLR(optimizer, T_max=Unfreeze_Epoch, eta_min=5*1e-4)()
            gen,train_loader = create_dataloader('train',Batch_size)
            gen_val,val_loader = create_dataloader('val',Batch_size)
            epoch_size = len(train_loader)
            epoch_size_val = len(val_loader)

            # Make sure the backbone is trainable in this stage.
            for param in model.backbone.parameters():
                param.requires_grad = True

            if cfg.f16==True:
                # Global GradScaler used by the fp16 path in train().
                scaler = torch.cuda.amp.GradScaler()
            for epoch in range(Init_Epoch, Unfreeze_Epoch):
                train(model, yolo_losses, epoch, epoch_size, epoch_size_val, train_loader, val_loader,
                              Unfreeze_Epoch, cfg.Cuda)
                lr_scheduler.step()


        elif stage == 1:
            # Stage 1: freeze the backbone and fine-tune the rest with a
            # per-group weight-decay SGD.
            lr = 5*1e-4
            Batch_size = 2
            # optimizer = AdamW(model.parameters(),lr=lr)()
            optimizer = Sdiffer_weight_decay_SGD(model,lr=lr,batch_size=Batch_size,logger=logging)()
            # lr_scheduler = CosineDecayLR(optimizer, T_max=Unfreeze_Epoch,lr_min=5*1e-5,lr_max=5*1e-4, warmup=int(0.2 * Unfreeze_Epoch))
            # NOTE(review): eta_min here equals the initial lr (5e-4), so
            # the cosine schedule is effectively flat; the commented line
            # above suggests 5e-5 may have been intended — confirm.
            lr_scheduler = CosineAnnealingLR(optimizer, T_max=Freeze_Epoch-Unfreeze_Epoch, eta_min=5 * 1e-4)()

            gen, train_loader = create_dataloader('train', Batch_size)
            gen_val, val_loader = create_dataloader('val', Batch_size)

            epoch_size = len(train_loader)
            epoch_size_val = len(val_loader)

            # Freeze every parameter whose name contains 'backbone'.
            freeze = ['backbone']
            for k,v in model.named_parameters():
                if any(x in k for x in freeze):
                    print('freezing %s' % k)
                    v.requires_grad = False

            if cfg.f16==True:
                scaler = torch.cuda.amp.GradScaler()
            for epoch in range(Unfreeze_Epoch, Freeze_Epoch):
                train(model, yolo_losses, epoch, epoch_size, epoch_size_val, train_loader, val_loader,
                              Freeze_Epoch, cfg.Cuda)
                # lr_scheduler.step(epoch + 1)  # at epoch 0 the lr would be 0, hence the manual +1
                lr_scheduler.step()  # at epoch 0 the lr would be 0, hence the manual +1
        else:
            break