import torchvision.transforms as transforms
from Utils.loss import JointsMSELoss
from Models.pose_resnet import get_pose_net
from torch import optim
import logging
import os
from tqdm import tqdm
import torch
from Utils.evaluate import accuracy
import config as cfg

#------------------ device management -----------------
# NOTE(review): `device` is never referenced in this file — the code below
# relies on cfg.cuda plus explicit .cuda() calls instead. Kept as-is.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def save_ckpt(model, current_epoch, optimizer, scheduler, ckpt_name):
    """Save a resumable training checkpoint.

    Writes two files under ``cfg.ckpt_save_dir``: one named after
    ``ckpt_name`` and a rolling ``last_ckpt.pth`` that always holds the
    most recent state.

    Args:
        model: network whose ``state_dict`` is stored under key ``'net'``.
        current_epoch: epoch index stored under key ``'epoch'`` (resume
            logic restarts at ``epoch + 1``).
        optimizer: its ``state_dict`` is stored under key ``'optimizer'``.
        scheduler: its ``state_dict`` is stored under key ``'scheduler'``.
        ckpt_name: base filename; ``.pth`` is appended.
    """
    # Create the target directory on first use so torch.save cannot fail
    # with FileNotFoundError on a fresh run.
    os.makedirs(cfg.ckpt_save_dir, exist_ok=True)
    ckpt_dict = {
        'net': model.state_dict(),
        'epoch': current_epoch,
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }
    torch.save(ckpt_dict, os.path.join(cfg.ckpt_save_dir, ckpt_name+".pth"))
    torch.save(ckpt_dict, os.path.join(cfg.ckpt_save_dir, "last_ckpt.pth"))

def train(train_loader, model, criterion, optimizer, epoch, len_train, scheduler):
    """Run one training epoch and step the LR scheduler afterwards.

    Args:
        train_loader: DataLoader yielding (input, target, target_weight, meta).
        model: network to train (switched to train mode here).
        criterion: loss taking (output, target, target_weight).
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (used for the bar label and checkpoint name).
        len_train: dataset length; kept for backward compatibility — the
            progress-bar total now uses len(train_loader), which also counts
            a final partial batch correctly.
        scheduler: LR scheduler stepped once per epoch (ReduceLROnPlateau
            receives the mean epoch loss).
    """
    model.train()

    loss_ = 0.0          # running sum of per-batch losses for this epoch
    # Pre-initialize so the checkpoint/scheduler code below cannot raise
    # NameError if the loader yields no batches.
    current_loss = 0.0
    lr = optimizer.param_groups[0]['lr']

    with tqdm(desc=f'Epoch:{epoch}/{cfg.total_epoch}', total=len(train_loader),
              postfix=dict, ncols=100, colour='green') as pbar:
        for i, (input, target, target_weight, meta) in enumerate(train_loader):
            if cfg.cuda:
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

            # forward pass
            output = model(input)
            loss = criterion(output, target, target_weight)
            # backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # compute accuracy on the heatmaps and accumulate the loss
            _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
                                             target.detach().cpu().numpy())

            loss_ += loss.item()
            current_loss = loss_ / (i+1)
            lr = optimizer.param_groups[0]['lr']

            pbar.set_postfix(**{'acc':avg_acc,'loss(10^4)':current_loss*(1e4),'lr':lr})
            pbar.update(1)

    # Save a checkpoint every 3 epochs and on the final epoch.
    if epoch % 3 == 0 or epoch == cfg.total_epoch - 1:
        save_ckpt(model, epoch, optimizer, scheduler, f"Epoch{epoch}_lr{lr}_loss(10^4){current_loss*(1e4)}")

    # ReduceLROnPlateau needs the monitored metric; other schedulers step unconditionally.
    if cfg.lr_method == 'adaptive':
        scheduler.step(current_loss)
    else:
        scheduler.step()


def validate(model, val_loader, val_dataset, criterion, epoch, output_dir='',
             tb_log_dir='', writer_dict=None):
    """Run one validation pass, reporting the mean loss on a progress bar.

    Args:
        model: network to evaluate (switched to eval mode here).
        val_loader: DataLoader yielding (input, target, target_weight, meta).
        val_dataset: dataset backing val_loader; kept for interface
            compatibility — the bar total now uses len(val_loader), which
            also counts a final partial batch correctly.
        criterion: loss taking (output, target, target_weight).
        epoch: current epoch index; unused, kept for interface compatibility.
        output_dir, tb_log_dir, writer_dict: unused placeholders kept so
            existing callers do not break.

    Returns:
        int: always 0 — no real performance indicator is computed yet, so
        the caller's best_perf tracking is currently a no-op.
    """
    model.eval()

    val_loss_ = 0.0
    # Single no_grad context around the whole loop instead of re-entering it
    # per batch; behavior is identical, intent is clearer.
    with torch.no_grad(), tqdm(desc="Validating...", total=len(val_loader),
                               postfix=dict, ncols=100, colour='white') as pbar:
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            if cfg.cuda:
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

            # model output on the validation batch
            output = model(input)
            loss = criterion(output, target, target_weight)

            # progress-bar update with the running mean loss
            val_loss_ += loss.item()
            current_loss = val_loss_ / (i+1)
            pbar.set_postfix(**{'Val loss(10^5)':current_loss*(1e5)})
            pbar.update(1)

    # TODO(review): compute a real metric (flip-test / PCK / mAP as in the
    # original HRNet evaluation this was adapted from) and return it.
    return 0

class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and mean."""

    def __init__(self):
        # All statistics start from zero; avg stays 0 until the first update.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Record the newest observation, then fold it into the running
        # totals; `n` is the number of samples this value represents.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = 0 if self.count == 0 else self.sum / self.count

if __name__ == '__main__':
    # #------ create_logger would return a logger plus two output paths
    # logger,final_output_dir,tb_log_dir = create_logger(phase='train')

    #-------------- load data -------------------------------
    # Standard ImageNet normalization constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # NOTE(review): only the *train* dataset receives ToTensor+Normalize;
    # the val dataset is built without a transform — confirm intentional.
    if cfg.data_type == 'vehicle':
        from Datasets.KeyPointDataset import VehicleKeyPoint
        train_dataset = VehicleKeyPoint(txt_path=cfg.vehicle_train_txt_path,
                                        transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
        val_dataset = VehicleKeyPoint(txt_path=cfg.vehicle_valid_txt_path)

    elif cfg.data_type == 'mpii':
        from Datasets.mpii import MPIIDataset
        train_dataset = MPIIDataset(transform=transforms.Compose([transforms.ToTensor(),normalize,]))
        val_dataset = MPIIDataset()

    # Positional DataLoader args: (dataset, batch_size, shuffle).
    # NOTE(review): the val loader is also shuffled (third arg True), which
    # is unusual for validation — confirm intentional.
    train_loader = torch.utils.data.DataLoader(train_dataset,cfg.train_batch_size,True,num_workers=4,pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,cfg.val_batch_size,True,num_workers=4,pin_memory=True)
    #---------- model & cuda/cudnn setup ---- move model to cuda -------
    model = get_pose_net(is_train=True,style='pytorch',pre_trained='',num_layers=50,num_keypoint=cfg.num_keypoints)

    if cfg.cuda:
        model = model.cuda()
        # Wrap in DataParallel only when multiple GPUs are visible.
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)

    # benchmark=True lets cudnn auto-tune conv algorithms for fixed input
    # sizes; deterministic=False trades reproducibility for speed.
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    # define the loss function and optimizer
    criterion = JointsMSELoss(use_target_weight=cfg.use_target_weight)
    if cfg.cuda:
        criterion = criterion.cuda()
    if cfg.optimizer_method == 'sgd':
        # Positional SGD args: lr=cfg.init_lr, momentum=0.9.
        optimizer = optim.SGD(model.parameters(),cfg.init_lr,0.9,weight_decay=0.0001,nesterov=False)
    elif cfg.optimizer_method == 'adam':
        optimizer = optim.Adam(model.parameters(),lr=cfg.init_lr)
    else:
        raise ValueError("optimizer_method error!")

    # Select the LR schedule; 'adaptive' monitors the epoch loss inside train().
    lr_scheduler = None
    if cfg.lr_method == 'adaptive':
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=2,
                                                               threshold=1e-7, min_lr=1e-7)
    elif cfg.lr_method == 'multiStep':
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,cfg.train_lr_step,0.1)
    elif cfg.lr_method == 'step':
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer,30,0.8)
    else:
        raise ValueError("Error lr_method parameter.")
    #----------- resume training from a checkpoint ------------------
    if os.path.isfile(cfg.ckpt_resume_path):
        import numpy as np
        checkpoint = torch.load(cfg.ckpt_resume_path)
        model_dict = model.state_dict()
        pretrained_dict = checkpoint['net']
        # Keep only checkpoint weights whose shape matches the current model.
        # NOTE(review): a checkpoint key absent from model_dict raises
        # KeyError here (e.g. a DataParallel "module." prefix mismatch) —
        # confirm checkpoints were saved with a matching wrapper.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)

        # Restore optimizer/scheduler state and continue from the next epoch.
        if cfg.continue_train:
            optimizer.load_state_dict(checkpoint["optimizer"])
            cfg.start_epoch = checkpoint['epoch']+1
            lr_scheduler.load_state_dict(checkpoint['scheduler'])
        del checkpoint  # free memory

    #----------------- print the training configuration --------------------
    print(f"数据集类别={cfg.data_type}\n关键点数量={cfg.num_keypoints}\n"
          f"训练集长度={len(train_dataset)}\n验证集长度={len(val_dataset)}\n"
          f"训练Batch Size大小={cfg.train_batch_size}\n验证Batch Size大小={cfg.val_batch_size}\n"
          f"优化器={cfg.optimizer_method}\n学习率下降方式={cfg.lr_method}\n"
          f"使用断点续训练={cfg.continue_train}\nPTH权重恢复路径={cfg.ckpt_resume_path}\n"
          f"CUDA设备={cfg.cuda}\n可用CUDA设备数量={torch.cuda.device_count()}")
    best_perf = 0.0
    for epoch in range(cfg.start_epoch, cfg.total_epoch):
        # ------------------- start training ---------------------------
        train(train_loader, model, criterion, optimizer, epoch,len(train_dataset),
              lr_scheduler)

        #----------------- evaluate ----------- # optional: valid.py can be run directly for validation/testing instead
        # NOTE(review): validate() is called with the *train* loader/dataset,
        # not val_loader/val_dataset — confirm intentional.
        perf_indicator = validate(model,val_loader=train_loader,val_dataset=train_dataset,
                 criterion=criterion,epoch=epoch)

        # validate() currently always returns 0, so best_perf never changes.
        if perf_indicator > best_perf:
            best_perf = perf_indicator

