import os
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms

from Utils.loss import JointsMSELoss
# from Utils.utils import create_logger
import config as cfg
from Models.pose_resnet import get_pose_net
from Datasets.KeyPointDataset import VehicleKeyPoint
from Datasets.mpii import MPIIDataset
from Utils.evaluate import accuracy
import numpy as np
import time
# from Utils.transforms import flip_back
from Utils.inference import get_final_preds
from Utils.vis import save_debug_images

class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and mean."""

    def __init__(self):
        # Empty accumulator: no samples seen yet.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero when no samples have been counted.
        self.avg = self.sum / self.count if self.count else 0

def validate(model: torch.nn.Module,
             dataloader,
             criterion,
             dataset,
             log_dir=cfg.test_output_dir):
    """Run one full evaluation pass over `dataloader`.

    Args:
        model: pose-estimation network; switched to eval mode here.
        dataloader: yields (input, target, target_weight, meta) batches.
        criterion: joint loss (e.g. JointsMSELoss) taking
            (output, target, target_weight).
        dataset: the dataset backing `dataloader`; only its length is read
            (and the optional `dataset.evaluate` hook in the commented
            template below).
        log_dir: directory where periodic debug images are written.
    """
    model.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # BUG FIX: the original read the module-level global `test_dataset`
    # instead of the `dataset` parameter, so calling validate() from any
    # other module raised NameError.
    len_dataset = len(dataset)      # total number of samples
    # all_preds = np.zeros((len_dataset, cfg.num_keypoints, 3),dtype=np.float32)
    # all_boxes = np.zeros((len_dataset, 6))
    #
    # idx = 0

    with torch.no_grad():
        end = time.time()
        for i, (images, target, target_weight, meta) in enumerate(dataloader):

            if cfg.cuda:
                images = images.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)
            else:
                images = images.cpu()
                target = target.cpu()
                target_weight = target_weight.cpu()

            # Forward pass: predicted heatmaps.
            output = model(images)
            # Loss over all keypoints of every image in the batch.
            loss = criterion(output, target, target_weight)

            num_images = images.size(0)         # batch size
            # Accumulate loss and accuracy statistics.
            losses.update(loss.item(), num_images)
            all_acc, avg_acc, count, pred = accuracy(output.cpu().numpy(),
                                                     target.cpu().numpy())

            acc.update(avg_acc, count)

            # Measure elapsed time per batch.
            batch_time.update(time.time() - end)
            end = time.time()

            '''
            The commented section below is a template for the MPII-specific
            dataset.evaluate function. To collect per-image keypoint info
            (keypoint names, indices), re-enable and adapt this code.
            '''
            # c = meta['center'].numpy()      # bbox center coordinates
            # s = meta['scale'].numpy()       # scale used to expand the center into a bbox
            # score = meta['score'].numpy()   # appears to always be 1 -- TODO confirm
            #
            # preds_cords, max_values = get_final_preds(output.clone().cpu().numpy(), c, s)
            # # Store per-batch predicted coordinates; all_preds holds every
            # # image's keypoint coordinates and peak heatmap value.
            # all_preds[idx:idx + num_images, :, 0:2] = preds_cords[:, :, 0:2]
            # all_preds[idx:idx + num_images, :, 2:3] = max_values
            # # all_boxes stores the center position and scale factors.
            # all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            # all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            # all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
            # all_boxes[idx:idx + num_images, 5] = score
            #
            # idx += num_images

            if i % cfg.print_freq == 0:
                print('Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(dataloader), batch_time=batch_time,
                          loss=losses, acc=acc))
                # Filename prefix for the saved debug images.
                prefix = os.path.join(log_dir, f'val_{i}')

                # Save visualizations for this validation batch; pred is in
                # heatmap coordinates, so rescale to image coordinates.
                save_debug_images(images, meta, target,
                                  pred * (cfg.img_size // cfg.heatmap_size),
                                  output, prefix)
        # # (template) the 2958 seen while debugging is the dataset size
        # name_values, perf_indicator = dataset.evaluate(all_preds, log_dir)
        # print(name_values)
        # print("===================================")
        # print(perf_indicator)

if __name__ == '__main__':
    model = get_pose_net(is_train=False, style=cfg.model_style,
                         num_keypoint=cfg.num_keypoints)
    # Load the trained weights (checkpoint stores the state dict under "net").
    model.load_state_dict(torch.load(cfg.test_model_file)["net"])
    # Loss over all keypoints, optionally weighted per joint.
    criterion = JointsMSELoss(use_target_weight=cfg.use_target_weight)

    # --------- move to CUDA -------------
    if cfg.cuda:
        model = model.cuda()
        criterion = criterion.cuda()
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        # cudnn settings: benchmark autotunes convolution algorithms.
        cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.enabled = True
    else:
        model = model.cpu()
        criterion = criterion.cpu()

    # -- build the test dataset --
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],  # per-channel mean
                                     std=[0.229, 0.224, 0.225])   # per-channel std
    if cfg.data_type == 'vehicle':
        test_dataset = VehicleKeyPoint(
            txt_path=cfg.test_vehicle_txt_path,
            transform=transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
    elif cfg.data_type == 'mpii':
        test_dataset = MPIIDataset(
            transform=transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
    else:
        # ROBUSTNESS FIX: previously an unknown data_type left test_dataset
        # unbound and produced a confusing NameError below.
        raise ValueError(f"Unsupported cfg.data_type: {cfg.data_type!r} "
                         "(expected 'vehicle' or 'mpii')")

    # NOTE(review): shuffle=True is unusual for evaluation; it only affects
    # which batches get visualized, but confirm it is intentional.
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.test_batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True
    )
    validate(model=model, dataloader=test_loader,
             dataset=test_dataset, criterion=criterion)