from collections import OrderedDict
import logging
import os
import json

from scipy.io import loadmat, savemat

logger = logging.getLogger(__name__)

import copy
import logging
import random

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

from Utils.transforms import get_affine_transform
from Utils.transforms import affine_transform
import config as cfg

logger = logging.getLogger(__name__)


class MPIIDataset(Dataset):
    """MPII human-pose keypoint dataset.

    Each sample yields a 4-tuple:
        img           -- the affine-cropped person image (after ``transform``)
        target        -- per-joint gaussian heatmaps, shape [num_joints, H, W]
        target_weight -- per-joint visibility weights, shape [num_joints, 1]
        meta          -- dict of bookkeeping info (file, center, scale, ...)
    """

    def __init__(self, transform=None):
        super().__init__()
        # MPII annotations express person scale relative to a 200-pixel box.
        self.pixel_std = 200

        self.is_train = cfg.mpii_is_train

        # Augmentation hyper-parameters (scale jitter, rotation, h-flip).
        self.scale_factor = cfg.mpii_scale_factor
        self.rotation_factor = cfg.mpii_rot_factor
        self.flip = cfg.mpii_flip

        self.image_size = [cfg.img_size, cfg.img_size]
        self.target_type = cfg.mpii_target_type
        self.heatmap_size = [cfg.heatmap_size, cfg.heatmap_size]
        self.sigma = cfg.mpii_sigma

        self.transform = transform
        self.num_joints = 16
        # Left/right joint index pairs that must be swapped on horizontal flip.
        self.flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
        self.parent_ids = [1, 2, 6, 6, 3, 4, 6, 6, 7, 8, 11, 12, 7, 7, 13, 14]

        self.db = self.get_db(cfg.mpii_images_path, cfg.mpii_json_path)

        if self.is_train and cfg.mpii_select_data:
            self.db = self.select_data(self.db)

        logger.info('=> load {} samples'.format(len(self.db)))

    def get_db(self, image_path, json_path):
        """Parse the MPII json annotation file into a list of sample dicts."""
        with open(json_path) as anno_file:
            anno = json.load(anno_file)

        gt_db = []
        for index, a in enumerate(anno):
            image_name = a['image']
            c = np.array(a['center'])
            s = np.array([a['scale'], a['scale']])

            # Shift the center downwards and enlarge the square crop slightly
            # to avoid cutting off body parts.
            if c[0] != -1:
                c[1] = c[1] + 15 * s[1]
                s = s * 1.25

            # MPII uses matlab-style 1-based indices;
            # we should first convert to 0-based index
            c = c - 1

            joints_3d = np.zeros((self.num_joints, 3), dtype=np.float32)
            joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float32)
            if os.path.basename(json_path).split(".")[0] != 'test':
                joints = np.array(a['joints'])
                # Joint coordinates are also 1-based in the annotation file.
                joints[:, 0:2] = joints[:, 0:2] - 1
                joints_vis = np.array(a['joints_vis'])
                assert len(joints) == self.num_joints, \
                    'joint num diff: {} vs {}'.format(len(joints),
                                                      self.num_joints)

                joints_3d[:, 0:2] = joints[:, 0:2]
                joints_3d_vis[:, 0] = joints_vis[:]
                joints_3d_vis[:, 1] = joints_vis[:]

            gt_db.append({
                'image': os.path.join(image_path, image_name),
                'center': c,
                'scale': s,
                'joints_3d': joints_3d,
                'joints_3d_vis': joints_3d_vis,
                'filename': image_name,
                'imgnum': index,
                })

        return gt_db

    def __len__(self):
        return len(self.db)

    @staticmethod
    def _fliplr_joints(joints, joints_vis, width, matched_parts):
        """Mirror joint x-coordinates horizontally and swap left/right pairs.

        NOTE(review): replaces the previously undefined ``fliplr_joints``
        (which raised NameError whenever flipping was enabled); implementation
        follows the standard MPII/HRNet helper.
        """
        # Mirror x coordinates (0-based, so the last column is width-1).
        joints[:, 0] = width - joints[:, 0] - 1

        # Swap symmetric (left <-> right) joints.
        for pair in matched_parts:
            joints[pair[0], :], joints[pair[1], :] = \
                joints[pair[1], :], joints[pair[0], :].copy()
            joints_vis[pair[0], :], joints_vis[pair[1], :] = \
                joints_vis[pair[1], :], joints_vis[pair[0], :].copy()

        # Zero-out coordinates of invisible joints.
        return joints * joints_vis, joints_vis

    def __getitem__(self, idx):
        """Return (img, target, target_weight, meta) for sample ``idx``.

        img           -- cropped/warped image tensor (after ``transform``)
        target        -- gaussian heatmap labels
        target_weight -- 1 where the joint is visible/usable, else 0
        meta          -- dict with file name, joints, center, scale, etc.
        """
        db_rec = copy.deepcopy(self.db[idx])

        image_file = db_rec['image']
        filename = db_rec['filename']
        imgnum = db_rec['imgnum']

        image = cv2.imread(image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)

        if image is None:
            logger.error('=> fail to read {}'.format(image_file))
            raise ValueError('Fail to read {}'.format(image_file))

        joints = db_rec['joints_3d']
        joints_vis = db_rec['joints_3d_vis']

        # Center is already 0-based with the height adjustment from get_db.
        c = db_rec['center']
        s = db_rec['scale']
        score = 1
        r = 0

        # ---------------- training-time augmentation ----------------
        if self.is_train:
            sf = self.scale_factor
            rf = self.rotation_factor
            s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0

            if self.flip and random.random() <= 0.5:
                # BUGFIX: flip the image actually used below. The original
                # code flipped into a separate ``data_numpy`` array that was
                # never warped, so joints/center were mirrored but pixels
                # were not.
                image = image[:, ::-1, :]
                joints, joints_vis = self._fliplr_joints(
                    joints, joints_vis, image.shape[1], self.flip_pairs)
                c[0] = image.shape[1] - c[0] - 1

        # Affine warp: crop/scale/rotate the person region into a fixed-size
        # network input; the keypoints are mapped through the same transform.
        trans = get_affine_transform(c, s, r, self.image_size)
        img = cv2.warpAffine(image, trans,
                             (int(self.image_size[0]), int(self.image_size[1])),
                             flags=cv2.INTER_LINEAR)
        if self.transform:
            img = self.transform(img)

        # Apply the same affine transform to the (visible) keypoints.
        for i in range(self.num_joints):
            if joints_vis[i, 0] > 0.0:
                joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)

        # Ground truth: heatmaps target[num_joints, H, W] and
        # target_weight[num_joints, 1].
        target, target_weight = self.generate_target(joints, joints_vis)
        target = torch.from_numpy(target)
        target_weight = torch.from_numpy(target_weight)

        meta = {
            'image': image_file,
            'filename': filename,
            'imgnum': imgnum,
            'joints': joints,
            'joints_vis': joints_vis,
            'center': c,
            'scale': s,
            'rotation': r,
            'score': score
        }

        return img, target, target_weight, meta

    def generate_target(self, joints, joints_vis):
        """Build gaussian heatmap targets for one sample.

        :param joints: [num_joints, 3] joint coordinates (input-image space)
        :param joints_vis: [num_joints, 3] joint visibility flags
        :return: (target, target_weight) where target_weight is
                 1 = visible, 0 = invisible/out-of-bounds
        """
        target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
        target_weight[:, 0] = joints_vis[:, 0]

        # Only gaussian-shaped heatmaps are implemented.
        assert self.target_type == 'gaussian', \
            'Only support gaussian map now!'

        if self.target_type == 'gaussian':
            target = np.zeros((self.num_joints,
                               self.heatmap_size[1],
                               self.heatmap_size[0]),
                              dtype=np.float32)

            # The gaussian patch extends 3*sigma in each direction.
            tmp_size = self.sigma * 3
            for joint_id in range(self.num_joints):
                # Stride from input-image resolution to heatmap resolution.
                feat_stride = [item / self.heatmap_size[i] for i, item in enumerate(self.image_size)]
                # Joint position mapped into heatmap coordinates (rounded).
                mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
                mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
                # Top-left / bottom-right corners of the gaussian patch.
                ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
                br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
                # If the patch lies entirely outside the heatmap, mark the
                # joint as unusable and skip it.
                if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
                        or br[0] < 0 or br[1] < 0:
                    target_weight[joint_id] = 0
                    continue

                # Generate the (unnormalized) gaussian: peak value 1 at the
                # patch center, decaying with distance.
                size = 2 * tmp_size + 1
                x = np.arange(0, size, 1, np.float32)
                y = x[:, np.newaxis]
                x0 = y0 = size // 2
                g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))

                # Usable gaussian range, clipped to the heatmap bounds.
                g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
                g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
                # Corresponding destination range inside the heatmap.
                img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
                img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])

                # Only paint the gaussian for visible joints.
                v = target_weight[joint_id]
                if v > 0.5:
                    target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
                        g[g_y[0]:g_y[1], g_x[0]:g_x[1]]

        return target, target_weight

    def evaluate(self, preds, output_dir, *args, **kwargs):
        """Compute PCKh metrics against the MPII ground-truth .mat file.

        :param preds: ndarray (num_images, num_joints, 3) predicted keypoints
        :param output_dir: directory where pred.mat is saved (may be falsy)
        :return: (OrderedDict of named PCKh scores, mean PCKh)
        """
        # Convert 0-based indices back to MPII's matlab-style 1-based ones.
        preds = preds[:, :, 0:2] + 1.0

        if output_dir:
            pred_file = os.path.join(output_dir, 'pred.mat')
            savemat(pred_file, mdict={'preds': preds})

        SC_BIAS = 0.6
        threshold = 0.5

        # Load the ground-truth annotations (.mat).
        gt_file = os.path.join(cfg.test_mpii_mat)
        gt_dict = loadmat(gt_file)

        dataset_joints = gt_dict['dataset_joints']
        # jnt_missing: 1 = joint missing, 0 = joint present.
        jnt_missing = gt_dict['jnt_missing']
        pos_gt_src = gt_dict['pos_gt_src']
        headboxes_src = gt_dict['headboxes_src']

        pos_pred_src = np.transpose(preds, [1, 2, 0])

        # Look up the column index of each named joint.
        head = np.where(dataset_joints == 'head')[1][0]
        lsho = np.where(dataset_joints == 'lsho')[1][0]
        lelb = np.where(dataset_joints == 'lelb')[1][0]
        lwri = np.where(dataset_joints == 'lwri')[1][0]
        lhip = np.where(dataset_joints == 'lhip')[1][0]
        lkne = np.where(dataset_joints == 'lkne')[1][0]
        lank = np.where(dataset_joints == 'lank')[1][0]

        rsho = np.where(dataset_joints == 'rsho')[1][0]
        relb = np.where(dataset_joints == 'relb')[1][0]
        rwri = np.where(dataset_joints == 'rwri')[1][0]
        rkne = np.where(dataset_joints == 'rkne')[1][0]
        rank = np.where(dataset_joints == 'rank')[1][0]
        rhip = np.where(dataset_joints == 'rhip')[1][0]

        # jnt_visible: 1 = visible, 0 = not visible.
        jnt_visible = 1 - jnt_missing
        # Per-joint L2 error between prediction and ground truth.
        uv_error = pos_pred_src - pos_gt_src
        uv_err = np.linalg.norm(uv_error, axis=1)
        # Head-box diagonal defines the PCKh normalization scale.
        headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]
        headsizes = np.linalg.norm(headsizes, axis=0)
        headsizes *= SC_BIAS
        scale = np.multiply(headsizes, np.ones((len(uv_err), 1)))
        scaled_uv_err = np.divide(uv_err, scale)
        scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)
        jnt_count = np.sum(jnt_visible, axis=1)
        less_than_threshold = np.multiply((scaled_uv_err <= threshold),
                                          jnt_visible)
        PCKh = np.divide(100. * np.sum(less_than_threshold, axis=1), jnt_count)

        # PCK at every threshold in [0, 0.5] with step 0.01 (for Mean@0.1).
        rng = np.arange(0, 0.5 + 0.01, 0.01)
        pckAll = np.zeros((len(rng), 16))

        for r in range(len(rng)):
            threshold = rng[r]
            less_than_threshold = np.multiply(scaled_uv_err <= threshold,
                                              jnt_visible)
            pckAll[r, :] = np.divide(100. * np.sum(less_than_threshold, axis=1),
                                     jnt_count)

        # Mask out pelvis/thorax (joints 6 and 7) from the averages.
        PCKh = np.ma.array(PCKh, mask=False)
        PCKh.mask[6:8] = True

        jnt_count = np.ma.array(jnt_count, mask=False)
        jnt_count.mask[6:8] = True
        jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)

        name_value = [
            ('Head', PCKh[head]),
            ('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),
            ('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
            ('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
            ('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
            ('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
            ('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
            ('Mean', np.sum(PCKh * jnt_ratio)),
            ('Mean@0.1', np.sum(pckAll[11, :] * jnt_ratio))
        ]
        name_value = OrderedDict(name_value)

        return name_value, name_value['Mean']

    def select_data(self, db):
        """Filter out samples whose visible joints sit implausibly far from
        the annotated person center (a data-cleaning heuristic)."""
        db_selected = []
        for rec in db:
            num_vis = 0
            joints_x = 0.0
            joints_y = 0.0
            for joint, joint_vis in zip(
                    rec['joints_3d'], rec['joints_3d_vis']):
                if joint_vis[0] <= 0:
                    continue
                num_vis += 1

                joints_x += joint[0]
                joints_y += joint[1]
            if num_vis == 0:
                continue

            # Centroid of the visible joints.
            joints_x, joints_y = joints_x / num_vis, joints_y / num_vis

            area = rec['scale'][0] * rec['scale'][1] * (self.pixel_std ** 2)
            joints_center = np.array([joints_x, joints_y])
            bbox_center = np.array(rec['center'])
            diff_norm2 = np.linalg.norm((joints_center - bbox_center), 2)
            # OKS-style score: decays with centroid/bbox-center distance,
            # normalized by the person's area.
            ks = np.exp(-1.0 * (diff_norm2 ** 2) / ((0.2) ** 2 * 2.0 * area))

            # Acceptance threshold grows with the number of visible joints.
            metric = (0.2 / 16) * num_vis + 0.45 - 0.2 / 16
            if ks > metric:
                db_selected.append(rec)

        logger.info('=> num db: {}'.format(len(db)))
        logger.info('=> num selected db: {}'.format(len(db_selected)))
        return db_selected

if __name__ == '__main__':
    # Smoke test: load one sample and print the tensor shapes.
    dataset = MPIIDataset()
    img, target, target_weight, meta = dataset[0]
    print(img.shape)
    print(target.shape)
    print(target_weight.shape)
    # BUGFIX: meta is a dict and has no .shape attribute — the original
    # print(output[3].shape) raised AttributeError. Print its keys instead.
    print(sorted(meta.keys()))