import copy
import logging
import os.path
from collections import OrderedDict
from scipy.io import loadmat, savemat

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
import config as cfg
from Utils.transforms import fliplr_joints,get_affine_transform,affine_transform
import random

class VehicleKeyPoint(Dataset):
    """Vehicle keypoint dataset.

    Parses an annotation text file where each line is
    ``<image_path> <kp> <kp> ...`` (each ``<kp>`` token ends with a one-char
    visibility flag — see :meth:`get_data`) and yields
    ``(input, target, target_weight, meta)`` tuples, where ``target`` is one
    unnormalized Gaussian heatmap per keypoint.
    """

    def __init__(self, txt_path: str, transform=None):
        """
        :param txt_path: path to the annotation text file
        :param transform: optional callable applied to the resized image
        """
        print("---------------loading data ... -----------------------")
        super(VehicleKeyPoint, self).__init__()
        self.is_test = cfg.vehicle_is_test          # test split carries no keypoint labels
        self.image_size = (cfg.img_size, cfg.img_size)
        self.heatmap_size = (cfg.heatmap_size, cfg.heatmap_size)
        self.sigma = 2                              # Gaussian std-dev, in heatmap pixels
        self.transform = transform
        self.scale_factor = 0.25
        self.rotation_factor = 30

        self.dataset = self.get_data(txt_path)      # parse all annotations up front

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        """Return ``(input, target, target_weight, meta)`` for one sample.

        :raises ValueError: if the sample's image file cannot be read
        """
        # Deep-copy so in-place coordinate scaling below never mutates the
        # cached annotation record.
        rect_item = copy.deepcopy(self.dataset[index])
        img_path = rect_item['image']
        filename = rect_item['filename']
        imgnum = rect_item['imgnum']
        joints = rect_item['joints_3d']             # (num_keypoints, 3) coordinates
        joints_vis = rect_item['joints_3d_vis']     # per-joint visibility weights

        image = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        # BUGFIX: validate the read *before* touching image.shape — previously a
        # missing file raised AttributeError instead of the intended ValueError.
        if image is None:
            raise ValueError("Failed to read {}".format(img_path))
        h, w = image.shape[0], image.shape[1]

        s = rect_item['scale']
        c = rect_item['center']
        score = rect_item.get('score', 1)
        r = 0  # rotation augmentation is not applied in this pipeline

        input = cv2.resize(image, self.image_size)
        # Rescale keypoint coordinates to match the resized image.
        for i in range(cfg.num_keypoints):
            joints[i, 0] = joints[i, 0] * (self.image_size[0] / w)
            joints[i, 1] = joints[i, 1] * (self.image_size[1] / h)

        if self.transform:
            input = self.transform(input)

        # Ground-truth heatmaps and per-joint loss weights, as tensors.
        target, target_weight = self.generate_target(joints, joints_vis)
        target = torch.from_numpy(target)
        target_weight = torch.from_numpy(target_weight)
        meta = {
            'image': img_path,
            'filename': filename,
            'imgnum': imgnum,
            'joints': joints,
            'joints_vis': joints_vis,
            'center': c,
            'scale': s,
            'rotation': r,
            'score': score,
        }

        return input, target, target_weight, meta

    def affine_transform(self, pt, t):
        """Apply a 2x3 affine matrix ``t`` to the 2-D point ``pt``; return (x, y)."""
        new_pt = np.array([pt[0], pt[1], 1.]).T
        new_pt = np.dot(t, new_pt)
        return new_pt[:2]

    def generate_target(self, joints, joints_vis):
        '''
        Build one Gaussian heatmap per keypoint.

        :param joints:  [num_joints, 3]
        :param joints_vis: [num_joints, 3]
        :return: target, target_weight(1: visible, 0: invisible)
        '''
        target_weight = np.ones((cfg.num_keypoints, 1), dtype=np.float32)
        target_weight[:, 0] = joints_vis[:, 0]

        target = np.zeros((cfg.num_keypoints,
                           self.heatmap_size[1],
                           self.heatmap_size[0]),
                          dtype=np.float32)

        tmp_size = self.sigma * 3  # Gaussian support radius (3 sigma)

        # Loop-invariant work hoisted out of the per-joint loop:
        # stride from input-image space down to heatmap space ...
        feat_stride = [item / self.heatmap_size[i] for i, item in enumerate(self.image_size)]
        # ... and the Gaussian patch itself, identical for every joint.
        # It is not normalized: the center value equals 1.
        size = 2 * tmp_size + 1
        x = np.arange(0, size, 1, np.float32)
        y = x[:, np.newaxis]
        x0 = y0 = size // 2
        g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))

        for joint_id in range(cfg.num_keypoints):
            mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
            mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
            # Patch corners in heatmap coordinates (upper-left, bottom-right).
            ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
            br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
            # Zero the weight and skip joints whose Gaussian lies entirely
            # outside the heatmap.
            if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
                    or br[0] < 0 or br[1] < 0:
                target_weight[joint_id] = 0
                continue

            # Clip the Gaussian patch against the heatmap bounds:
            # usable range inside g ...
            g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
            g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
            # ... and the matching destination range inside the heatmap.
            img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
            img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])

            if target_weight[joint_id] > 0.5:
                target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
                    g[g_y[0]:g_y[1], g_x[0]:g_x[1]]

        return target, target_weight

    def get_data(self, txt_path):
        """Parse the annotation file into a list of per-sample dicts.

        Each line: ``<image_path> <kp> <kp> ...``. Each ``<kp>`` token's last
        character is the occlusion flag and the leading ``x,y`` part is taken
        by dropping the final two characters (presumably the token is
        ``x,y,v`` with a single-char ``v`` — TODO confirm annotation format).

        :raises ValueError: on an unreadable image or a keypoint-count mismatch
        """
        with open(txt_path) as f:
            lines = f.readlines()

        records = []
        for index, line in enumerate(lines):
            img_path = line.split(" ", 1)[0].strip()    # absolute image path
            image = cv2.imread(img_path)
            # BUGFIX: previously `.shape` was taken without a None check, so a
            # bad path crashed with AttributeError instead of a clear error.
            if image is None:
                raise ValueError("Failed to read {}".format(img_path))
            h, w, _ = image.shape
            keypoint = line.split(" ", 1)[1].strip()
            # Single pass over the tokens (the original split/iterated twice).
            kps = []    # [x, y] coordinates per keypoint
            masks = []  # occlusion flag per keypoint
            for token in keypoint.split(" "):
                kps.append([float(p) for p in token[:-2].split(",")])
                masks.append(float(token[-1]))

            joint_3d = np.zeros((cfg.num_keypoints, 3), dtype=np.float32)
            joints_3d_vis = np.zeros((cfg.num_keypoints, 3), dtype=np.float32)

            if not self.is_test:  # test split keeps all-zero labels
                joints = np.array(kps)
                joints_vis = np.array(masks)
                if len(joints) != cfg.num_keypoints:
                    raise ValueError(f"Keypoint num diff:{len(joints)} vs {cfg.num_keypoints}.\n"
                                     f"Error file:{line}")

                joint_3d[:, 0:2] = joints[:, 0:2]
                # NOTE(review): the flag is inverted into a weight, which suggests
                # 1 = occluded in the annotations — confirm against the data.
                joints_3d_vis[:, 0] = 1 - joints_vis[:]
                joints_3d_vis[:, 1] = 1 - joints_vis[:]

            records.append({
                'image': img_path,
                'center': np.array([w / 2 - 1, h / 2 - 1]),
                'scale': np.array([2, 2]),
                'joints_3d': joint_3d,
                'joints_3d_vis': joints_3d_vis,
                'filename': os.path.basename(img_path),
                'imgnum': index
            })
        return records

if __name__ == '__main__':
    # Smoke test: build the validation dataset and fetch one sample.
    dataset1 = VehicleKeyPoint(cfg.vehicle_valid_txt_path)
    # Idiomatic indexing instead of calling __getitem__ directly.
    y = dataset1[0]
    print("1")