# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang
# ####################datalist.py 说明##########################
# this script is used for the train.py to prepare the data
import colorsys
import json

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import transforms

# Fixed-size resize pipeline.
# FIX: torchvision's Resize takes the target size as ONE argument (int or
# (h, w) tuple); the original `Resize(256, 256)` passed the second 256 into
# the `interpolation` parameter. A (256, 256) tuple resizes to exactly 256x256.
data_resize = transforms.Compose(
    [
        transforms.Resize((256, 256))
    ]
)

# PIL image -> float tensor, normalised with the standard ImageNet mean/std.
data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Photometric augmentation; currently unused (its call site in __getitem__ is
# commented out).
color_jitter = transforms.Compose([
    transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5)
])

# One fully saturated hue per joint: 16 evenly spaced points on the HSV wheel.
hsv_tuples = [(x / 16, 1., 1.) for x in range(16)]

# 8-bit RGB tuples used to draw the 16 joint markers.
colors = [
    tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(h, s, v))
    for h, s, v in hsv_tuples
]

# Skeleton edges as pairs of joint indices (MPII ordering, see `mpii` below).
joint_connected = [
    [9, 8], [8, 7], [12, 7], [13, 7], [11, 12], [10, 11], [14, 13], [15, 14],
    [6, 7], [2, 6], [3, 6], [1, 2], [4, 3], [0, 1], [5, 4],
]

# Joint names in MPII annotation order (index 0 = right ankle, ...).
mpii = [
    "r ankle", "r knee", "r hip", "l hip", "l knee", "l ankle",
    "pelvis", "thorax", "upper neck", "head top",
    "r wrist", "r elbow", "r shoulder", "l shoulder", "l elbow", "l wrist",
]

# One distinct hue per skeleton edge, again as 8-bit RGB tuples.
hsv_tuples_lines = [(x / len(joint_connected), 1., 1.)
                    for x in range(len(joint_connected))]
colors_line = [
    tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(h, s, v))
    for h, s, v in hsv_tuples_lines
]


class MPII(Dataset):
    """MPII single-person pose dataset.

    Each item is built by cropping the annotated person out of the full image,
    letterboxing the crop onto a grey 256x256 canvas, and rendering one 64x64
    Gaussian heatmap per joint (16 joints in MPII order — see the module-level
    ``mpii`` list).
    """

    def __init__(self, data, mode="train",
                 ann_path=r"E:\Datasets\mpii\annot\trainval.json"):
        """Load the annotation file and index it by image filename.

        :param data: sequence of strings; each one is eval'ed in
            ``__getitem__`` into a dict with at least ``"filename"`` and
            ``"head_rect"`` keys.
        :param mode: ``"train"`` returns the annotated person scale; any other
            value returns the PCKh head-box scale instead.
        :param ann_path: path to the MPII ``trainval.json`` annotation file.
            Previously hard-coded; the old path is kept as the default for
            backward compatibility.
        """
        self.data = data
        self.mode = mode

        # FIX: use a context manager — the handle was previously never closed.
        with open(ann_path, "r", encoding="utf-8") as f:
            self.train_val_json = json.load(f)

        # filename -> [center, scale, joints, joints_vis]
        self.center_scale = {}
        for j in self.train_val_json:
            self.center_scale[j['image']] = [j['center'], j['scale'], j["joints"], j["joints_vis"]]

    def __getitem__(self, index):
        # SECURITY NOTE(review): eval() executes arbitrary code from the data
        # file — switch to ast.literal_eval / json.loads once the record format
        # is confirmed. Kept as eval here so accepted input is unchanged.
        # NOTE: storing the parsed record on `self` makes the instance stateful
        # across __getitem__ calls; preserved for backward compatibility.
        self.data_index = eval(self.data[index])
        img_path = "E:/Datasets/mpii/images/" + self.data_index["filename"]
        joints = self.center_scale[self.data_index["filename"]][2]
        joint_vis = self.center_scale[self.data_index["filename"]][3]
        scale = self.center_scale[self.data_index["filename"]][1]
        center = self.center_scale[self.data_index["filename"]][0]
        head_box = self.data_index["head_rect"]

        image = Image.open(img_path).convert("RGB")

        joints = np.array(joints)
        joint_vis = np.array(joint_vis)

        # No visible joint at all: return the resized frame with an all-zero
        # heatmap target.
        if sum(joint_vis) == 0:
            image = image.resize([256, 256], Image.BICUBIC)
            return data_transform(image), torch.zeros([16, 64, 64]), torch.tensor(joint_vis), torch.tensor(scale)

        # Crop method 1: tight bounding box around the visible joints, padded
        # by `offset` pixels and clamped to the image borders.
        try:
            offset = 100
            top, left, bottom, right = max(int(min(joints[joint_vis == 1][:, 1]) - offset), 0), \
                                       max(int(min(joints[joint_vis == 1][:, 0]) - offset), 0), \
                                       min(int(max(joints[joint_vis == 1][:, 1]) + offset), image.height), \
                                       min(int(max(joints[joint_vis == 1][:, 0]) + offset), image.width)
        except (ValueError, IndexError):
            # FIX: was a bare `except:`. Fallback (crop method 2): square box
            # around the annotated center using the MPII scale (200 px units).
            top, left, bottom, right = max(int(center[1] - scale * 200 / 2), 0), \
                                       max(int(center[0] - scale * 200 / 2), 0), \
                                       min(int(center[1] + scale * 200 / 2), image.height), \
                                       min(int(center[0] + scale * 200 / 2), image.width)

        # Crop the person; (left, top) becomes the new coordinate origin.
        image = image.crop((left, top, right, bottom))
        joints[:, 0] = (joints[:, 0] - left)
        joints[:, 1] = (joints[:, 1] - top)

        # Letterbox onto a grey 256x256 canvas, preserving the aspect ratio.
        new_image = Image.new(mode="RGB", size=[256, 256], color=(128, 128, 128))

        ratio = image.height / image.width
        if ratio >= 1:
            new_height = 256
            new_width = int(256 / ratio)
        else:
            new_width = 256
            new_height = int(256 * ratio)
        image = image.resize((new_width, new_height))
        # Rescale joints from crop coordinates to resized-image coordinates.
        joints[:, 0] = joints[:, 0] / (right - left) * new_width
        joints[:, 1] = joints[:, 1] / (bottom - top) * new_height
        new_image_hw = (new_height, new_width)

        # Placeholder for rotation augmentation; the angle is fixed at 0, so
        # this rotate is currently a no-op.
        rotate_angle = 0
        image = image.rotate(-rotate_angle, fillcolor=(128, 128, 128))

        # Homogeneous coordinates (x, y, 1) — kept for the (currently disabled)
        # rotation transform of the joints.
        j_one = np.ones([joints.shape[0], 1])
        joints = np.hstack([joints, j_one])

        joints_M = joints

        new_image.paste(image, (0, 0))
        target = MPII.draw_map(joints_M, joint_vis, new_image_hw)

        new_image = data_transform(new_image)
        if self.mode == "train":
            return new_image, target, torch.tensor(joint_vis), torch.tensor(scale)
        else:
            # Evaluation mode: replace the person scale with the head-box
            # diagonal (PCKh reference length) in heatmap (1/4) coordinates.
            left = head_box[0] // 4
            top = head_box[1] // 4
            right = head_box[2] // 4
            bottom = head_box[3] // 4

            scale = np.linalg.norm(np.array([right - left, bottom - top]), 2)
            return new_image, target, torch.tensor(joint_vis), torch.tensor(scale)

    def __len__(self):
        return len(self.data)

    @staticmethod
    def draw_map(joints, joint_vis, new_image_hw):
        """Render one 64x64 Gaussian heatmap per joint.

        :param joints: per-joint (x, y, ...) coordinates on the 256 canvas.
        :param joint_vis: per-joint visibility flags; only joints == 1 are drawn.
        :param new_image_hw: (height, width) of the pasted image region, used
            to reject joints that fall outside it.
        :return: tensor of shape (len(joint_vis), 64, 64).
        """
        n_height, n_width = new_image_hw[0], new_image_hw[1]
        num_joints = len(joint_vis)

        sigma = 1

        target = torch.zeros([num_joints, 64, 64])
        for index, jv in enumerate(joint_vis):
            if jv != 1:
                continue
            x0 = int(joints[index][0])
            y0 = int(joints[index][1])
            if x0 < 0 or y0 < 0 or x0 > n_width or y0 > n_height:
                continue
            # Heatmaps live at 1/4 of the input resolution.
            target[index] = MPII.gaussian(target[index], x0 // 4, y0 // 4, sigma, n_height // 4, n_width // 4)

        return target

    @staticmethod
    def draw(joints, joint_vis, new_image_hw, image):
        """Debug helper: draw each visible joint as a colored dot and show the
        image on screen. Not used by the training pipeline."""
        from PIL import ImageDraw
        image_draw = ImageDraw.Draw(image)
        for index, jv in enumerate(joint_vis):
            if jv != 1:
                continue
            x0 = int(joints[index][0])
            y0 = int(joints[index][1])
            if x0 < 0 or y0 < 0:
                continue
            image_draw.ellipse(
                (joints[index][0] - 5, joints[index][1] - 5, joints[index][0] + 5, joints[index][1] + 5),
                colors[index])

        image.show()

    @staticmethod
    def gaussian(target, x0, y0, sigma, height, width):
        """Paste a (6*sigma+1)-sided Gaussian peak centered at (x0, y0) into
        `target`, clipped to the (height, width) bounds; returns `target`.

        NOTE(review): the hard-coded patch radius 3 below matches sigma == 1
        only — confirm before calling with a different sigma.
        """
        size = 6 * sigma + 1
        x = np.arange(0, size, 1, float)
        y = x[:, np.newaxis]
        x_center = y_center = size // 2

        g = np.exp(- ((x - x_center) ** 2 + (y - y_center) ** 2) / (2 * sigma ** 2))

        g = torch.tensor(g)

        # Intersection of the Gaussian patch with the heatmap area.
        img_left = max(x0 - 3, 0)
        img_right = min(x0 + 3 + 1, width)
        img_up = max(y0 - 3, 0)
        img_bottom = min(y0 + 3 + 1, height)

        # Matching sub-window of the Gaussian patch.
        g_left = 3 - (x0 - img_left)
        g_right = 3 + (img_right - x0)
        g_up = 3 - (y0 - img_up)
        g_bottom = 3 + (img_bottom - y0)

        target[img_up:img_bottom, img_left:img_right] = g[g_up:g_bottom, g_left:g_right]

        return target


if __name__ == "__main__":
    # FIX(review): the original called MPII(json_path=...), but __init__ has no
    # `json_path` parameter (its first positional is `data`), so this always
    # raised a TypeError. Feed the file's lines as `data`, matching
    # __getitem__'s per-record eval(self.data[index]) usage.
    # TODO(review): confirm data/train.json is one record per line rather than
    # a single JSON array.
    with open("data/train.json", "r", encoding="utf-8") as f:
        dataset = MPII(data=f.read().splitlines())
