# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang


# #################### test.py notes ####################
# This script runs single-image inference (demo/testing) of the trained pose model.
import colorsys

import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from torch.backends import cudnn

from config import GetConfig
from datalist import data_transform
from model import preresnet56

# 16 visually distinct joint colors: hues evenly spaced around the HSV wheel,
# converted to 8-bit RGB tuples. (Comprehensions replace the original
# map(lambda ...) chains.)
hsv_tuples = [(x / 16, 1., 1.) for x in range(16)]
colors = [tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(*hsv))
          for hsv in hsv_tuples]

# Limb definitions as pairs of MPII joint indices (see `mpii` below for the
# index -> joint-name mapping), e.g. [9, 8] links "head top" to "upper neck".
joint_connected = [
    [9, 8], [8, 7], [12, 7], [13, 7], [11, 12], [10, 11], [14, 13], [15, 14],
    [6, 7], [2, 6], [3, 6], [1, 2], [4, 3], [0, 1], [5, 4],
]

# One distinct color per limb, built the same way as the joint palette.
hsv_tuples_lines = [(x / len(joint_connected), 1., 1.)
                    for x in range(len(joint_connected))]
colors_line = [tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(*hsv))
               for hsv in hsv_tuples_lines]

# MPII joint names, in dataset index order (0..15).
mpii = ["r ankle", "r knee", "r hip", "l hip", "l knee", "l ankle", "pelvis",
        "thorax", "upper neck", "head top", "r wrist", "r elbow", "r shoulder",
        "l shoulder", "l elbow", "l wrist"]


class Inference:
    """Single-image MPII pose inference.

    Loads the trained network once, then offers two demo entry points:
    ``inference_valid`` (crop derived from ground-truth joints, result drawn
    on the full original image) and ``inference_test`` (crop derived from the
    MPII center/scale annotation, result drawn on the padded network input).
    """

    # Root directory of the MPII image files.
    # NOTE(review): hard-coded Windows path from the original script — adjust
    # for your environment (previously duplicated in both inference methods).
    IMAGE_ROOT = "E:/Datasets/mpii/images/"

    # Side length of the square network input.
    INPUT_SIZE = 256
    # Heatmap coordinates * 4 map back to INPUT_SIZE pixels, i.e. the network
    # predicts heatmaps at 1/4 input resolution (64x64 for a 256 input).
    HEATMAP_STRIDE = 4
    # Minimum heatmap peak value for a joint to be drawn.
    SCORE_THRESHOLD = 0.005

    def __init__(self):
        """Build the model, load pretrained weights, switch to eval mode."""
        self.args = GetConfig()
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')

        self.model = preresnet56(num_classes=16)
        if use_cuda:
            self.model = torch.nn.DataParallel(
                self.model,
                device_ids=range(torch.cuda.device_count())).to(self.device)
            cudnn.enabled = True
            cudnn.benchmark = True

        print("load the weight from pretrained-weight file")
        model_dict = self.model.state_dict()
        pretrained_dict = torch.load(self.args.pretrained_weight,
                                     map_location=self.device)['model_state_dict']
        # Keep only checkpoint entries whose key exists in the current model
        # AND whose shape matches. (The original omitted the membership check
        # and raised KeyError on any extra checkpoint key.)
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)
        print("Finished to load the weight")

        self.model.eval()

    @staticmethod
    def _resize_keep_aspect(image):
        """Fit *image* into a grey INPUT_SIZE x INPUT_SIZE canvas.

        The image is scaled so its longer side equals INPUT_SIZE (aspect
        preserved) and pasted top-left onto the canvas.

        Returns:
            (canvas, (new_height, new_width)): the padded PIL image and the
            size of the resized content before padding.
        """
        size = Inference.INPUT_SIZE
        ratio = image.height / image.width
        if ratio >= 1:
            new_height = size
            new_width = int(size / ratio)
        else:
            new_width = size
            new_height = int(size * ratio)
        image = image.resize((new_width, new_height))
        canvas = Image.new(mode="RGB", size=(size, size), color=(128, 128, 128))
        canvas.paste(image, (0, 0))
        return canvas, (new_height, new_width)

    def _predict(self, pil_image):
        """Run the network on a padded INPUT_SIZE PIL image.

        Returns:
            ans: list of [row, col] argmax locations, one per joint heatmap.
            score: the heatmap value at each argmax (used as confidence).
        """
        tensor = data_transform(pil_image).unsqueeze(0).to(self.device)
        heatmaps = self.model(tensor).cpu().numpy()[0]
        ans = [list(np.unravel_index(np.argmax(hm), hm.shape)) for hm in heatmaps]
        score = [heatmaps[i, r, c].item() for i, (r, c) in enumerate(ans)]
        return ans, score

    @torch.no_grad()
    def inference_valid(self, json_data):
        """Run inference on a validation sample that carries ground truth.

        The visible ground-truth joints are used only to compute a padded
        crop box around the person; predictions are mapped back to original
        image coordinates, drawn on the full image, shown, and saved to
        result1.jpg.

        Args:
            json_data: dict with "image", "joints", "joints_vis" keys
                ("scale"/"center" are present in MPII records but unused here).
        """
        img_path = self.IMAGE_ROOT + json_data["image"]
        joints = np.array(json_data["joints"])
        joint_vis = np.array(json_data["joints_vis"])

        image = Image.open(img_path).convert("RGB")
        # Draw on the original, uncropped image object.
        final_image = image
        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(1e-2 * np.shape(final_image)[1] + 0.5).astype('int32'))

        # Bounding box around the visible joints, padded by `margin` pixels
        # and clamped to the image borders.
        margin = 100
        visible = joints[joint_vis == 1]
        top = max(int(visible[:, 1].min() - margin), 0)
        left = max(int(visible[:, 0].min() - margin), 0)
        bottom = min(int(visible[:, 1].max() + margin), image.height)
        right = min(int(visible[:, 0].max() + margin), image.width)

        # Crop the person out; (left, top) becomes the origin of crop coords.
        image = image.crop((left, top, right, bottom))
        ori_image_hw = (bottom - top, right - left)
        padded, new_image_hw = self._resize_keep_aspect(image)

        ans, score = self._predict(padded)
        print(score)

        image_draw = ImageDraw.Draw(final_image)
        radius = 2

        # Map heatmap-space predictions back to original-image coordinates.
        ans = Inference.location_convert(ans, (left, top), ori_image_hw, new_image_hw)

        for index, (row, col) in enumerate(ans):
            if score[index] >= self.SCORE_THRESHOLD:
                image_draw.ellipse((col - radius, row - radius,
                                    col + radius, row + radius),
                                   colors[index])
                image_draw.text((col, row), mpii[index], fill=colors[index], font=font)

        for index, (a, b) in enumerate(joint_connected):
            if score[a] >= self.SCORE_THRESHOLD and score[b] >= self.SCORE_THRESHOLD:
                image_draw.line([(ans[a][1], ans[a][0]), (ans[b][1], ans[b][0])],
                                fill=colors_line[index], width=1)

        final_image.show()
        final_image.save("result1.jpg")

    @torch.no_grad()
    def inference_test(self, json_data):
        """Run inference on a test sample described only by center/scale.

        MPII "scale" is relative to a 200-pixel person height, so the crop is
        a square of side scale*200 around "center", clamped to the image.
        Predictions are drawn on the padded 256x256 network input and shown.

        Args:
            json_data: dict with "image", "scale", "center" keys.
        """
        image_path = self.IMAGE_ROOT + json_data["image"]
        center = json_data["center"]
        side = json_data["scale"] * 200  # MPII scale is in units of 200 px

        # convert("RGB") added for consistency with inference_valid (the
        # original opened the raw image, so mode depended on the file).
        image = Image.open(image_path).convert("RGB")

        left = max(int(center[0] - side / 2), 0)
        top = max(int(center[1] - side / 2), 0)
        right = min(int(center[0] + side / 2), image.width)
        bottom = min(int(center[1] + side / 2), image.height)

        image = image.crop((left, top, right, bottom))
        padded, _ = self._resize_keep_aspect(image)

        ans, score = self._predict(padded)
        print(score)

        imagedraw = ImageDraw.Draw(padded)
        radius = 2
        stride = self.HEATMAP_STRIDE
        for index, (row, col) in enumerate(ans):
            if score[index] >= self.SCORE_THRESHOLD:
                imagedraw.ellipse((col * stride - radius, row * stride - radius,
                                   col * stride + radius, row * stride + radius),
                                  colors[index])
        for index, (a, b) in enumerate(joint_connected):
            if score[a] >= self.SCORE_THRESHOLD and score[b] >= self.SCORE_THRESHOLD:
                imagedraw.line([(ans[a][1] * stride, ans[a][0] * stride),
                                (ans[b][1] * stride, ans[b][0] * stride)],
                               fill=colors_line[index], width=1)

        padded.show()

    @staticmethod
    def location_convert(locs, lt, ori_image_hw, new_image_hw):
        """Map heatmap argmax locations back to original-image coordinates.

        Each location is scaled from heatmap space to network-input space
        (* HEATMAP_STRIDE), then to crop space (ori/new ratio), then shifted
        by the crop origin.

        Args:
            locs: list of [row, col] heatmap coordinates (mutated in place).
            lt: (left, top) of the crop box in the original image.
            ori_image_hw: (height, width) of the cropped region.
            new_image_hw: (height, width) of the resized, unpadded content.

        Returns:
            The same list with entries rewritten as original-image
            [row, col] floats.
        """
        stride = Inference.HEATMAP_STRIDE
        for loc in locs:
            loc[0] = loc[0] * stride * (ori_image_hw[0] / new_image_hw[0]) + lt[1]
            loc[1] = loc[1] * stride * (ori_image_hw[1] / new_image_hw[1]) + lt[0]
        return locs


if __name__ == "__main__":
    # Build the inference engine once, then exercise both demo modes on the
    # same MPII sample image.
    engine = Inference()

    sample = {
        "image": "086617615.jpg",
        "scale": 2.51531,
        "center": [472.0, 377.0],
        "joints_vis": [1] * 16,
        "joints": [
            [515.0, 512.0], [514.0, 420.0], [406.0, 388.0], [392.0, 360.0],
            [493.0, 434.0], [518.0, 504.0], [399.0, 374.0], [498.0, 317.0],
            [504.5953, 315.1758], [585.4047, 292.8242], [628.0, 426.0],
            [551.0, 398.0], [501.0, 351.0], [495.0, 282.0], [425.0, 301.0],
            [483.0, 334.0],
        ],
    }
    # Validation-style inference: uses ground-truth joints to crop the person.
    engine.inference_valid(sample)

    # Test-style inference: only needs image / scale / center.
    engine.inference_test({
        "image": "086617615.jpg",
        "scale": 2.51531,
        "center": [472.0, 377.0],
    })
