# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     inference
   Description :   
   Author :       lth
   date：          2022/2/19
-------------------------------------------------
   Change Activity:
                   2022/2/19 6:14: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import colorsys

import cv2
import numpy as np
import torch
from PIL import Image, ImageFont, ImageDraw
from torch.backends import cudnn

from config import GetConfig
from datalist import predict_transform
from model import CenterNet
from train import get_classes
from utils import TargetEncode, OutputDecode


class CenterNetInference(object):
    """Single-image CenterNet detector.

    Loads the trained weights and class list described by the project
    config (``GetConfig``), runs one forward pass per image, draws the
    decoded boxes/labels on the original image and saves it to
    ``detect.png``.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()

        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.classes = get_classes(self.args.class_path)
        self.model = CenterNet(len(self.classes))
        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())).to(self.device)
            cudnn.benchmark = True

        print("load the weight from pretrained-weight file")
        # Keep only checkpoint tensors whose shapes match the current model,
        # so a checkpoint trained with a different head size still loads the
        # compatible layers instead of crashing.
        model_dict = self.model.state_dict()
        pretrained_dict = torch.load(self.args.pretrained_weight, map_location=self.device)['model_state_dict']
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)
        print("Finished to load the weight")

        self.target_encode = TargetEncode(image_size=self.args.image_size, class_num=len(self.classes))
        self.output_decode = OutputDecode(image_size=self.args.image_size, class_num=len(self.classes), conf_thres=0.3,
                                          nms_thres=0.01)

        # (height, width) of the network input, per the indexing used in
        # predict() below.
        self.image_size = self.args.image_size

        # One evenly spaced hue per class, converted HSV -> 0-255 RGB tuples;
        # self.colors is index-parallel with self.classes.
        hsv_tuples = [(x / len(self.classes), 1., 1.)
                      for x in range(len(self.classes))]
        self.colors = [tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(*hsv))
                       for hsv in hsv_tuples]

        self.model.eval()

    @torch.no_grad()
    def predict(self, image_path):
        """Detect objects in the image at ``image_path``.

        Draws the detections on the original-resolution image and writes
        the result to ``detect.png``. Returns ``None``; prints a message
        and returns early when nothing is detected.
        """
        image = Image.open(image_path).convert("RGB")

        # Keep the full-resolution image for drawing; `image` itself is
        # resized below for the network.
        final_image = image

        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
        height = image.height
        width = image.width

        # Resize preserving aspect ratio so the image fits the network
        # input; the remainder of the canvas is padded with grey.
        ratio = height / width
        if ratio > 1:
            new_height = self.image_size[0]
            new_width = self.image_size[1] / ratio
        else:
            new_width = self.image_size[1]
            new_height = self.image_size[0] * ratio

        image = image.resize((int(new_width), int(new_height)))

        # BUGFIX: use the configured input size (PIL size is (w, h)) instead
        # of a hard-coded (512, 512), so non-default image_size configs work.
        new_image = Image.new("RGB", (self.image_size[1], self.image_size[0]), (128, 128, 128))
        new_image.paste(image)

        # BUGFIX: move the input onto the model's device explicitly rather
        # than relying on DataParallel's scatter behavior.
        data = predict_transform(new_image).unsqueeze(0).to(self.device)

        outputs = self.model(data)
        outputs_heatmap, outputs_offset, outputs_wh = outputs[0], outputs[1], outputs[2]
        output = torch.cat([outputs_heatmap, outputs_offset, outputs_wh], dim=1)
        result = self.output_decode(output)

        # Each decoded row: x1 y1 x2 y2 class_conf class_pred conf
        print("<------------------------------------->")

        if result[0] is None:
            print("没有检测到物体")
            return

        print("一共" + str(len(result[0])) + "个物体")

        if len(result[0]) > 0:
            draw = ImageDraw.Draw(final_image)
            for detections in result:
                for r in detections:
                    # Map box coordinates from network-input space back to
                    # the original image, then clamp to the image bounds.
                    r[0] = max(r[0] * width / new_width, 0)
                    r[1] = max(r[1] * height / new_height, 0)
                    r[2] = min(r[2] * width / new_width, width)
                    r[3] = min(r[3] * height / new_height, height)

                    # BUGFIX: index colors directly by class id; the old
                    # classes.index(classes[id]) round-trip picked the wrong
                    # color whenever two classes shared a name.
                    class_id = int(r[-2].item())
                    color = self.colors[class_id]

                    label_content = '{} {:.4f}'.format(self.classes[class_id], r[4].item() * r[-1].item())
                    print(label_content, r[4].item(), r[-1].item(), r[-2].item())

                    draw.rectangle((r[0], r[1], r[2], r[3]), outline=color, width=3)

                    # Put the label just above the box, or just inside it
                    # when the box touches the top edge.
                    label_size = draw.textsize(label_content, font)
                    if r[1] - label_size[1] >= 0:
                        text_origin = np.array([r[0], r[1] - label_size[1]])
                    else:
                        text_origin = np.array([r[0], r[1] + 1])
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=color)
                    draw.text(text_origin, label_content, fill="black",
                              font=font)

        final_image = final_image.convert("RGBA")
        final_image.save("detect.png")


if __name__ == "__main__":
    model = CenterNetInference()
    model.predict(image_path="images/11.jpg")
