#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     inference
   Description :   
   Author :       lth
   date：          2022/6/24
-------------------------------------------------
   Change Activity:
                   2022/6/24 18:17: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import copy
import time

import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from torch.backends import cudnn

from .config import GetConfig
from .datalist import infer_transform
from .model import RetinaFace
from .utils import OutputDecode

# Radius in pixels of the landmark dots drawn with PIL in ``predict``.
radius = 3
# Outline colour names (PIL) for the five facial landmarks, in landmark order.
colors = ["blue", "green", "red", "yellow", "black"]
# Colour tuples for the five landmarks. Used as OpenCV drawing colours in
# ``predict_from_video`` (OpenCV reads them as BGR) and as PIL fill colours in
# ``predict`` (PIL reads them as RGB) — so the on-screen colour differs between
# the two code paths.
colors_cv = [(0, 255, 255), (255, 0, 0), (255, 0, 255), (125, 0, 255), (255, 125, 0)]


class RetinaFaceNetInference:
    """RetinaFace face-detection inference wrapper.

    Builds the model from :class:`GetConfig` settings, loads the pretrained
    checkpoint, and exposes detection helpers for in-memory PIL images
    (:meth:`predict_image`), image files (:meth:`predict`) and video files
    (:meth:`predict_from_video`).
    """

    # Side length of the square letterbox canvas every input is resized into.
    INPUT_SIZE = 840

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")

        # region runtime configuration
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        # endregion

        self.model = RetinaFace()
        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())).to(self.device)
            cudnn.benchmark = True

        print("load the weight from pretrained-weight file")
        model_dict = self.model.state_dict()
        pretrained_dict = torch.load(self.args.pretrained_weight, map_location=self.device)['model_state_dict']
        # Keep only checkpoint entries that exist in the freshly built model
        # AND have a matching shape — the `k in model_dict` guard prevents a
        # KeyError on checkpoints carrying extra keys.
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)
        self.model.to(self.device)
        print("Finished to load the weight")

        self.output_decode = OutputDecode()

    @staticmethod
    def _letterbox_size(width, height, target=840):
        """Return ``(nw, nh)``: *width* x *height* scaled (aspect preserved)
        so the longer side equals *target*."""
        ratio = width / height
        if ratio > 1:
            return target, int(target / ratio)
        return int(target * ratio), target

    @classmethod
    def _letterbox_pil(cls, img, target=840):
        """Resize PIL image *img* preserving aspect ratio and paste it at the
        top-left of a grey ``target`` x ``target`` canvas.

        Returns ``(canvas, nw, nh)`` where ``nw``/``nh`` is the size of the
        resized content (needed to rescale detections back afterwards).
        """
        nw, nh = cls._letterbox_size(img.width, img.height, target)
        resized = img.resize((nw, nh), Image.BICUBIC)
        canvas = Image.new("RGB", (target, target), (128, 128, 128))
        canvas.paste(resized, [0, 0])
        return canvas, nw, nh

    @torch.no_grad()
    def predict_image(self, image):
        """Detect faces in a PIL *image*.

        Returns a list of numpy arrays, one per decoded output, with box and
        landmark coordinates rescaled to the original image size.
        Columns 0-3 are the box (x1, y1, x2, y2); columns 7-16 are the five
        landmark (x, y) pairs — presumably columns 4-6 carry scores (decoder
        internals not visible here).
        """
        height, width = image.height, image.width
        canvas, nw, nh = self._letterbox_pil(image, self.INPUT_SIZE)

        tensor = infer_transform(canvas).unsqueeze(0).to(self.device)

        outputs = self.model(tensor)
        outputs = self.output_decode(tensor, outputs)

        res = []
        for output in outputs:
            if output is None:  # no detections for this image
                continue
            # Map letterbox coordinates back onto the original image.
            output[:, [0, 2, 7, 9, 11, 13, 15]] = output[:, [0, 2, 7, 9, 11, 13, 15]] * (width / nw)
            output[:, [1, 3, 8, 10, 12, 14, 16]] = output[:, [1, 3, 8, 10, 12, 14, 16]] * (height / nh)
            res.append(output.cpu().numpy())
        return res

    @torch.no_grad()
    def predict(self, image_path):
        """Detect faces in the image file at *image_path*, draw boxes and
        landmarks on it, display it and save it as ``result.jpg``."""
        img = Image.open(image_path).convert("RGB")
        height, width = img.height, img.width
        canvas, nw, nh = self._letterbox_pil(img, self.INPUT_SIZE)

        tensor = infer_transform(canvas).unsqueeze(0).to(self.device)

        outputs = self.model(tensor)
        outputs = self.output_decode(tensor, outputs)

        draw_image = ImageDraw.Draw(img)

        for output in outputs:
            if output is None:  # skip images with no detections (was a crash before)
                continue
            output[:, [0, 2, 7, 9, 11, 13, 15]] = output[:, [0, 2, 7, 9, 11, 13, 15]] * (width / nw)
            output[:, [1, 3, 8, 10, 12, 14, 16]] = output[:, [1, 3, 8, 10, 12, 14, 16]] * (height / nh)
            for o in output:
                draw_image.rectangle([o[0], o[1], o[2], o[3]], outline=(0, 255, 255), width=3)
                for color_idx, w in enumerate(range(0, 10, 2)):
                    # Filled landmark dot; PIL's `width` is an outline
                    # thickness and must not be negative, so it is omitted.
                    draw_image.ellipse((o[7 + w] - radius, o[7 + w + 1] - radius,
                                        o[7 + w] + radius, o[7 + w + 1] + radius),
                                       fill=colors_cv[color_idx],
                                       outline=colors[color_idx])
        img.show()
        img.save("result.jpg")

    @torch.no_grad()
    def predict_from_video(self, video_path, video_save_path):
        """Run detection frame-by-frame on the video at *video_path*.

        Annotated frames are shown in an OpenCV window (press ``q`` to quit)
        and, when *video_save_path* is non-empty, written to that file.
        """
        capture = cv2.VideoCapture(video_path)

        video_fps = 25.0
        out = None  # created only when a save path is given
        if video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)

        fps = 0
        try:
            while True:
                t1 = time.time()
                ref, frame = capture.read()

                if frame is None:  # end of stream
                    break

                temp = frame.copy()  # keep the original BGR frame for drawing
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                height, width = frame.shape[0], frame.shape[1]

                nw, nh = self._letterbox_size(width, height, self.INPUT_SIZE)
                frame = cv2.resize(frame, (nw, nh))
                canvas = np.ones([self.INPUT_SIZE, self.INPUT_SIZE, 3], dtype=np.uint8) * 128
                canvas[:nh, :nw] = frame

                tensor = infer_transform(canvas).unsqueeze(0).to(self.device)

                outputs = self.model(tensor)
                outputs = self.output_decode(tensor, outputs)

                for output in outputs:
                    if output is None:
                        continue
                    output[:, [0, 2, 7, 9, 11, 13, 15]] = output[:, [0, 2, 7, 9, 11, 13, 15]] * (width / nw)
                    output[:, [1, 3, 8, 10, 12, 14, 16]] = output[:, [1, 3, 8, 10, 12, 14, 16]] * (height / nh)
                    for o in output:
                        temp = cv2.rectangle(temp, (int(o[0]), int(o[1])), (int(o[2]), int(o[3])), (0, 0, 255), 2)
                        for color_idx, w in enumerate(range(0, 10, 2)):
                            temp = cv2.circle(temp, (int(o[7 + w]), int(o[7 + w + 1])), 2, colors_cv[color_idx], -1)

                # Exponential-ish smoothing: average of previous value and the
                # instantaneous frame rate.
                fps = (fps + (1. / (time.time() - t1))) / 2

                print("fps= %.2f" % fps)
                temp = cv2.putText(temp, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                if out is not None:
                    out.write(temp)

                cv2.imshow("video", temp)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            # Release capture/writer and close windows on any exit path.
            capture.release()
            if out is not None:
                out.release()
            cv2.destroyAllWindows()


if __name__ == "__main__":
    model = RetinaFaceNetInference()
    # model.predict(r"E:\Datasets2\widerface\train\images\16--Award_Ceremony\16_Award_Ceremony_Awards_Ceremony_16_15.jpg")
    # print("------------------------------------------------------------------------------------------------")
    # model.predict(r"E:\Datasets2\widerface\train\images\0--Parade\0_Parade_Parade_0_904.jpg")
    # print("------------------------------------------------------------------------------------------------")
    # model.predict(r"G:\datasets\WIDER_FACE\WIDER_train\WIDER_train\images\0--Parade\0_Parade_Parade_0_730.jpg")
    # print("------------------------------------------------------------------------------------------------")
    # model.predict(r"G:\datasets\WIDER_FACE\WIDER_train\WIDER_train\images\22--Picnic\22_Picnic_Picnic_22_62.jpg")
    # print("------------------------------------------------------------------------------------------------")
    # model.predict(r"G:\datasets\WIDER_FACE\WIDER_train\WIDER_train\images\22--Picnic\22_Picnic_Picnic_22_65.jpg")
    # print("------------------------------------------------------------------------------------------------")
    # model.predict(r"E:\Datasets2\person_detect\person_from_武装分子\jpg\2a3c8bd4-1caf-11eb-9ea3-ccf9e4dccc96.jpg")
    # model.predict_from_video("12.mp4","c4.mp4")
    # model.predict("data/7.jpeg")
    model.predict_from_video("blow.mp4", "c5.mp4")
