#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     RTSP
   Description :   
   Author :       lth
   date：          2022/7/25
-------------------------------------------------
   Change Activity:
                   2022/7/25 4:51: create this script
-------------------------------------------------
"""
__author__ = 'lth'



import numpy as np
import torch
from PIL import Image, ImageDraw,ImageFont
from torch.backends import cudnn
import threading
from config import GetConfig
from datalist import infer_transform
from model import RetinaFace
from utils import OutputDecode
import cv2
import numpy as np
import time
import copy
radius = 3
colors = ["blue", "green", "red", "yellow", "black"]
colors_cv=[(0,255,255),(255,0,0),(255,0,255),(125,0,255),(255,125,0)]

class RetinaFaceNetInference:
    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")

        # region 项目运行配置
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        # endregion

        self.model = RetinaFace()
        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())).to(self.device)
            cudnn.benchmark = True

        print("load the weight from pretrained-weight file")
        model_dict = self.model.state_dict()
        pretrained_dict = torch.load(self.args.pretrained_weight, map_location=self.device)['model_state_dict']
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)
        self.model.to(self.device)
        print("Finished to load the weight")

        self.output_decode = OutputDecode()
        self.Frame=None

    def Receive(self):
        print("Start Receive")
        cap = cv2.VideoCapture("rtsp://admin:1qaz@wsx@192.168.31.120:554/h264/ch33/main/av_stream")
        ret, frame = cap.read()
        while ret:
            ret, frame = cap.read()
            self.Frame = frame

    def Display(self):
        print("Start Displaying")
        while True:
            if self.Frame is not None:
                frame = self.Frame

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                temp = frame
                height, width = frame.shape[0], frame.shape[1]

                ratio = width / height
                if ratio > 1:
                    nw = 840
                    nh = int(840 / ratio)
                else:
                    nh = 840
                    nw = int(840 * ratio)

                frame = cv2.resize(frame, (nw, nh))

                image = np.ones([840, 840, 3], dtype=np.uint8) * 128
                image[:nh, :nw] = frame

                image = infer_transform(image).unsqueeze(0).to(self.device)

                outputs = self.model(image)
                outputs = self.output_decode(image, outputs)

                for output in outputs:
                    if output is None:
                        continue
                    print(output)
                    output[:, [0, 2, 7, 9, 11, 13, 15]] = output[:, [0, 2, 7, 9, 11, 13, 15]] * (width / nw)
                    output[:, [1, 3, 8, 10, 12, 14, 16]] = output[:, [1, 3, 8, 10, 12, 14, 16]] * (height / nh)
                    for o in output:
                        temp = cv2.rectangle(temp, (int(o[0]), int(o[1])), (int(o[2]), int(o[3])), (0, 0, 255), 2)
                        for color_idx, w in enumerate(range(0, 10, 2)):
                            temp = cv2.circle(temp, (int(o[7 + w]), int(o[7 + w + 1])), 2, colors_cv[color_idx], -1)

                frame = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
                frame = cv2.resize(frame, (3840, 2160))
                cv2.imshow("frame1", frame)
                cv2.waitKey(1) & 0xff

    def work(self):
        p1 = threading.Thread(target=self.Receive)
        p2 = threading.Thread(target=self.Display)
        p1.start()
        p2.start()

if __name__=="__main__":
    model=RetinaFaceNetInference()
    model.work()