#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     demo
   Description :   
   Author :       lth
   date：          2022/8/5
-------------------------------------------------
   Change Activity:
                   2022/8/5 11:19: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import time

import cv2
import numpy as np
import torch
from PIL import Image

from FaceParsing.test import Inference as FaceParsingInference
from RetinaFaceNet.inference import RetinaFaceNetInference


class MIXDemo:
    """Blend face-parsing masks onto faces found by RetinaFace, for video files or a live camera."""

    def __init__(self):
        # Project models: semantic face parsing and face detection.
        self.face_parsing_model = FaceParsingInference()
        self.retina_face_net_model = RetinaFaceNetInference()

    def _process_frame(self, frame):
        """Detect faces in an RGBA PIL frame and paste a blended parsing mask over each.

        Mutates *frame* in place and returns it.  Shared by both the file-based
        and the live-capture entry points (the original duplicated this loop).
        """
        outputs = self.retina_face_net_model.predict_image(frame)
        for output in outputs:
            if output is None:
                continue
            for o in output:
                # Expand each detection box by a fixed margin so the whole head
                # (hair, chin) is included in the crop fed to the parser.
                offset = 40
                box = [int(o[0]) - offset, int(o[1]) - offset,
                       int(o[2]) + offset, int(o[3]) + offset]
                head_img = frame.crop(box)
                new_head_img = self.face_parsing_model.predict_image(head_img).convert("RGBA")
                # 60% original / 40% parsing-mask overlay.
                image_blend = Image.blend(head_img, new_head_img, 0.4)
                # The parsing result's alpha channel limits the paste to masked pixels.
                frame.paste(image_blend, box, mask=new_head_img.split()[-1])
        return frame

    def _annotate_fps(self, frame, fps):
        """Convert an RGBA PIL frame back to a BGR ndarray and draw the fps counter."""
        frame = np.array(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        return cv2.putText(frame, "fps= %.2f" % fps, (0, 40),
                           cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    @torch.no_grad()
    def predict_from_video(self, video_path, video_save_path):
        """Process a video file, display the result, and optionally write it out.

        :param video_path: path of the input video file.
        :param video_save_path: output file path; pass "" to skip saving.
        """
        # NOTE(review): the original passed cv2.CAP_DSHOW here, which is a
        # Windows DirectShow *camera* backend and is wrong for file input.
        capture = cv2.VideoCapture(video_path)

        video_fps = 25.0
        # Fix: `out` was previously unbound when video_save_path == "", so the
        # unconditional out.release() calls raised NameError.
        out = None
        if video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)

        fps = 0
        try:
            while True:
                t1 = time.time()
                ref, frame = capture.read()
                if frame is None:
                    break  # end of stream

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = Image.fromarray(frame).convert("RGBA")
                frame = self._process_frame(frame)

                # Running average of the instantaneous frame rate.
                fps = (fps + (1. / (time.time() - t1))) / 2
                frame = self._annotate_fps(frame, fps)

                if out is not None:
                    out.write(frame)

                cv2.imshow("video", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            # Exception-safe cleanup (the original released in three places).
            capture.release()
            if out is not None:
                out.release()
            cv2.destroyAllWindows()

    @torch.no_grad()
    def predict_online(self, video_path):
        """Process a live capture source (e.g. camera index 0) and display it.

        :param video_path: OpenCV capture source — a device index or stream URL.
        """
        capture = cv2.VideoCapture(video_path)
        flag = capture.isOpened()
        print("the camera is " + str(flag))
        fps = 0
        try:
            while True:
                t1 = time.time()
                ref, frame = capture.read()
                if frame is None:
                    continue  # camera hiccup: skip this read and retry

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = Image.fromarray(frame).convert("RGBA")
                frame = self._process_frame(frame)

                fps = (fps + (1. / (time.time() - t1))) / 2
                frame = self._annotate_fps(frame, fps)

                cv2.imshow("video", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            capture.release()
            cv2.destroyAllWindows()


if __name__ == "__main__":
    # Launch the live demo on the default capture device (camera index 0).
    MIXDemo().predict_online(0)
