#!/usr/bin/env python3
import cv2
import numpy as np
import tqdm
from face_det import SCRFD
import insightface
import hyperlpr3 as lpr3

import click

# Require insightface >= 0.4. Compare numeric version components, not the raw
# string: lexicographic comparison would order e.g. "0.10" before "0.4".
assert tuple(int(p) for p in insightface.__version__.split('.')[:2] if p.isdigit()) >= (0, 4)

# BGR colour used for every overlay drawn by this module.
custom_color = (142, 173, 50)


class AlgoMixPipeline(object):
    """Combined detection pipeline.

    Runs three detectors over an image and draws the results:
    faces (SCRFD), person bodies (insightface ``scrfd_person``) and
    license plates (hyperlpr3).
    """

    def __init__(self, scrfd_path: str = None):
        """Load the three detectors.

        :param scrfd_path: path to the SCRFD face-detection ONNX model file.
        """
        self.fdet = SCRFD(model_file=scrfd_path)
        self.bdet = insightface.model_zoo.get_model('scrfd_person_2.5g.onnx', download=True)
        self.bdet.prepare(0, nms_thresh=0.65, input_size=(640, 640))
        self.lpr = lpr3.LicensePlateCatcher(detect_level=lpr3.DETECT_LEVEL_HIGH)

    def _face_detect(self, image: np.ndarray):
        # Face detection at a reduced input size for speed.
        return self.fdet.detect(image, input_size=(320, 320), confidence_threshold=0.3)

    def _body_detect(self, image: np.ndarray):
        # Person detection; returns (full-body boxes, visible-region boxes).
        bboxes, kpss = self.bdet.detect(image)
        return self.detect_person(image, bboxes, kpss)

    def _lpr_rec(self, image: np.ndarray):
        # License-plate detection + recognition.
        return self.lpr(image)

    @staticmethod
    def detect_person(img, bboxes, kpss):
        """Round raw person detections to integer pixel coordinates.

        :param img: frame the detections belong to (used only for clipping
            keypoints to the image bounds).
        :param bboxes: (N, >=4) float array of body boxes.
        :param kpss: (N, K, 2) float array of body keypoints.
        :return: ``(bboxes, vbboxes)`` — integer body boxes and
            visible-region boxes built from keypoints 0 and 4.
        """
        # np.int was removed in NumPy 1.24 — use the builtin int instead.
        bboxes = np.round(bboxes[:, :4]).astype(int)
        kpss = np.round(kpss).astype(int)
        kpss[:, :, 0] = np.clip(kpss[:, :, 0], 0, img.shape[1])
        kpss[:, :, 1] = np.clip(kpss[:, :, 1], 0, img.shape[0])
        vbboxes = bboxes.copy()
        # Visible-region box: top-left from keypoint 0, bottom-right from
        # keypoint 4 (assumed convention of the scrfd_person model —
        # TODO confirm against the insightface model docs).
        vbboxes[:, 0] = kpss[:, 0, 0]
        vbboxes[:, 1] = kpss[:, 0, 1]
        vbboxes[:, 2] = kpss[:, 4, 0]
        vbboxes[:, 3] = kpss[:, 4, 1]
        return bboxes, vbboxes

    @staticmethod
    def draw_person(image, bboxes, up_halt=True):
        """Draw body boxes (rectangle + corner dots) on *image* in place.

        :param image: BGR frame to draw on.
        :param bboxes: (N, 4) integer array of body boxes.
        :param up_halt: if True, draw only the upper half of each box.
        """
        for i in range(bboxes.shape[0]):
            x1, y1, x2, y2 = bboxes[i]

            # Keep only the upper half of the box when requested.
            if up_halt:
                y2 = y1 + abs(y2 - y1) // 2

            cv2.rectangle(image, (x1, y1), (x2, y2), custom_color, 2)
            cv2.circle(image, (x1, y1), 3, custom_color, 2)
            cv2.circle(image, (x1, y2), 3, custom_color, 2)
            cv2.circle(image, (x2, y1), 3, custom_color, 2)
            cv2.circle(image, (x2, y2), 3, custom_color, 2)

    @staticmethod
    def draw_face(image: np.ndarray, bboxes, scale=2.0):
        """Draw each face box as a centered square scaled by *scale*.

        :param image: BGR frame to draw on.
        :param bboxes: iterable of ``[x1, y1, x2, y2, score]`` face boxes.
        :param scale: multiplier applied to the longer side of the box.
        """
        for box in bboxes:
            # np.int was removed in NumPy 1.24 — use the builtin int instead.
            x1, y1, x2, y2, score = box.astype(int)

            # Centre of the original detection box.
            center_x = (x1 + x2) // 2
            center_y = (y1 + y2) // 2

            # Square side = longer edge of the box, scaled.
            max_side = max(x2 - x1, y2 - y1)
            half_size = int(max_side * scale / 2)

            cv2.rectangle(image,
                          (center_x - half_size, center_y - half_size),
                          (center_x + half_size, center_y + half_size),
                          custom_color, 2)

    @staticmethod
    def draw_plate(image, results, scale=1.2):
        """Draw the first recognised license plate, enlarged by *scale*.

        Only ``results[:1]`` is drawn — i.e. at most one plate per frame.

        :param image: BGR frame to draw on.
        :param results: hyperlpr3 results: ``(code, confidence, type, box)``.
        :param scale: multiplier applied to the box width and height.
        """
        for code, confidence, type_idx, box in results[:1]:
            x1, y1, x2, y2 = box

            # Centre of the plate box.
            center_x = (x1 + x2) // 2
            center_y = (y1 + y2) // 2

            # Half-extents of the scaled box.
            half_w = int((x2 - x1) * scale / 2)
            half_h = int((y2 - y1) * scale / 2)

            cv2.rectangle(image,
                          (center_x - half_w, center_y - half_h),
                          (center_x + half_w, center_y + half_h),
                          custom_color, 4, cv2.LINE_AA)

    def face_inside_body(self, face_box, body_boxes):
        """Return True if *face_box* lies fully inside any box in *body_boxes*."""
        fx1, fy1, fx2, fy2 = face_box[:4]
        for bbox in body_boxes:
            bx1, by1, bx2, by2 = bbox[:4]
            if fx1 >= bx1 and fy1 >= by1 and fx2 <= bx2 and fy2 <= by2:
                return True
        return False

    def run_and_draw_total(self, image, mode=3, half=False):
        """Run detection and draw overlays on *image* in place.

        :param image: BGR frame to process.
        :param mode: 1 = plates only, 2 = bodies only, anything else =
            faces, plus bodies that contain no detected face.
        :param half: forwarded to :meth:`draw_person` (upper half only).
        :return: the same *image* with overlays drawn.
        """
        if mode == 1:
            # Plates only.
            lpr_result = self._lpr_rec(image)
            self.draw_plate(image, lpr_result)
        elif mode == 2:
            # Body boxes only.
            body_bboxes, _ = self._body_detect(image)
            self.draw_person(image, body_bboxes, up_halt=half)
        else:
            # Prefer faces; draw a body box only when it contains no face.
            body_bboxes, _ = self._body_detect(image)
            face_bboxes, _ = self._face_detect(image)

            for body_box in body_bboxes:
                if not any(self.face_inside_body(face_box, [body_box]) for face_box in face_bboxes):
                    self.draw_person(image, np.array([body_box]), up_halt=half)

            # Draw every detected face box.
            self.draw_face(image, face_bboxes)
            # (A dead `if mode == 1` branch that re-ran plate detection here
            # was removed: it could never be reached inside this `else`.)

        return image


# def run_img():
#     image = cv2.imread("images/demo.png")
#
#     image = axp.run_and_draw_total(image)
#
#     cv2.imshow("w", image)
#     cv2.waitKey(0)


def run_video(video_path: str =
              "/Users/tunm/Downloads/进场+工位视频素材/圈出【员工脸部】+【手持手机】玩游戏.mov"):
    """Play *video_path* with detection overlays until it ends or 'q' is pressed.

    :param video_path: path to the input video file (the default preserves the
        previously hard-coded demo path).
    """
    # Create the pipeline with the local SCRFD model.
    axp = AlgoMixPipeline("models/s25g320.onnx")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Fail early instead of looping on unreadable frames.
        print("Error: Unable to open video file")
        return

    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream (or read failure).
            break

        # Detect and draw (plates only, upper-half body boxes).
        frame = axp.run_and_draw_total(frame, mode=1, half=True)

        cv2.imshow("w", frame)

        # 'q' quits early.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def process_video(video_path, mode, half):
    """Run the detection pipeline over a video and save the annotated copy.

    The output is written next to the input as ``<name>_pro.mp4``.

    :param video_path: path to the input video file.
    :param mode: drawing mode (1 plates, 2 bodies, 3 faces-or-bodies).
    :param half: if True, draw only the upper half of body boxes.
    :return: the output filename, or None if the input could not be opened.
    """
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        print("Error: Unable to open video file")
        return None

    # Basic stream properties; fall back to 25 fps when the container
    # does not report a frame rate (CAP_PROP_FPS == 0).
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Write a real MP4 file: the previous XVID/.avi output contradicted both
    # the intent stated here and the "_pro.mp4" name reported by the CLI.
    output_filename = video_path.rsplit('.', 1)[0] + '_pro.mp4'
    out = cv2.VideoWriter(output_filename, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    # Initialise the pipeline with the local SCRFD model.
    axp = AlgoMixPipeline("models/s25g320.onnx")

    try:
        # tqdm shows processing progress over the (reported) frame count.
        for _ in tqdm.tqdm(range(total_frames), desc="Processing Video"):
            ret, frame = cap.read()
            if not ret:
                break

            # Annotate and write each frame.
            out.write(axp.run_and_draw_total(frame, mode=mode, half=half))
    finally:
        # Release resources even if processing raises.
        cap.release()
        out.release()

    return output_filename


@click.command()
@click.argument('video_path', type=click.Path(exists=True))
@click.option('--mode', default=3, help='Mode of operation. 1: Only plates, 2: Only bodies, 3: Faces or bodies')
@click.option('--half', is_flag=True, help='Draw only the upper half of the body if set')
def cli(video_path, mode, half):
    """
    Process a video file and output a new video with detections.

    VIDEO_PATH: Path to the video file to process.
    """
    process_video(video_path, mode, half)
    base_name = video_path.rsplit('.', 1)[0]
    click.echo(f"Processed video saved as: {base_name}_pro.mp4")


if __name__ == '__main__':
    cli()
