#!/usr/bin/env python
# coding: utf-8

# %%
import cv2
import time
import numpy as np
import mediapipe as mp
from tqdm import tqdm
from matplotlib import pyplot as plt
from pathlib import Path

# MediaPipe task-API aliases (kept for the landmarker model referenced below).
BaseOptions = mp.tasks.BaseOptions
FaceLandmarker = mp.tasks.vision.FaceLandmarker
FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode
# Directory containing this script; the landmarker model lives alongside it.
filepath = Path(__file__).resolve().parent
model_path = filepath / "jobs/face_landmarker.task"


input_video_path = "data/anchorwoman.mp4"
filename = Path(input_video_path).stem
directory = Path(input_video_path).parent
# Output path is derived from the input path, e.g. data/anchorwoman_cropped.mp4.
# (Was an f-string with no placeholder, yielding a literal, wrong path.)
output_video_path = input_video_path.replace(filename, f"{filename}_cropped")
target_size = (512, 512)  # output frame size (width, height)
# Optional fixed pre-crop region applied before detection; empty tuple disables it.
bias = ()
# bias = (250, 750, 250, 750)  # (tl_y, br_y, tl_x, br_x)
zoom_coef = 1.6  # enlarge the detected face box by this factor before cropping


# %%
"""
裁剪并缩放视频中的人脸区域，显示进度条。

Args:
    input_video_path: 输入视频的路径。
    output_video_path: 输出视频的路径。
    target_size: 目标尺寸 (宽度, 高度)。
"""

# --- Pass 1: detect a face bounding box in every frame ----------------------
mp_face_detection = mp.solutions.face_detection

cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    # RuntimeError instead of BaseException: BaseException escapes ordinary
    # `except Exception` handlers and shadows KeyboardInterrupt/SystemExit.
    raise RuntimeError("无法打开视频文件")

# Total frame count drives the progress bars; fps is reused by the writer pass.
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"视频总帧数: {total_frames}")

face_boxes = []  # (xmin, ymin, xmax, ymax) in full-frame pixel coordinates

with mp_face_detection.FaceDetection(min_detection_confidence=0.1) as face_detection:
    for _ in tqdm(range(total_frames), desc="检测人脸"):
        success, image = cap.read()
        if not success:
            break

        # MediaPipe expects RGB; OpenCV decodes frames as BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Optionally restrict detection to a fixed sub-region of the frame.
        cropped_image = image[bias[0]:bias[1], bias[2]:bias[3]] if bias else image
        results = face_detection.process(cropped_image)

        if results.detections:
            for detection in results.detections:
                # Relative box coordinates are scaled by the (possibly cropped)
                # image size, then shifted back into full-frame coordinates.
                bbox = detection.location_data.relative_bounding_box
                h, w, _ = cropped_image.shape
                xmin = int(bbox.xmin * w)
                ymin = int(bbox.ymin * h)
                width = int(bbox.width * w)
                height = int(bbox.height * h)
                off_x = bias[2] if bias else 0
                off_y = bias[0] if bias else 0
                face_boxes.append((xmin + off_x, ymin + off_y,
                                   xmin + off_x + width, ymin + off_y + height))

cap.release()

# Guard before indexing: the original indexed face_boxes[-1] unconditionally
# and only checked for emptiness much later.
if not face_boxes:
    raise RuntimeError("未检测到人脸")

# Sanity-check: visualize the last detected box on the last decoded frame.
# NOTE(review): if the final cap.read() failed, `image` is None here — the
# original had the same issue; confirm frames decode to the end.
xmin, ymin, xmax, ymax = face_boxes[-1]
print(xmin, ymin, xmax, ymax)
plt.imshow(image)
plt.gca().add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                  fill=False, edgecolor='red', linewidth=2))
plt.axis('off')
plt.show()



# %%


# Fold the per-frame boxes into one union box covering every detection.
# Rebinding face_boxes to an ndarray is intentional; later cells rely on it.
face_boxes = np.array(face_boxes)
lo = face_boxes.min(axis=0).astype(int)  # per-column minima
hi = face_boxes.max(axis=0).astype(int)  # per-column maxima
min_x, min_y = lo[0], lo[1]
max_x, max_y = hi[2], hi[3]
# Center of the union box (integer midpoint).
center_x = (min_x + max_x) // 2
center_y = (min_y + max_y) // 2
print(min_x, min_y, max_x, max_y)
print(center_x, center_y)


# %%


# --- Pass 2: crop a fixed square around the union box and re-encode ---------
if len(face_boxes) == 0:
    # RuntimeError instead of BaseException (catchable by normal handlers).
    raise RuntimeError("未检测到人脸")

# Side of the square crop: the union box enlarged by zoom_coef.
original_width = max_x - min_x
original_height = max_y - min_y
scaled_width = int(original_width * zoom_coef)
scaled_height = int(original_height * zoom_coef)
scaled_size = max(scaled_width, scaled_height)

# Re-center the square on the union box's center. Deriving max from
# min + scaled_size keeps the box exactly scaled_size wide/tall even when
# scaled_size is odd (center ± scaled_size // 2 lost one pixel).
min_x = center_x - scaled_size // 2
min_y = center_y - scaled_size // 2
max_x = min_x + scaled_size
max_y = min_y + scaled_size

# Reopen the video for the crop-and-resize pass.
cap = cv2.VideoCapture(input_video_path)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # or 'XVID'
out = cv2.VideoWriter(output_video_path, fourcc, fps, target_size)

# Frame dimensions, used to clamp the crop box to the visible area.
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# The crop box is constant, so clamp it once (was recomputed per frame, and
# left cropped_min_x undefined for the summary below if the loop never ran).
cropped_min_x = max(0, min_x)
cropped_min_y = max(0, min_y)
cropped_max_x = min(video_width, max_x)
cropped_max_y = min(video_height, max_y)

for _ in tqdm(range(total_frames), desc="裁剪和缩放"):
    success, image = cap.read()
    if not success:
        break
    try:
        # Only crop when the clamped region is non-empty.
        if cropped_min_x < cropped_max_x and cropped_min_y < cropped_max_y:
            cropped_image = image[cropped_min_y:cropped_max_y, cropped_min_x:cropped_max_x]
            resized_image = cv2.resize(cropped_image, target_size)
            out.write(resized_image)
        else:
            # Invalid crop region: emit a black frame so the stream keeps its
            # frame count (and stays in sync with the original audio).
            black_image = np.zeros((target_size[1], target_size[0], 3), dtype=np.uint8)
            out.write(black_image)
            print(f"裁剪区域无效，已跳过当前帧。min_x: {min_x}, min_y: {min_y}, max_x: {max_x}, max_y: {max_y}")

    except Exception as e:
        print(f"处理帧时出错: {e}")
        print(f"min_x: {min_x}, min_y: {min_y}, max_x: {max_x}, max_y: {max_y}")
        break  # stop on the first failing frame

cap.release()
out.release()
print(f"视频已裁剪并缩放到 {target_size}")
print(f"对应原视频裁剪框为 {[cropped_min_x, cropped_min_y, cropped_max_x, cropped_max_y]}")
print(f'ffmpeg回贴参数: -filter_complex "[1]scale={scaled_size}:{scaled_size}[vid2];[0][vid2]overlay=x={min_x}:y={min_y}"')
# Write a helper script that pastes the cropped video back over the original.
# NOTE(review): overlay x/y can be negative when the square extends past the
# frame edge — confirm ffmpeg handles that as intended.
with open(directory / f"{filename}.sh", "w") as f:
    f.write(
        f'ffmpeg -i {input_video_path} -i {output_video_path} '
        f'-filter_complex "[1]scale={scaled_size}:{scaled_size}[vid2];'
        f'[0][vid2]overlay=x={min_x}:y={min_y}" -c:a copy '
        f'{directory}/{filename}_pasteback.mp4'
    )