# -*-coding: utf-8 -*-
import os
import onnxruntime
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
import cv2
import argparse
import warnings
warnings.filterwarnings("ignore")


# -------------------- 工具函数 --------------------
def cut_resize_letterbox(image_bgr, det, target_size):
    """Crop a square region around a detected face and resize it.

    The detection box is symmetrically expanded along its shorter side to a
    square, clamped to the image bounds, then resized to ``target_size``.

    Args:
        image_bgr: full frame as an OpenCV BGR array.
        det: detection row whose first four values are x1, y1, x2, y2.
        target_size: (width, height) of the output crop.

    Returns:
        (resized_crop, scale, crop_x1, crop_y1) where ``scale`` maps
        target-size pixels back to source pixels and (crop_x1, crop_y1) is
        the crop origin in the source image.
    """
    img_h, img_w = image_bgr.shape[:2]
    left, top, right, bottom = (int(v) for v in det[:4])
    box_w = right - left
    box_h = bottom - top
    side = max(box_w, box_h)
    # Expand the shorter dimension equally on both sides to make a square.
    extra_x = (side - box_w) / 2
    extra_y = (side - box_h) / 2

    crop_x1 = max(int(left - extra_x), 0)
    crop_y1 = max(int(top - extra_y), 0)
    crop_x2 = min(int(right + extra_x), img_w)
    crop_y2 = min(int(bottom + extra_y), img_h)

    crop = image_bgr[crop_y1:crop_y2, crop_x1:crop_x2]
    resized = cv2.resize(crop, target_size)
    return resized, side / target_size[0], crop_x1, crop_y1


def to_numpy(tensor):
    """Convert a torch tensor to a NumPy array, detaching it from the
    autograd graph first when it requires gradients."""
    source = tensor.detach() if tensor.requires_grad else tensor
    return source.cpu().numpy()


def py_cpu_nms(dets, thresh):
    """Pure-NumPy non-maximum suppression.

    Args:
        dets: (N, 5) array of rows [x1, y1, x2, y2, score].
        thresh: IoU threshold; lower-scored boxes overlapping a kept box
            by more than this are discarded.

    Returns:
        List of row indices to keep, in descending score order.
    """
    xs1 = dets[:, 0]
    ys1 = dets[:, 1]
    xs2 = dets[:, 2]
    ys2 = dets[:, 3]
    # The +1 keeps the historical inclusive-pixel area convention.
    areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    order = dets[:, 4].argsort()[::-1]

    selected = []
    while order.size:
        best = order[0]
        selected.append(best)
        rest = order[1:]
        overlap_w = np.maximum(
            0, np.minimum(xs2[best], xs2[rest]) - np.maximum(xs1[best], xs1[rest]) + 1)
        overlap_h = np.maximum(
            0, np.minimum(ys2[best], ys2[rest]) - np.maximum(ys1[best], ys1[rest]) + 1)
        intersection = overlap_w * overlap_h
        iou = intersection / (areas[best] + areas[rest] - intersection)
        # Keep only the boxes that do not overlap the winner too much.
        order = rest[iou <= thresh]
    return selected


def process_output(dets, conf_thresh, scale, pad_w, pad_h, iw, ih):
    """Convert raw detector rows into boxes in original-image coordinates.

    Each row is in center format (cx, cy, w, h) on the letterboxed input;
    the padding offset and scale are undone, and boxes are clipped to the
    image. Rows whose objectness (index 4) falls below ``conf_thresh`` are
    dropped.

    Returns:
        List of [x1, y1, x2, y2, score] lists, where score is the product
        of row[4] and row[15].
    """
    boxes = []
    for row in dets:
        objness = row[4]
        if objness < conf_thresh:
            continue
        cx, cy, bw, bh = row[:4]
        half_w = bw / 2.
        half_h = bh / 2.
        left = max((cx - half_w - pad_w) / scale, 0.)
        top = max((cy - half_h - pad_h) / scale, 0.)
        right = min((cx + half_w - pad_w) / scale, iw)
        bottom = min((cy + half_h - pad_h) / scale, ih)
        boxes.append([left, top, right, bottom, objness * row[15]])
    return boxes


def pad_image(image_bgr, target_size):
    """Letterbox a BGR image onto a gray (128, 128, 128) canvas.

    The image is scaled to fit inside ``target_size`` while preserving its
    aspect ratio, then centered on the canvas.

    Returns:
        (canvas, scale, pad_w, pad_h): the padded image, the resize factor
        applied, and the left/top padding in pixels.
    """
    src_h, src_w = image_bgr.shape[:2]
    dst_w, dst_h = target_size
    ratio = min(dst_w / src_w, dst_h / src_h)
    # Round to the nearest integer size, matching int(x + 0.5).
    new_w = int(src_w * ratio + 0.5)
    new_h = int(src_h * ratio + 0.5)
    off_x = (dst_w - new_w) // 2
    off_y = (dst_h - new_h) // 2

    canvas = np.full((dst_h, dst_w, 3), (128, 128, 128), dtype=np.uint8)
    canvas[off_y:off_y + new_h, off_x:off_x + new_w] = cv2.resize(image_bgr, (new_w, new_h))

    return canvas, ratio, off_x, off_y


def get_img_tensor_opencv(bgr_img, use_cuda, target_size, transform):
    """Turn an OpenCV BGR image into a batched PyTorch tensor.

    The image is converted to RGB, resized to ``target_size`` when needed,
    passed through ``transform``, and given a leading batch dimension.
    Moves the tensor to CUDA when ``use_cuda`` is true.
    """
    pil_img = Image.fromarray(cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB))
    if pil_img.size != target_size:
        pil_img = pil_img.resize(target_size)
    batch = transform(pil_img).unsqueeze(0)
    return batch.cuda() if use_cuda else batch


# -------------------- Pose estimation --------------------
# 3D template points (generic face model, arbitrary model units) used as
# the object points for cv2.solvePnP. Order must match the image points
# built in calc_pose. Axis convention per the original comments: Y is
# positive downward, Z is positive toward the front of the face.
MODEL_3D_POINTS = np.array([
    [0.0, 0.0, 0.0],       # nose tip
    [0.0, 88.0, 12.0],     # chin (Y positive = down, Z positive = forward)
    [-47.0, -40.0, 23.0],  # left eye corner (Y negative = up)
    [47.0, -40.0, 23.0],   # right eye corner
    [-32.0, 39.0, 18.0],   # left mouth corner (Y positive = down)
    [32.0, 39.0, 18.0]     # right mouth corner
], dtype=np.float64)

def get_camera_matrix(w, h, face_w=None):
    """Build a pinhole intrinsic matrix with the principal point at the
    image center.

    The focal length is a heuristic: five times the face width when one is
    supplied, otherwise the image width divided by 1.5.
    """
    focal = face_w * 5 if face_w is not None else w / 1.5
    center_x = w / 2.0
    center_y = h / 2.0
    return np.array([
        [focal, 0, center_x],
        [0, focal, center_y],
        [0, 0, 1],
    ], dtype=np.float64)


def calc_pose(landmarks, w, h, face_w):
    """Estimate head pose angles from 98-point landmarks via cv2.solvePnP.

    (Original docstring said: "fixes the pitch/roll swap problem".)

    Args:
        landmarks: sequence of 98 (x, y) pixel coordinates; indices
            54/16/60/72/76/82 are used (nose tip, chin, left/right eye
            corners, left/right mouth corners — see MODEL_3D_POINTS).
        w, h: image width and height, used to build camera intrinsics.
        face_w: face width in pixels for the focal-length heuristic, or
            None to derive the focal length from the image width.

    Returns:
        (-pitch_deg, roll_deg, yaw_deg, rvec, tvec, camera_matrix), or six
        Nones when solvePnP fails.

    NOTE(review): the call site unpacks this as ``pitch, yaw, roll`` while
    the return order here is (pitch, roll, yaw) — confirm which labeling is
    intended. The formulas below resemble a ZYX Euler decomposition in
    which the conventional names of "pitch" and "roll" appear interchanged;
    the original docstring indicates that swap is deliberate.
    """
    # 2D image points matching MODEL_3D_POINTS, in the same order.
    image_points = np.array([
        landmarks[54],  # nose tip
        landmarks[16],  # chin
        landmarks[60],  # left eye corner
        landmarks[72],  # right eye corner
        landmarks[76],  # left mouth corner
        landmarks[82]   # right mouth corner
    ], dtype=np.float64)

    camera_matrix = get_camera_matrix(w, h, face_w=face_w)
    dist_coeffs = np.zeros((4, 1))  # assume no lens distortion

    success, rvec, tvec = cv2.solvePnP(
        MODEL_3D_POINTS, image_points, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_EPNP
    )
    if not success:
        return None, None, None, None, None, None

    # Rotation vector -> 3x3 rotation matrix.
    R, _ = cv2.Rodrigues(rvec)
    r11, r12, r13 = R[0, 0], R[0, 1], R[0, 2]
    r21, r22, r23 = R[1, 0], R[1, 1], R[1, 2]
    r31, r32, r33 = R[2, 0], R[2, 1], R[2, 2]

    # Yaw (turning the head left/right)
    yaw = np.arctan2(r21, r11)
    yaw_deg = np.degrees(yaw)

    # Pitch (nodding up/down)
    pitch = np.arctan2(r32, r33)
    pitch_deg = np.degrees(pitch)

    # Roll (tilting the head sideways)
    roll = np.arctan2(-r31, np.sqrt(r32**2 + r33**2))
    roll_deg = np.degrees(roll)

    # Pitch is negated; note the (pitch, roll, yaw) return order.
    return -pitch_deg, roll_deg, yaw_deg, rvec, tvec, camera_matrix


def draw_axis(img, rvec, tvec, cam_matrix, face_center=None, dist_coeffs=None, length=80):
    """Project the 3D pose axes through the camera model and draw them.

    X is drawn in red, Y in green, Z in blue. When ``face_center`` is given,
    the axis origin is translated there; otherwise the projected model
    origin is used. Returns the (possibly re-allocated) image.
    """
    dist = np.zeros((4, 1)) if dist_coeffs is None else dist_coeffs
    canvas = np.ascontiguousarray(img)
    rows, cols = canvas.shape[:2]

    # Origin plus the three axis tips in model space.
    axes_3d = np.float32([
        [0, 0, 0],
        [-length, 0, 0],   # X: red, pointing right
        [0, length, 0],    # Y: green, pointing up
        [0, 0, -length]    # Z: blue, toward the camera
    ])

    projected, _ = cv2.projectPoints(axes_3d, rvec, tvec, cam_matrix, dist)
    projected = projected.astype(int).squeeze()

    def bound(pt):
        # Clamp a point into the image so lines never start off-canvas.
        return (max(0, min(cols - 1, pt[0])), max(0, min(rows - 1, pt[1])))

    projected_origin = tuple(projected[0].ravel())
    if face_center is None:
        anchor = bound(projected_origin)
    else:
        anchor = bound(tuple(map(int, face_center)))

    # Translate every projected point so the origin lands on the anchor.
    shift = np.array([[anchor[0] - projected_origin[0],
                       anchor[1] - projected_origin[1]]])
    shifted = projected + shift

    axis_colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # X=red, Y=green, Z=blue
    for tip, color in zip(shifted[1:4], axis_colors):
        endpoint = bound(tuple(map(int, tip.ravel())))
        cv2.line(canvas, anchor, endpoint, color, 2, cv2.LINE_AA)

    return canvas


def draw_landmarks(img, landmarks, color=(0, 255, 0), radius=2):
    """Highlight the six pose-estimation landmarks on the image.

    Only the landmark indices consumed by calc_pose are drawn, each as a
    filled circle with its own color. The ``color`` and ``radius``
    parameters are kept for interface compatibility but are not used by
    the current implementation.
    """
    canvas = np.ascontiguousarray(img)

    # index -> BGR color. NOTE(review): the original comments labeled 60
    # and 72 inconsistently with calc_pose's left/right eye assignment —
    # labels here are descriptive only; verify against the landmark spec.
    highlight = {
        54: (0, 0, 255),    # nose tip (red)
        16: (255, 0, 0),    # chin (blue)
        72: (255, 255, 0),  # eye corner (yellow)
        60: (0, 255, 255),  # eye corner (cyan)
        76: (255, 0, 255),  # mouth corner (magenta)
        82: (0, 128, 255),  # mouth corner (orange)
    }
    for idx, (x, y) in enumerate(landmarks):
        bgr = highlight.get(idx)
        if bgr is not None:
            cv2.circle(canvas, (int(x), int(y)), 3, bgr, -1)
    return canvas


# -------------------- 视频推理主函数 --------------------
def inference_video(args):
    """Run the detection → landmark → head-pose pipeline on a video stream.

    For every frame: detect faces with the YOLOv5-face ONNX model, regress
    98 landmarks per face with the PFLD ONNX model, estimate head pose with
    solvePnP, draw the overlays, write the annotated frame to
    ``args.save_video_path`` and show a live preview (press 'q' to stop).

    Args:
        args: parsed CLI namespace (see parse_args) carrying model paths,
            input sizes, the use_cuda flag, the video source and the output
            path.
    """
    # ONNX inference sessions for the detector and landmark model.
    facedetect_sess = onnxruntime.InferenceSession(args.facedetect_onnx_model)
    pfld_sess = onnxruntime.InferenceSession(args.pfld_onnx_model)

    # Detector consumes plain [0,1] tensors; PFLD expects [-1,1] inputs.
    detect_transform = transforms.Compose([transforms.ToTensor()])
    pfld_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5]*3, [0.5]*3)
    ])

    # Open the source. BUGFIX: the original compared args.video_path == "0",
    # which silently failed whenever the value was an int (the former
    # default was 1) and then fed that int to os.path.exists, where it is
    # interpreted as a file descriptor. Treat any numeric value — int or
    # digit string — as a camera index instead.
    source = str(args.video_path)
    if source.isdigit():
        cap = cv2.VideoCapture(int(source))
        print("[INFO] 正在使用摄像头进行推理...")
    else:
        if not os.path.exists(source):
            print(f"[ERROR] 视频文件不存在: {source}")
            return
        cap = cv2.VideoCapture(source)
        print(f"[INFO] 正在读取视频文件: {source}")

    # Stream properties. BUGFIX: cameras commonly report FPS as 0, which
    # would make the VideoWriter produce an unplayable file — fall back to
    # a sane default.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        fps = 25
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Writer for the annotated output. BUGFIX: os.path.dirname returns ''
    # for bare filenames and os.makedirs('') raises — only create the
    # directory when there is one.
    save_dir = os.path.dirname(args.save_video_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4 container
    out = cv2.VideoWriter(args.save_video_path, fourcc, fps, (width, height))

    frame_count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break  # end of stream

            frame_count += 1
            img_cv = frame
            ih, iw = img_cv.shape[:2]

            # ---- Face detection on a letterboxed copy of the frame ----
            pad_img, scale, pad_w, pad_h = pad_image(img_cv, args.facedetect_input_size)
            tensor_img = get_img_tensor_opencv(pad_img, args.use_cuda, args.facedetect_input_size, detect_transform)
            preds = facedetect_sess.run(None, {facedetect_sess.get_inputs()[0].name: to_numpy(tensor_img)})[0][0]
            dets = np.array(process_output(preds, 0.5, scale, pad_w, pad_h, iw, ih))

            if len(dets) > 0:
                dets = dets[py_cpu_nms(dets, 0.5)]

                for det in dets:
                    # ---- Landmark regression on the cropped face ----
                    face_img, scale_l, x_off, y_off = cut_resize_letterbox(img_cv, det, args.pfld_input_size)
                    pfld_tensor = get_img_tensor_opencv(face_img, args.use_cuda, args.pfld_input_size, pfld_transform)
                    preds = pfld_sess.run(None, {pfld_sess.get_inputs()[0].name: to_numpy(pfld_tensor)})[0][0]

                    # Map the 98 normalized landmarks back to frame pixels.
                    pts = np.zeros((98, 2), dtype=np.float64)
                    for i in range(98):
                        pts[i, 0] = preds[i * 2] * args.pfld_input_size[0] * scale_l + x_off
                        pts[i, 1] = preds[i * 2 + 1] * args.pfld_input_size[1] * scale_l + y_off

                    img_cv = draw_landmarks(img_cv, pts)

                    # ---- Pose estimation and overlay ----
                    # NOTE(review): calc_pose returns (pitch, roll, yaw, ...)
                    # in that order — confirm this unpacking's labels.
                    pitch, yaw, roll, rvec, tvec, cam = calc_pose(pts, iw, ih, face_w=None)
                    if pitch is not None:
                        face_center = ((det[0] + det[2]) / 2, (det[1] + det[3]) / 2)
                        img_cv = draw_axis(img_cv, rvec, tvec, cam, face_center=face_center)
                        # Overlay the pose angles above the face box.
                        cv2.putText(
                            img_cv,
                            f"P:{pitch:.1f} Y:{yaw:.1f} R:{roll:.1f}",
                            (int(det[0]), int(det[1]) - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2
                        )

            # Write the annotated frame to the output video.
            out.write(img_cv)

            # Live preview; 'q' exits early.
            cv2.imshow('Head Pose Estimation (Video)', img_cv)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("[INFO] 用户手动退出推理")
                break

            # Progress report every 10 frames.
            if frame_count % 10 == 0:
                print(f"[PROGRESS] 已处理 {frame_count}/{total_frames} 帧")
    finally:
        # BUGFIX: always release the capture/writer and close windows,
        # even if a frame raises mid-loop.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    print(f"[OK] 视频推理完成！输出文件已保存到: {args.save_video_path}")


# -------------------- 参数配置 --------------------
def parse_args(argv=None):
    """Parse command-line arguments for the video inference script.

    Args:
        argv: optional list of argument strings; defaults to sys.argv,
            preserving the original call pattern ``parse_args()``.

    Returns:
        argparse.Namespace with model paths, input sizes, the CUDA flag,
        the video source and the output path.

    Fixes relative to the original:
      * ``--use_cuda`` used ``type=bool``, which evaluates every non-empty
        string (including "False") as True; it is now a store_true flag.
      * ``--video_path`` defaulted to the int ``1``, which matched neither
        the camera check nor a file path and contradicted its own help
        text; the default is now the string "0" (default camera).
    """
    parser = argparse.ArgumentParser(description='PFLD Head Pose Inference (Video Version)')
    parser.add_argument('--use_cuda', default=False, action='store_true')
    parser.add_argument('--pfld_onnx_model', default="./onnx_models/PFLD_GhostOne_112_1_opt.onnx")
    parser.add_argument('--pfld_input_size', default=(112, 112))
    parser.add_argument('--facedetect_onnx_model', default="./onnx_models/origin/yolov5face_n_640.onnx")
    parser.add_argument('--facedetect_input_size', default=(640, 640))
    parser.add_argument('--video_path', default="0", help="视频文件路径或摄像头编号（0为默认摄像头）")
    parser.add_argument('--save_video_path', default="./test_output/pose_video.mp4", help="输出视频保存路径")
    return parser.parse_args(argv)


# Script entry point: parse CLI arguments and run the video pipeline.
if __name__ == "__main__":
    args = parse_args()
    inference_video(args)