#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
YuNet 版深度+人脸误差发布节点（多人脸：只取最近；忽略深度>阈值）
- 相机：Intel RealSense D405（640x480@30）
- 检测：OpenCV FaceDetectorYN (YuNet)
- 深度：优先在“鼻尖关键点”处取小窗口中位数（无关键点则用人脸中心）
- 策略：同屏多人脸 -> 计算每张脸的深度 -> 过滤掉 z>max_depth_m -> 选 z 最小者
- 发布：/result3_topic -> geometry_msgs/Point(x=-ex(px), y=ey(px), z=depth(m))
按 q 退出。

依赖：
  pip install opencv-python numpy pyrealsense2
  OpenCV >= 4.6（包含 FaceDetectorYN）
"""
import os
import sys
import time
import argparse
import urllib.request
from typing import Optional, Tuple, List

import numpy as np
import cv2

import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Point

import pyrealsense2 as rs


# -------------------- YuNet 加载与工具 --------------------
def _download_yunet_if_needed(model_path: str) -> str:
    if os.path.isfile(model_path):
        return model_path
    os.makedirs(os.path.dirname(model_path) or ".", exist_ok=True)
    urls = [
        "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx",
        "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2022dec.onnx",
    ]
    for url in urls:
        try:
            print(f"[YuNet] downloading: {url}")
            urllib.request.urlretrieve(url, model_path)
            print(f"[YuNet] saved to: {model_path}")
            return model_path
        except Exception as e:
            print(f"[YuNet] download failed: {e}")
    print("[YuNet] auto-download failed, please specify --model manually.")
    return model_path


def create_yunet(model_path: str,
                 input_size: Tuple[int, int],
                 score_th: float,
                 nms_th: float,
                 top_k: int,
                 backend_id: int = cv2.dnn.DNN_BACKEND_OPENCV,
                 target_id: int = cv2.dnn.DNN_TARGET_CPU):
    """Construct an OpenCV YuNet face detector, handling both API spellings.

    OpenCV >= 4.6 exposes ``cv2.FaceDetectorYN.create``; some builds only
    ship the flat ``cv2.FaceDetectorYN_create`` factory. Whichever is
    available is used; a RuntimeError is raised when neither exists.

    :param model_path: path to the YuNet ONNX model file.
    :param input_size: (width, height) of the images fed to the detector.
    :param score_th: detection confidence threshold.
    :param nms_th: non-maximum-suppression threshold.
    :param top_k: maximum number of candidate detections kept before NMS.
    :param backend_id: cv2.dnn backend identifier.
    :param target_id: cv2.dnn compute target identifier.
    """
    width, height = input_size
    modern_factory = getattr(getattr(cv2, "FaceDetectorYN", None), "create", None)
    if modern_factory is not None:
        return modern_factory(model=model_path,
                              config="",
                              input_size=(width, height),
                              score_threshold=score_th,
                              nms_threshold=nms_th,
                              top_k=top_k,
                              backend_id=backend_id,
                              target_id=target_id)
    legacy_factory = getattr(cv2, "FaceDetectorYN_create", None)
    if legacy_factory is not None:
        return legacy_factory(model_path, "", (width, height),
                              score_th, nms_th, top_k,
                              backend_id, target_id)
    raise RuntimeError("OpenCV FaceDetectorYN not available. Please upgrade to 4.6+.")


def parse_yunet_outputs(dets: np.ndarray):
    """Unpack raw YuNet detections into structured tuples.

    Each input row has 15 values: [x, y, w, h, score, 10 landmark coords].
    Returns a list of ``(bbox, score, landmarks)`` where ``bbox`` is a tuple
    of ints (x, y, w, h), ``score`` a float, and ``landmarks`` a (5, 2) array.
    An empty list is returned for ``None`` or empty input.
    """
    if dets is None or len(dets) == 0:
        return []
    return [
        (tuple(int(v) for v in row[:4]), float(row[4]), row[5:].reshape(5, 2))
        for row in dets
    ]


# -------------------- ROS2 节点 --------------------
class DepthFaceNearestPublisher(Node):
    """ROS2 node: detect faces with YuNet on a RealSense color stream,
    keep only faces whose depth is within ``max_depth_m``, pick the
    nearest one, and publish the pixel error from image center plus depth
    on ``/result3_topic`` as geometry_msgs/Point (x=-ex px, y=ey px, z=m).

    Pressing 'q' in the OpenCV window shuts down rclpy.
    """

    def __init__(self, args):
        """Start the RealSense pipeline and the 30 Hz processing timer.

        :param args: argparse.Namespace produced by ``parse_args()``.
        """
        super().__init__('depth_face_nearest_publisher')
        self.pub = self.create_publisher(Point, '/result3_topic', 10)

        # RealSense: depth + color streams, both 640x480 @ 30 FPS.
        self.pipe = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.profile = self.pipe.start(cfg)

        # Align depth to the color frame so pixel coordinates match between streams.
        self.align = rs.align(rs.stream.color)
        depth_sensor = self.profile.get_device().first_depth_sensor()
        # Factor converting raw z16 depth units to meters.
        self.depth_scale = float(depth_sensor.get_depth_scale())
        self.get_logger().info(f"Depth scale: {self.depth_scale:.6f} m/unit")

        # Depth filtering (spatial/temporal filters could be added if needed).
        self.hole_filling = rs.hole_filling_filter(2)

        # Parameters taken from the command line.
        self.score_th = args.score_th          # YuNet detection confidence threshold
        self.nms_th = args.nms_th              # YuNet NMS threshold
        self.top_k = args.top_k                # max candidate detections
        self.depth_roi_r = args.depth_roi_r    # half-width (px) of the depth sampling window
        self.max_err_clip = args.max_err_clip  # clip limit for smoothed pixel errors
        self.max_depth_m = args.max_depth_m    # faces farther than this are ignored
        self.avg_len = args.avg_len            # smoothing window length

        # YuNet detector is created lazily on the first frame (needs frame size).
        self.detector = None
        self.input_size = None  # (W,H)
        self.model_path = args.model or "./face_detection_yunet_2023mar.onnx"

        # Sliding-window buffers used to smooth error-x, error-y and depth.
        self.buf_ex: List[float] = []
        self.buf_ey: List[float] = []
        self.buf_z : List[float] = []

        # Display window and the ~30 Hz processing timer.
        cv2.namedWindow('YuNet Nearest Face', cv2.WINDOW_NORMAL)
        self.prev_time = time.time()
        self.timer = self.create_timer(1.0/30.0, self.timer_cb)

    def timer_cb(self) -> None:
        """Per-tick pipeline: grab frames, detect faces, estimate each
        face's depth at the nose tip, select the nearest valid face,
        smooth the measurements, publish, and render the debug view.
        Returns early (publishing nothing) when no new frames are ready.
        """
        frames = self.pipe.poll_for_frames()
        if not frames:
            return
        frames = self.align.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            return

        # Fill depth holes before sampling.
        depth_frame = self.hole_filling.process(depth_frame)

        color = np.asanyarray(color_frame.get_data())
        depth = np.asanyarray(depth_frame.get_data())  # uint16
        H, W = color.shape[:2]
        cx, cy = W // 2, H // 2

        # Create (or re-create on resolution change) the YuNet detector.
        # NOTE(review): the else-branch calls setInputSize every frame even
        # though the size has not changed — looks redundant; confirm.
        if self.input_size is None or self.input_size != (W, H):
            self.input_size = (W, H)
            self.model_path = _download_yunet_if_needed(self.model_path)
            self.detector = create_yunet(self.model_path, self.input_size,
                                         self.score_th, self.nms_th, self.top_k)
        else:
            self.detector.setInputSize(self.input_size)

        # FPS estimate from wall-clock delta between callbacks.
        now = time.time()
        fps = 1.0 / (now - self.prev_time) if now != self.prev_time else 0.0
        self.prev_time = now

        # Face detection.
        dets = self.detector.detect(color)[1]
        faces = parse_yunet_outputs(dets)

        # For each face estimate a representative depth, drop faces beyond
        # the threshold, then select the nearest remaining one.
        candidates = []
        r = int(self.depth_roi_r)
        for (x, y, fw, fh), score, lms in faces:
            # Bounding-box center.
            fx = x + fw // 2
            fy = y + fh // 2

            # Landmarks -> nose tip (YuNet order: left eye, right eye,
            # nose tip, left mouth corner, right mouth corner).
            if lms is not None and lms.shape == (5, 2):
                roi_cx, roi_cy = lms[2].astype(int)  # nose tip
            else:
                roi_cx, roi_cy = fx, fy

            # Median depth over a small window, ignoring zero (invalid) pixels.
            x0, x1 = max(roi_cx - r, 0), min(roi_cx + r + 1, W)
            y0, y1 = max(roi_cy - r, 0), min(roi_cy + r + 1, H)
            patch = depth[y0:y1, x0:x1].astype(np.float32)
            valid = patch[patch > 0] * self.depth_scale  # m
            z_m = float(np.median(valid)) if valid.size > 0 else 0.0

            # Skip invalid or too-distant faces.
            if z_m <= 0.0:
                continue
            if z_m > self.max_depth_m:
                continue

            candidates.append(((x, y, fw, fh), (fx, fy), (roi_cx, roi_cy), z_m, score, lms))

        # Pick the nearest face (smallest z).
        if len(candidates) > 0:
            candidates.sort(key=lambda it: it[3])  # by z_m asc
            (x, y, fw, fh), (fx, fy), (roi_cx, roi_cy), z_m, score, lms = candidates[0]

            # Pixel error relative to the image center (cx, cy).
            ex = float(fx - cx)
            ey = float(fy - cy)

            # Visualization: bbox, landmarks, depth ROI, depth label.
            cv2.rectangle(color, (x, y), (x + fw, y + fh), (255, 0, 0), 2)
            if lms is not None:
                for (lx, ly) in lms.astype(int):
                    cv2.circle(color, (int(lx), int(ly)), 2, (0, 255, 255), -1)
            cv2.circle(color, (roi_cx, roi_cy), 3, (0, 255, 0), -1)  # ROI center
            cv2.rectangle(color,
                          (max(roi_cx - r, 0), max(roi_cy - r, 0)),
                          (min(roi_cx + r + 1, W), min(roi_cy + r + 1, H)),
                          (0, 255, 255), 1)
            cv2.putText(color, f"z={z_m:.3f}m", (x, max(0, y - 8)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 200, 255), 1)
        else:
            # No qualifying target (either no faces, or all beyond max_depth_m).
            ex, ey, z_m = 0.0, 0.0, 0.0

        # Smoothing and clipping. Invalid depth enters the buffer as NaN so
        # the median below can ignore it.
        self.buf_ex.append(ex); self.buf_ey.append(ey); self.buf_z.append(z_m if z_m > 0 else np.nan)
        if len(self.buf_ex) > self.avg_len:
            self.buf_ex.pop(0); self.buf_ey.pop(0); self.buf_z.pop(0)

        ex_s = float(np.nanmean(self.buf_ex)) if self.buf_ex else 0.0
        ey_s = float(np.nanmean(self.buf_ey)) if self.buf_ey else 0.0
        valid_z = [v for v in self.buf_z if not np.isnan(v) and v > 0]
        z_s = float(np.median(valid_z)) if len(valid_z) > 0 else 0.0

        clip = float(self.max_err_clip)
        ex_s = max(min(ex_s, clip), -clip)
        ey_s = max(min(ey_s, clip), -clip)

        # Publish (note the sign flip on x).
        msg = Point()
        msg.x = -ex_s
        msg.y = ey_s
        msg.z = z_s
        self.pub.publish(msg)

        # Overlay: center crosshair, smoothed error/depth, FPS.
        cv2.line(color, (cx - 10, cy), (cx + 10, cy), (0, 255, 0), 1)
        cv2.line(color, (cx, cy - 10), (cx, cy + 10), (0, 255, 0), 1)
        cv2.putText(color, f"Err(px): ({ex_s:.1f},{ey_s:.1f})", (10, H - 55),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
        cv2.putText(color, f"Depth(m): {z_s:.3f}", (10, H - 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 200, 0), 2)
        cv2.putText(color, f"FPS: {fps:.1f}", (10, 28),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        info = f"faces:{len(faces)} kept:{len(candidates)} max_depth:{self.max_depth_m:.2f}m"
        cv2.putText(color, info, (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 255, 200), 1)

        cv2.imshow('YuNet Nearest Face', color)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            rclpy.shutdown()

    def destroy_node(self) -> None:
        """Stop the RealSense pipeline (best-effort), close the OpenCV
        windows, and then run the base-class teardown."""
        try:
            self.pipe.stop()
        except Exception:
            pass
        cv2.destroyAllWindows()
        super().destroy_node()


# -------------------- CLI --------------------
def parse_args():
    """Define and parse this node's command-line options.

    Options are registered from a (flag, type, default, help) table so the
    full parameter set is visible at a glance.
    """
    parser = argparse.ArgumentParser()
    option_table = [
        ("--model", str, "", "YuNet onnx 模型路径；为空则尝试下载到 ./face_detection_yunet_2023mar.onnx"),
        ("--score_th", float, 0.9, "检测置信度阈值"),
        ("--nms_th", float, 0.3, "NMS 阈值"),
        ("--top_k", int, 5000, "最大候选数"),
        ("--depth_roi_r", int, 6, "鼻尖附近深度ROI半径(像素)"),
        ("--avg_len", int, 5, "误差/深度滑窗长度"),
        ("--max_err_clip", float, 120.0, "像素误差限幅"),
        ("--max_depth_m", float, 0.50, "最大接受的人脸深度(米)，超过则忽略"),
    ]
    for flag, value_type, default, help_text in option_table:
        parser.add_argument(flag, type=value_type, default=default, help=help_text)
    return parser.parse_args()


def main():
    """Entry point: initialize rclpy, spin the node, and clean up.

    ``timer_cb`` calls ``rclpy.shutdown()`` itself when 'q' is pressed, so
    the final shutdown here is guarded with ``rclpy.ok()`` — calling
    shutdown twice raises and would mask an otherwise clean exit.
    Ctrl-C is treated as a normal exit rather than a traceback.
    """
    args = parse_args()  # local; nothing else reads a global `args`
    rclpy.init()
    node = DepthFaceNearestPublisher(args)
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass
    finally:
        node.destroy_node()
        if rclpy.ok():
            rclpy.shutdown()


if __name__ == "__main__":
    main()
