#!/usr/bin/env python3
"""
前后(Z)深度闭环版 step4_test.py
- 相机:Intel RealSense D405
  1) 从 RealSense 获取对齐到彩色的深度与彩色图像。
  2) OpenCV Haar 级联做人脸检测,计算人脸中心与图像中心的像素误差 (ex, ey)。
  3) 在人脸中心附近取一个小窗口的深度中位数作为目标深度 z(单位:米)。
  4) 发布 geometry_msgs/Point 到 /result3_topic:
     x = -ex(像素),y = ey(像素),z = depth_m(米)。
  5) 叠加可视化信息与 FPS。

依赖:
  pip install pyrealsense2 opencv-python numpy

运行:
  python3 step4_test_realsense_depth_frontback.py
"""

import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Point

import cv2
import numpy as np
import time

# RealSense SDK
import pyrealsense2 as rs

class DepthFaceErrorPublisher(Node):
    """Track the largest face seen by a RealSense D405 and publish its
    pixel offset from the image center plus its depth.

    Per frame:
      1. Grab a depth/color pair, with depth aligned to color.
      2. Run an OpenCV Haar cascade; pick the largest face.
      3. Take the median depth (meters) of a small window around the
         face center.
      4. Publish geometry_msgs/Point on ``/result3_topic`` with
         x = -ex (pixels), y = ey (pixels), z = depth (meters).
      5. Draw an annotated preview window ('q' quits).
    """

    # Fallback depth (meters) published when no face is detected or the
    # depth window contains no valid samples. Tune for your setup.
    DEFAULT_DEPTH_M = 0.25

    def __init__(self):
        super().__init__('depth_face_error_publisher')
        self.pub = self.create_publisher(Point, '/result3_topic', 10)

        # === RealSense setup ===
        self.pipe = rs.pipeline()
        self.cfg = rs.config()
        # The D405 generally supports 640x480@30; adjust these values if
        # pipeline start fails on your device.
        self.cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.profile = self.pipe.start(self.cfg)

        # Align depth frames into the color camera's frame of reference.
        self.align = rs.align(rs.stream.color)

        # Depth scale: raw z16 value * scale = meters.
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = float(depth_sensor.get_depth_scale())  # meters per unit
        self.get_logger().info(f"Depth scale: {self.depth_scale} m/unit")

        # Optional hole-filling filter (strength 0/1/2).
        self.hole_filling = rs.hole_filling_filter(2)

        # === Haar face cascade ===
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        if self.face_cascade.empty():
            raise RuntimeError('无法加载 Haar 级联模型')

        cv2.namedWindow('Face Error Preview', cv2.WINDOW_NORMAL)
        self.get_logger().info("节点已启动,按 'q' 退出窗口")

        # Timer at roughly 30 Hz drives the processing loop.
        self.prev_time = time.time()
        self.timer = self.create_timer(1.0 / 30.0, self.timer_cb)

        # Moving-average buffers (pixel errors and depth in meters).
        self.N = 5
        self.buf_ex, self.buf_ey, self.buf_z = [], [], []

        # Half-width (pixels) of the depth sampling window around the
        # face center — (2r+1)^2 samples. Tunable, roughly 3..10.
        self.depth_roi_r = 6

    def timer_cb(self):
        """Process one frame pair: detect, measure, smooth, publish, draw."""
        # Non-blocking fetch; skip this tick if no frames have arrived.
        frames = self.pipe.poll_for_frames()
        if not frames:
            return
        frames = self.align.process(frames)

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            return

        # Optional depth hole filling.
        depth_frame = self.hole_filling.process(depth_frame)

        # To numpy. Depth is uint16 and must be multiplied by
        # depth_scale to obtain meters.
        color = np.asanyarray(color_frame.get_data())
        depth = np.asanyarray(depth_frame.get_data())

        h, w = color.shape[:2]
        cx, cy = w // 2, h // 2

        # FPS. Guard on a positive dt rather than exact float equality:
        # the original `now != prev_time` check still allowed division
        # by a near-zero dt, yielding absurd FPS readings.
        now = time.time()
        dt = now - self.prev_time
        fps = 1.0 / dt if dt > 1e-6 else 0.0
        self.prev_time = now

        # Face detection (scaleFactor=1.1, minNeighbors=5).
        gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(60, 60))

        ex, ey, z_m = 0.0, 0.0, 0.0

        if len(faces) > 0:
            # Track the largest detected face.
            x, y, fw, fh = max(faces, key=lambda r: r[2] * r[3])
            fx = x + fw // 2
            fy = y + fh // 2

            # Pixel error relative to the image center (center = 0).
            ex = float(fx - cx)
            ey = float(fy - cy)

            # Depth window around the face center, clipped to the image.
            r = self.depth_roi_r
            x0, x1 = max(fx - r, 0), min(fx + r + 1, w)
            y0, y1 = max(fy - r, 0), min(fy + r + 1, h)
            patch = depth[y0:y1, x0:x1].astype(np.float32)

            # Drop invalid (zero) readings and convert to meters.
            valid = patch[patch > 0] * self.depth_scale
            if valid.size > 0:
                z_m = float(np.median(valid))  # median is robust to outliers
            else:
                z_m = self.DEFAULT_DEPTH_M  # no valid depth in the window

            # Visualization: face box, face center, depth ROI.
            cv2.rectangle(color, (x, y), (x + fw, y + fh), (255, 0, 0), 2)
            cv2.circle(color, (fx, fy), 5, (0, 255, 0), -1)
            cv2.rectangle(color, (x0, y0), (x1, y1), (0, 255, 255), 1)
        else:
            # No face: ex/ey stay 0 and the fallback depth is published.
            z_m = self.DEFAULT_DEPTH_M

        # --- Moving average to reduce jitter ---
        self.buf_ex.append(ex); self.buf_ey.append(ey); self.buf_z.append(z_m)
        if len(self.buf_ex) > self.N:
            self.buf_ex.pop(0); self.buf_ey.pop(0); self.buf_z.pop(0)
        ex_s = float(np.mean(self.buf_ex))
        ey_s = float(np.mean(self.buf_ey))
        z_s = float(np.median([v for v in self.buf_z if v > 0])) if any(v > 0 for v in self.buf_z) else 0.0

        # Clamp pixel errors to avoid large jumps downstream.
        ex_s = max(min(ex_s, 100.0), -100.0)
        ey_s = max(min(ey_s, 100.0), -100.0)

        # Publish (x, y: pixels; z: meters). Note: x is intentionally
        # negated, as in the original control convention.
        msg = Point()
        msg.x = -ex_s
        msg.y = ey_s
        msg.z = z_s  # meters
        self.pub.publish(msg)

        # Overlay: center crosshair, errors, depth, FPS.
        cv2.line(color, (cx - 10, cy), (cx + 10, cy), (0, 255, 0), 1)
        cv2.line(color, (cx, cy - 10), (cx, cy + 10), (0, 255, 0), 1)
        cv2.putText(color, f"Err(px): ({ex_s:.1f},{ey_s:.1f})", (10, h - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
        cv2.putText(color, f"Depth(m): {z_s:.3f}", (10, h - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 200, 0), 2)
        cv2.putText(color, f"FPS: {fps:.1f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        cv2.imshow('Face Error Preview', color)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            rclpy.shutdown()

    def destroy_node(self):
        """Stop the camera pipeline and close windows before tearing down."""
        try:
            self.pipe.stop()
        except Exception:
            # Best-effort: the pipeline may already be stopped.
            pass
        cv2.destroyAllWindows()
        super().destroy_node()


def main(args=None):
    """Entry point: init rclpy, spin the node, and clean up on exit."""
    rclpy.init(args=args)
    node = DepthFaceErrorPublisher()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop the node; exit quietly.
        pass
    finally:
        node.destroy_node()
        # timer_cb already calls rclpy.shutdown() when 'q' is pressed;
        # shutting down an already-invalid context raises, so guard.
        if rclpy.ok():
            rclpy.shutdown()

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
