import cv2
import numpy as np
from openni import openni2
from ultralytics import YOLO

# -------------------- 1. Load the YOLO model --------------------
# Segmentation checkpoint; only the detection boxes are consumed below.
model = YOLO(r"C:\Users\Administrator\PycharmProjects\astra\yolov8n-seg.pt")

# -------------------- 2. Initialize the OpenNI2 depth device --------------------
openni2.initialize()
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
# NOTE(review): set_image_registration_mode expects an image-registration enum;
# True works only because it equals IMAGE_REGISTRATION_DEPTH_TO_COLOR (1).
# Prefer passing the named constant explicitly — confirm against the binding.
dev.set_image_registration_mode(True)
depth_stream.start()

# -------------------- 3. Open the RGB camera (via cv2) --------------------
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("RGB camera not found.")
    exit()
# Request 640x480@30 to match the depth stream's 480x640 buffer layout used below.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap.set(cv2.CAP_PROP_FPS, 30)

# -------------------- 4. Camera intrinsics (Matlab calibration result) --------------------
# Units: pixels. fx/fy are focal lengths; cx/cy is the principal point.
# NOTE(review): cx/cy ~ (250, 257) is noticeably off-center for a 640x480 image
# (expected near 320, 240), and fx/fy ~ 2400 px is large for this sensor —
# verify these values came from the same resolution used at runtime.
intrinsics = {
    "fx": 2.41623998e+03,
    "fy": 2.39505951e+03,
    "cx": 2.50517588e+02,
    "cy": 2.57066059e+02
}

# -------------------- 5. Helper: pixel + depth -> 3D camera coordinates --------------------
def depth_to_camera_coords(x, y, depth_value, intrinsics):
    """Back-project pixel (x, y) with a depth reading (mm) into camera-frame XYZ.

    Uses the pinhole model with the given intrinsics (fx, fy, cx, cy in pixels).
    Returns a numpy array [X, Y, Z] in meters, rounded to 3 decimals.
    """
    z_m = depth_value / 1000.0  # sensor reports millimeters; convert to meters
    x_m = (x - intrinsics["cx"]) * z_m / intrinsics["fx"]
    y_m = (y - intrinsics["cy"]) * z_m / intrinsics["fy"]
    return np.round([x_m, y_m, z_m], 3)

# -------------------- 6. Window setup --------------------
# Create both display windows up front so imshow targets are predictable.
cv2.namedWindow("Depth Image")
cv2.namedWindow("Color Image")

# -------------------- 7. Main loop --------------------
print("Press 'q' to quit.")

while True:
    # --- Grab a depth frame (uint16 millimeters, 480 rows x 640 cols) ---
    frame = depth_stream.read_frame()
    dframe_data = np.frombuffer(frame.get_buffer_as_uint16(), dtype=np.uint16).reshape(480, 640)
    depth_image = dframe_data.astype(np.float32)

    # FIX: the color image is mirrored below (cv2.flip), but the depth map was
    # not, so depth_image[y, x] read the wrong column for every off-center
    # detection. Mirror the depth map too so both images stay pixel-aligned.
    depth_image = cv2.flip(depth_image, 1)

    # Render the depth map for display (scale mm into 8-bit, then colorize)
    dim_gray = cv2.convertScaleAbs(depth_image, alpha=0.07)
    depth_colormap = cv2.applyColorMap(dim_gray, cv2.COLORMAP_JET)

    # --- Grab a color frame ---
    ret, color_image = cap.read()
    if not ret:
        print("未能读取彩色图像")
        break

    # Horizontal flip (undo the mirror effect of the webcam)
    color_image = cv2.flip(color_image, 1)

    # --- YOLO detection ---
    results = model.predict(color_image, save=False, verbose=False)
    im_array = results[0].plot()

    # Compute the 3D camera-frame coordinates of each detection-box center.
    # xywh gives (center_x, center_y, width, height); only the center is used.
    for box in results[0].boxes.xywh.cpu().numpy().astype(int):
        x, y = box[0], box[1]
        if not (0 <= y < depth_image.shape[0] and 0 <= x < depth_image.shape[1]):
            continue
        depth_value = depth_image[y, x]
        # FIX: a zero reading means the sensor has no valid depth at this
        # pixel; skip it instead of annotating a bogus (0, 0, 0) coordinate.
        if depth_value <= 0:
            continue
        coords = depth_to_camera_coords(x, y, depth_value, intrinsics)

        # Draw the center point and its (X, Y, Z) in meters
        cv2.circle(im_array, (x, y), 4, (255, 255, 255), -1)
        text = f"({coords[0]:.3f},{coords[1]:.3f},{coords[2]:.3f})"
        cv2.putText(im_array, text, (x + 10, y + 10),
                    cv2.FONT_HERSHEY_PLAIN, 0.7, (255, 255, 255),
                    1, cv2.LINE_AA)

    # --- Display ---
    cv2.imshow("Depth Image", depth_colormap)
    cv2.imshow("Color Image", im_array)

    # --- Quit key ---
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# -------------------- 8. Release resources --------------------
# Stop the depth stream before closing the device, then unload OpenNI2;
# finally release the cv2 capture and windows.
depth_stream.stop()
dev.close()
openni2.unload()
cap.release()
cv2.destroyAllWindows()
