from __future__ import annotations

from pathlib import Path
from typing import List, Tuple
import io


def _capture_realsense_color(sample: Path) -> Path:
    """Capture one color frame from a RealSense camera and save it to `sample`.

    Parameters
    ----------
    sample : Path
        Destination image path; parent directories are created as needed.

    Returns
    -------
    Path
        The same `sample` path, once the frame has been written to disk.

    Raises
    ------
    RuntimeError
        If no color frame arrives or the image cannot be written.
    """
    import pyrealsense2 as rs
    import numpy as np
    import cv2

    sample.parent.mkdir(parents=True, exist_ok=True)

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    # Start OUTSIDE the try block: calling stop() on a pipeline that never
    # started raises its own error and would mask the original startup failure.
    pipeline.start(config)
    try:
        # Warm-up frames so auto-exposure settles before the real capture.
        for _ in range(15):
            pipeline.wait_for_frames()

        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            raise RuntimeError("未获取到彩色帧")

        color_image = np.asanyarray(color_frame.get_data())  # BGR layout
        # Explicit raise instead of assert: asserts are stripped under -O.
        ok = cv2.imwrite(str(sample), color_image)
        if not (ok and sample.exists()):
            raise RuntimeError(f"保存失败: {sample}")
        return sample
    finally:
        pipeline.stop()


def _yolo_obb_boxes(weights: Path, sample: Path) -> List[Tuple[float, float, float, float, float]]:
    """Run YOLOv8-OBB inference on `sample` using the `weights` checkpoint.

    Parameters
    ----------
    weights : Path
        Path to the trained `best.pt` checkpoint.
    sample : Path
        Path to the image to run detection on.

    Returns
    -------
    List of ``(x_center, y_center, width, height, angle_deg)`` tuples, one per
    detected oriented bounding box (pixel coordinates, angle in degrees).

    Raises
    ------
    FileNotFoundError
        If the weights file or the sample image does not exist.
    """
    from ultralytics import YOLO
    import numpy as np

    # Explicit raises instead of asserts: asserts are stripped under -O.
    if not weights.exists():
        raise FileNotFoundError(f"best.pt 不存在: {weights}")
    if not sample.exists():
        raise FileNotFoundError(f"指定图片不存在: {sample}")

    model = YOLO(str(weights))
    results = model(source=str(sample), conf=0.5)

    boxes: List[Tuple[float, float, float, float, float]] = []
    for r in results:
        # Skip results that carry no oriented-box output (e.g. non-OBB model).
        if getattr(r, "obb", None) is None or not hasattr(r.obb, "xywhr"):
            continue
        # Move tensors to CPU numpy for safe, framework-free iteration.
        xywhr = r.obb.xywhr.detach().cpu().numpy()
        # conf/cls are not needed for the downstream prints.
        for row in xywhr:
            x_center, y_center, width, height, angle_rad = map(float, row)
            angle_deg = float(np.degrees(angle_rad))
            # Normalize: when the box is wider than tall, shift the reported
            # angle by 90° so it consistently refers to the same box axis.
            if width > height:
                angle_deg += 90.0
            boxes.append((x_center, y_center, width, height, angle_deg))
    return boxes


def _last_cell_print(sample: Path, boxes: List[Tuple[float, float, float, float, float]]) -> List:
    """Replicate the prints from the notebook's last cell.

    Back-projects each detected box center from pixel coordinates to camera
    coordinates using an aligned depth image (loaded from files saved next to
    `sample` if present, otherwise captured live from the RealSense camera).

    Parameters: `sample` is the color image path; `boxes` holds
    (x_center, y_center, width, height, angle_deg) tuples in pixel units.

    Returns a list where general info lines are strings, and each target line
    is a tuple: (index, X, Y, Z, angle_deg, u_pixel, v_pixel) with X/Y/Z in
    meters.  NOTE(review): as written, only tuples are ever appended; the
    general-info strings go to stdout via print() instead.
    """
    import cv2
    import numpy as np
    import json
    import pyrealsense2 as rs

    if not sample.exists():
        raise RuntimeError(f"无法读取图片: {sample}")

    # Read the image back to recover its pixel dimensions.
    img = cv2.imread(str(sample))
    assert img is not None, f"无法读取图片: {sample}"
    H, W = img.shape[:2]

    out_lines: List = []
    print(f"图片尺寸: width={W}, height={H}")
    print(str([list(map(float, b)) for b in boxes]))

    # Load the aligned depth map and intrinsics if both sidecar files exist;
    # otherwise fall back to grabbing a live aligned frame from the camera.
    depth_path = sample.with_name(f"{sample.stem}_depth.png")
    intr_path = sample.with_name(f"{sample.stem}_intrinsics.json")

    use_files = depth_path.exists() and intr_path.exists()

    if use_files:
        # Informational print (mirrors the notebook output).
        print(f"使用已保存的深度与内参: {depth_path.name}, {intr_path.name}")
        depth_img = cv2.imread(str(depth_path), cv2.IMREAD_UNCHANGED)
        assert depth_img is not None and depth_img.ndim == 2, f"无法读取深度图: {depth_path}"
        with open(intr_path, "r", encoding="utf-8") as f:
            intr = json.load(f)
        fx = float(intr["fx"]); fy = float(intr["fy"])
        ppx = float(intr["ppx"]); ppy = float(intr["ppy"])
        depth_scale = float(intr.get("depth_scale", 0.001))  # default 0.001 m per unit
    else:
        # No saved sidecar files: capture one depth frame aligned to color.
        
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.color, W, H, rs.format.bgr8, 30)
        config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
        align = rs.align(rs.stream.color)
        try:
            profile = pipeline.start(config)
            depth_sensor = profile.get_device().first_depth_sensor()
            depth_scale = float(depth_sensor.get_depth_scale())

            # Warm-up with retries so auto-exposure settles and early
            # timeouts don't abort the capture.
            for _ in range(30):
                try:
                    pipeline.wait_for_frames(1000)
                except Exception:
                    pass

            frames = None
            for _ in range(10):
                try:
                    frames = pipeline.wait_for_frames(2000)
                    if frames:
                        break
                except Exception:
                    continue
            if frames is None:
                raise RuntimeError("相机帧在超时内未到达，请检查USB连接/分辨率/占用进程")

            aligned = align.process(frames)
            depth_frame = aligned.get_depth_frame()
            color_frame = aligned.get_color_frame()
            assert depth_frame and color_frame, "未获取到对齐后的彩色或深度帧"

            # Intrinsics of the COLOR stream (depth is aligned to color).
            vprofile = color_frame.profile.as_video_stream_profile()
            intrinsics = vprofile.get_intrinsics()
            fx, fy = float(intrinsics.fx), float(intrinsics.fy)
            ppx, ppy = float(intrinsics.ppx), float(intrinsics.ppy)
            depth_img = np.asanyarray(depth_frame.get_data())
        finally:
            pipeline.stop()

    def get_depth_m(u, v, ksize=7):
        # Median depth over a ksize x ksize window around pixel (u, v),
        # ignoring zero (invalid) depth values; returns meters or None.
        u = int(round(u)); v = int(round(v))
        h, w = depth_img.shape[:2]
        # Clamp to image bounds before extracting the window.
        u = max(0, min(w - 1, u)); v = max(0, min(h - 1, v))
        r = ksize // 2
        x0, x1 = max(0, u - r), min(w, u + r + 1)
        y0, y1 = max(0, v - r), min(h, v + r + 1)
        patch = depth_img[y0:y1, x0:x1].astype(np.float32)
        valid = patch[patch > 0]
        if valid.size == 0:
            return None
        return float(np.median(valid)) * float(depth_scale)

    print(
        f"相机内参: fx={fx:.2f}, fy={fy:.2f}, ppx={ppx:.2f}, ppy={ppy:.2f}, depth_scale={depth_scale}"
    )

    # Back-project each box center to camera coordinates (pinhole model)
    # and collect the results as tuples.
    for i, (xc, yc, w_box, h_box, ang) in enumerate(boxes, start=1):
        Z = get_depth_m(xc, yc, ksize=7)
        if Z is None:
            # No valid depth in the window around this center: skip target.
            continue
        X = (xc - ppx) * Z / fx
        Y = (yc - ppy) * Z / fy
        # Append tuple: (index, X, Y, Z, angle_deg, u, v)
        out_lines.append((int(i), float(X), float(Y), float(Z), float(ang), float(xc), float(yc)))

    return out_lines


def run_inference(
    captures_dir: Path = Path("/home/robot1/JuliusWorkplace/Tongji_Intelligent_Systems_Lab/OBB_yolov8/captures"),
    weights: Path = Path("/home/robot1/JuliusWorkplace/runs_obb/yolov8_obb_local/weights/best.pt"),
) -> List:
    """Run the original notebook flow and return the list of outputs.

    Parameters
    ----------
    captures_dir : Path, optional
        Directory where the captured frame is stored; defaults to the
        notebook's hard-coded location (paths are now parameters so the
        pipeline can run on other machines without editing the source).
    weights : Path, optional
        Path to the YOLOv8-OBB checkpoint; defaults to the notebook's path.

    Returns
    -------
    List
        General info lines are strings; per-target results are tuples:
        (index, X, Y, Z, angle_deg, u_pixel, v_pixel).
    """
    sample = Path(captures_dir) / "realsense.jpg"
    weights = Path(weights)

    # 1) Capture one color frame to `sample`
    _capture_realsense_color(sample)

    # 2) YOLO OBB inference to produce `boxes`
    boxes = _yolo_obb_boxes(weights, sample)

    # 3) Replicate the last cell prints and return as structured list
    return _last_cell_print(sample, boxes)


if __name__ == "__main__":
    # Standalone entry point: run the capture → detect → project pipeline
    # and dump the structured results after a separator line.
    inference_output = run_inference()
    print("----------results----------")
    print(inference_output)
