#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Example usage:
  python predict_orange.py
This will:
  1. Pull a single depth/color frame from depth_data.bin / color_data.bin using DepthColorProcessor.
  2. Read "detected_objects.json" for bounding boxes, pick the first object, compute 3D pose in base_link frame.
  3. Print the result.
  4. Exit.
"""

import json
import numpy as np
from scipy.spatial.transform import Rotation as R#type: ignore
import pyrealsense2 as rs#type: ignore      # Make sure pyrealsense2 is installed

# Import our DepthColorProcessor from read_depth_color.py
from DepthColorProcessor import DepthColorProcessor
from armMotion import armMotion
from Serial_Control import SerialControl
from typing import Union
from config import port

# -----------------------------
# 1) Hand-eye / Robot transforms
# The RealSense transform chain is:
#    base_link -> end_link -> camera_link -> pixel
# We therefore need T_base_to_end (forward kinematics) and T_hand_eye
# (end_link -> camera_link calibration). The hand-eye values below were
# taken from `predict_PC.py`; adjust them to match the actual system.
# -----------------------------
class TransformPos:
    """Convert a detected object's pixel position into a 6-DoF pose in base_link.

    Chains three transforms:
      base_link -> end_link   (forward kinematics from the arm),
      end_link  -> camera_link (fixed hand-eye calibration),
      camera_link -> target    (depth deprojection at the object's pixel).
    """

    def __init__(self, serial, dcp):
        # armMotion supplies forward kinematics for the current arm pose.
        self.armmotion = armMotion(serial, dcp)
        # DepthColorProcessor reads the saved depth/color frames from disk.
        self.dcp = dcp

    def get_base2end_end2cam(self):
        """Return (T_base_to_end, T_hand_eye) as 4x4 homogeneous matrices.

        Returns:
        --------
        tuple[np.ndarray, np.ndarray]
            T_base_to_end : base_link -> end_link from forward kinematics.
            T_hand_eye    : end_link -> camera_link from the fixed hand-eye
                            calibration constants below.
        """
        # Current arm pose from forward kinematics.
        # NOTE(review): assumes forwardK() returns the quaternion in
        # [x, y, z, w] order as scipy expects — confirm against armMotion.
        translation_base_to_end, quaternion_base_to_end = self.armmotion.forwardK()
        # Former hard-coded test pose, kept for reference:
        # translation_base_to_end = np.array([-0.073, 0.007, 0.072])  # [x, y, z] (m)
        # quaternion_base_to_end = [0.655, 0.590, -0.316, -0.349]      # [x, y, z, w]

        # Fixed hand-eye calibration (end_link -> camera_link).
        translation_end_to_camera = np.array([-0.0214793, -0.0486306, -0.0450709])  # x, y, z (m)
        quaternion_end_to_camera = [0.9956458782, -0.0894632558, 0.0188356071, 0.0181887597]  # w, x, y, z
        # scipy's R.from_quat(...) expects [x, y, z, w]; reorder accordingly.
        q_w, q_x, q_y, q_z = quaternion_end_to_camera
        quaternion_end_to_camera_xyzw = [q_x, q_y, q_z, q_w]

        def build_transform(translation_m, quat_xyzw):
            """Build a 4×4 homogeneous transform from translation + [x,y,z,w] quaternion."""
            rotation_3x3 = R.from_quat(quat_xyzw).as_matrix()
            T = np.eye(4)
            T[:3, :3] = rotation_3x3
            T[:3, 3] = translation_m
            return T

        T_base_to_end = build_transform(translation_base_to_end, quaternion_base_to_end)
        T_end_to_camera = build_transform(translation_end_to_camera, quaternion_end_to_camera_xyzw)
        # T_hand_eye is the (end_link -> camera_link) transform.
        T_hand_eye = T_end_to_camera
        return T_base_to_end, T_hand_eye

    def compute_target_pose_in_base_link(self,
                                         ux: int,
                                         uy: int,
                                         distance_m: float,
                                         depth_intri,
                                         T_base_to_end: np.ndarray,
                                         T_hand_eye: np.ndarray) -> dict:
        """
        Given a pixel (ux, uy) and the distance in meters at that pixel, compute the 3D pose
        of the target in the base_link frame.

        Parameters:
        -----------
        ux : int
            The pixel's x-coordinate (column index).
        uy : int
            The pixel's y-coordinate (row index).
        distance_m : float
            The distance in meters at pixel (ux, uy).
        depth_intri : rs.intrinsics
            The RealSense intrinsics for the depth frame.
        T_base_to_end : np.ndarray
            A 4x4 homogeneous transform from base_link to end_link.
        T_hand_eye : np.ndarray
            A 4x4 homogeneous transform from end_link to camera_link
            (i.e. the hand-eye transform).

        Returns:
        --------
        dict
            {"x", "y", "z"} translation (m) and {"qx", "qy", "qz", "qw"}
            quaternion of the target in the base_link frame.
        """
        # 1) Deproject pixel (ux, uy) + depth to a 3D point in the camera frame.
        camera_xyz = rs.rs2_deproject_pixel_to_point(depth_intri, (ux, uy), distance_m)

        # 2) Sign flip mirroring predict_orig.py.
        # NOTE(review): presumably compensates for the camera mounting
        # orientation — confirm against the physical setup.
        camera_xyz = np.array(camera_xyz) * -1

        # 3) camera_link -> target_link: pure translation (rotation left as identity).
        camera_link_to_target = np.eye(4)
        camera_link_to_target[:3, 3] = camera_xyz

        # 4) Chain the transforms: base_link -> target_link.
        target_in_base = T_base_to_end @ T_hand_eye @ camera_link_to_target

        # 5) Extract translation.
        tx, ty, tz = target_in_base[0, 3], target_in_base[1, 3], target_in_base[2, 3]

        # 6) Extract rotation as an [x, y, z, w] quaternion.
        rot_matrix = target_in_base[:3, :3]
        r = R.from_matrix(rot_matrix)
        qx, qy, qz, qw = r.as_quat()

        # 7) Plain dict instead of a ROS PoseStamped.
        return {
            "x": tx,
            "y": ty,
            "z": tz,
            "qx": qx,
            "qy": qy,
            "qz": qz,
            "qw": qw,
        }

    def imgPos2BasePos(self, recog_obj: dict):
        """Compute the base_link pose for one detected object.

        Parameters:
        -----------
        recog_obj : dict
            Detection record with "pos" = [ux, uy] pixel coordinates and
            optional "label" / "conf" fields.

        Returns:
        --------
        dict | None
            The pose dict from compute_target_pose_in_base_link, or None
            when the pixel is out of range or has no valid depth.
        """
        # 1) Get relevant transforms.
        T_base_to_end, T_hand_eye = self.get_base2end_end2cam()

        # 2) Read the saved depth/color frame.
        depth_data, color_data = self.dcp.read_depth_color(
            depth_bin="depth_data.bin",
            color_bin="color_data.bin"
        )

        print("[INFO] Loaded depth_data.shape =", depth_data.shape,
              "color_data.shape =", color_data.shape)

        ux = int(recog_obj["pos"][0])
        uy = int(recog_obj["pos"][1])
        label = recog_obj.get("label", "<unknown>")
        conf = recog_obj.get("conf", 0.0)

        # 3) Check pixel range (row index uy against height, column ux against width).
        if (uy < 0 or uy >= depth_data.shape[0]) or (ux < 0 or ux >= depth_data.shape[1]):
            print(f"[ERROR] Pixel (ux={ux}, uy={uy}) is out of range.")
            return None

        depth_raw_value = depth_data[uy, ux]  # e.g., 16-bit raw
        print(f"[INFO] The raw depth at pixel ({ux}, {uy}) is: {depth_raw_value}")

        # A raw value of 0 means the sensor has no depth at this pixel;
        # computing a pose from it would place the target at the camera origin.
        if depth_raw_value == 0:
            print(f"[ERROR] No valid depth at pixel (ux={ux}, uy={uy}).")
            return None

        # Convert raw depth to meters (assuming the raw units are mm).
        distance_m = float(depth_raw_value) * 0.001

        # ---------------------------------------------------------------------
        # Hard-coded RealSense intrinsics rather than retrieved at runtime.
        # NOTE(review): these must match the device that produced
        # depth_data.bin — confirm if the camera or stream profile changes.
        # ---------------------------------------------------------------------
        depth_intri = rs.intrinsics()
        depth_intri.width = 640
        depth_intri.height = 480
        depth_intri.ppx = 320.483
        depth_intri.ppy = 242.454
        depth_intri.fx = 383.232
        depth_intri.fy = 383.232

        # 4) Compute target pose in base_link.
        target_pose_base = self.compute_target_pose_in_base_link(
            ux=ux,
            uy=uy,
            distance_m=distance_m,
            depth_intri=depth_intri,
            T_base_to_end=T_base_to_end,
            T_hand_eye=T_hand_eye
        )

        # 5) Report and return the result.
        print("----------------------------------------------------------")
        print(f"[RESULT] Object label = {label}, conf = {conf}")
        print(f"[RESULT] target_pose in base_link frame = {target_pose_base}")
        print("----------------------------------------------------------")
        return target_pose_base
def _main():
    """Script entry point: load detections and compute the first object's pose."""
    with open("detected_objects.json", "r") as f:
        det = json.load(f)
    objs = det.get("objs", [])
    if not objs:
        print("[WARNING] No objects found in detected_objects.json - nothing to compute.")
        # raise SystemExit instead of exit(): the latter is a site-module
        # convenience that is not guaranteed to exist (e.g. under python -S).
        raise SystemExit
    first_obj = objs[0]
    transform_pos = TransformPos(
        SerialControl(port, 115200),
        DepthColorProcessor(interactive_plots=False),
    )
    transform_pos.imgPos2BasePos(first_obj)


if __name__ == "__main__":
    _main()