"""
Intel RealSense Camera Interface for ReKep
Compatible with OGCamera interface
"""

import pyrealsense2 as rs
import numpy as np
import cv2
from typing import Dict, Tuple, Optional
import sys
import os

# Add parent directory to path
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# from og_utils import pixel_to_3d_points


class RealSenseCamera:
    """
    Wrapper for an Intel RealSense camera (targets the L515 LiDAR camera).

    Provides an observation interface compatible with OGCamera: each call to
    get_obs() returns an aligned RGB image, a metric depth image, a per-pixel
    3D point cloud, a simple depth-based segmentation mask, and the camera
    intrinsics/extrinsics.
    """

    def __init__(self, width=640, height=480, fps=30):
        """
        Initialize the RealSense camera and start streaming.

        Args:
            width: RGB/depth image width in pixels
            height: RGB/depth image height in pixels
            fps: Frames per second for both streams

        Note:
            Not every (width, height, fps) combination is supported by the
            hardware; 640x480 @ 30fps is a known-valid configuration.
        """
        self.pipeline = rs.pipeline()
        config = rs.config()

        # Enable depth and color streams at the requested resolution.
        config.enable_stream(rs.stream.depth, width, height, rs.format.z16, fps)
        config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, fps)

        # Start pipeline
        profile = self.pipeline.start(config)

        # Query the device-reported depth scale (meters per raw Z16 unit).
        # This must NOT be hard-coded: the L515 reports 0.00025 m/unit while
        # D4xx-series cameras report 0.001 m/unit.
        depth_sensor = profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()

        # Get intrinsic parameters of both streams
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
        self.depth_intrinsics = depth_profile.get_intrinsics()
        self.color_intrinsics = color_profile.get_intrinsics()

        # Aligner that reprojects the depth frame into the color frame's
        # viewpoint, so depth pixel (u, v) matches color pixel (u, v).
        self.align = rs.align(rs.stream.color)

        # 3x3 pinhole intrinsics matrix of the color stream (the stream the
        # depth is aligned to).
        self.intrinsics = self._get_intrinsics_matrix(self.color_intrinsics)

        # Extrinsics (world -> camera); identity until set via set_extrinsics()
        self.extrinsics = np.eye(4)

        print(f"RealSense camera initialized: {width}x{height} @ {fps}fps")

    def _get_intrinsics_matrix(self, intrinsics) -> np.ndarray:
        """
        Convert RealSense intrinsics to a 3x3 pinhole camera matrix.

        Args:
            intrinsics: RealSense intrinsics object (has fx, fy, ppx, ppy)

        Returns:
            (3, 3) numpy array [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
        """
        return np.array([
            [intrinsics.fx, 0, intrinsics.ppx],
            [0, intrinsics.fy, intrinsics.ppy],
            [0, 0, 1]
        ])

    def get_obs(self) -> Optional[Dict]:
        """
        Get camera observation data.
        Compatible with OGCamera.get_obs() interface.

        Returns:
            None if a frame could not be captured, otherwise a dict with:
            - rgb: (H, W, 3) RGB image (uint8)
            - depth: (H, W) depth image in meters (float32)
            - points: (H, W, 3) 3D point cloud in camera frame
            - seg: (H, W) segmentation mask (int32)
            - intrinsic: (3, 3) camera intrinsics matrix
            - extrinsic: (4, 4) camera extrinsics matrix
        """
        # Wait for a coherent pair of frames
        frames = self.pipeline.wait_for_frames()

        # Align depth frame to color frame
        aligned_frames = self.align.process(frames)

        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            return None

        # Convert to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        rgb_image = np.asanyarray(color_frame.get_data())

        # Convert raw Z16 units to meters using the device-reported scale
        # (see __init__; hard-coding 1/1000 is wrong for the L515).
        depth_image = depth_image.astype(np.float32) * self.depth_scale

        # Back-project depth into a per-pixel 3D point cloud
        points = self._depth_to_points(depth_image, rgb_image.shape[:2])

        # Generate a simple segmentation mask (can be replaced with SAM)
        mask = self._generate_mask(depth_image)

        return {
            "rgb": rgb_image,
            "depth": depth_image,
            "points": points,
            "seg": mask,
            "intrinsic": self.intrinsics,
            "extrinsic": self.extrinsics
        }

    def _depth_to_points(self, depth: np.ndarray, shape: Tuple) -> np.ndarray:
        """
        Convert a depth image to a 3D point cloud (camera frame).
        Uses the same back-projection as og_utils.pixel_to_3d_points.

        Args:
            depth: (H, W) depth image in meters
            shape: image shape (height, width)

        Returns:
            (H, W, 3) point cloud; invalid pixels (depth <= 0) are (0, 0, 0)
        """
        H, W = shape
        fx, fy = self.intrinsics[0, 0], self.intrinsics[1, 1]
        cx, cy = self.intrinsics[0, 2], self.intrinsics[1, 2]

        # Grid of (x, y) pixel coordinates; i is the column (x), j the row (y)
        i, j = np.meshgrid(np.arange(W), np.arange(H), indexing='xy')

        # Pinhole back-projection: (u, v, z) -> (x, y, z) in camera frame
        z = depth
        x = (i - cx) * z / fx
        y = (j - cy) * z / fy

        # Stack to form (H, W, 3)
        camera_coordinates = np.stack([x, y, z], axis=-1)

        # Zero out points with no valid depth measurement
        valid_mask = z > 0
        camera_coordinates[~valid_mask] = [0, 0, 0]

        return camera_coordinates

    def _generate_mask(self, depth: np.ndarray, min_dist: float = 0.25, max_dist: float = 2.0) -> np.ndarray:
        """
        Generate a simple segmentation mask based on depth thresholds.

        Args:
            depth: (H, W) depth image in meters
            min_dist: minimum valid distance in meters (0.25m for L515)
            max_dist: maximum valid distance in meters (L515 range up to 9m)

        Returns:
            (H, W) int32 mask: 1 where min_dist < depth < max_dist, else 0
        """
        mask = np.zeros_like(depth, dtype=np.int32)

        # L515 has better accuracy at close range (0.25-2m), so the defaults
        # keep only that band.
        valid_mask = np.logical_and(depth > min_dist, depth < max_dist)
        mask[valid_mask] = 1

        # Optional: connected-component analysis could separate objects here;
        # this can be replaced with SAM or other advanced methods.
        return mask

    def set_extrinsics(self, extrinsics: np.ndarray):
        """
        Set camera extrinsics (transformation from world to camera).

        Args:
            extrinsics: (4, 4) transformation matrix
        """
        self.extrinsics = extrinsics

    def stop(self):
        """Stop the camera pipeline."""
        self.pipeline.stop()

    def get_params(self) -> Dict:
        """
        Get camera parameters.

        Returns:
            Dictionary containing intrinsics and extrinsics
        """
        return {
            "intrinsics": self.intrinsics,
            "extrinsics": self.extrinsics
        }


def test_camera():
    """
    Interactive smoke test to verify camera connection and frame capture.
    For L515 camera users. Opens OpenCV windows; press any key to exit.
    """
    try:
        print("="*60)
        print("RealSense L515 Camera Test")
        print("="*60)
        print("\nInitializing L515 LiDAR camera...")
        print("Note: Ensure firmware is >= 1.5.8.1")
        print("Download: https://dev.realsenseai.com/docs/firmware-releases-l500\n")

        camera = RealSenseCamera(width=640, height=480)

        print("Capturing frame...")
        obs = camera.get_obs()

        if obs is None:
            print("ERROR: Failed to capture frame")
            print("\nTroubleshooting:")
            print("1. Check USB 3.0 connection")
            print("2. Ensure camera is powered on")
            print("3. Update firmware if needed")
            return

        rgb = obs['rgb']
        depth = obs['depth']

        print(f"✓ RGB shape: {rgb.shape}")
        print(f"✓ Depth shape: {depth.shape}")
        print(f"✓ Depth range: [{depth.min():.3f}, {depth.max():.3f}] meters")
        print(f"\nL515 effective range: 0.25-9.0m")
        print(f"Recommended for desktop: 0.25-2.0m")

        # The camera streams RGB, but OpenCV's imshow expects BGR — convert
        # so the displayed colors are correct.
        cv2.imshow("RGB - L515", cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
        # Normalize depth for display; guard against an all-zero depth frame
        # (division by zero).
        depth_max = depth.max()
        depth_vis = (depth / depth_max * 255) if depth_max > 0 else depth
        cv2.imshow("Depth - L515 (Close to exit)", depth_vis.astype(np.uint8))

        print("\nPress any key to exit...")
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        camera.stop()
        print("\n✓ Test completed successfully!")

    except Exception as e:
        print(f"\nERROR: {e}")
        print("\nTroubleshooting for L515:")
        print("1. Ensure L515 is connected via USB 3.0")
        print("2. Check pyrealsense2 installation: pip install pyrealsense2")
        print("3. Verify firmware version: rs-enumerate-devices")
        print("4. See L515_Camera_Setup.md for detailed guide")
        import traceback
        traceback.print_exc()


# Run the interactive camera smoke test when executed as a script.
if __name__ == "__main__":
    test_camera()

