import time
import cv2
import numpy as np
from pyorbbecsdk import  * 

from driver.utils import frame_to_bgr_image

ESC_KEY = 27        # keycode for the Escape key (used to quit the demo loop)
PRINT_INTERVAL = 1  # seconds between console depth printouts in the demo
MIN_DEPTH = 20      # mm; readings at or below this are masked as invalid (0)
MAX_DEPTH = 6000   # mm; readings at or above this are masked as invalid (0)

class TemporalFilter:
    """Exponential moving-average smoother for 16-bit depth maps.

    Blends each incoming frame with the previously emitted one:
    ``out = alpha * current + (1 - alpha) * previous``. Pixels with no
    valid echo (value 0) are excluded so invalid readings neither
    propagate into the history nor dampen fresh measurements.
    """

    def __init__(self, alpha: float = 0.5):
        # Weight of the current frame; (1 - alpha) weights the history.
        self.alpha = alpha
        # Previously emitted (already smoothed) frame; None before first call.
        self.previous = None

    def process(self, frame: np.ndarray) -> np.ndarray:
        """Return a temporally smoothed copy of *frame*.

        :param frame: depth image; 0 marks pixels with no valid reading
        :return: smoothed image, same shape/dtype; zeros are preserved
        """
        # First frame, or the resolution changed mid-stream: restart history
        # (previously a shape change crashed the weighted blend).
        if self.previous is None or self.previous.shape != frame.shape:
            result = frame.copy()
        else:
            prev = self.previous
            # Blend in float32 to avoid uint16 overflow, then round back.
            blended = (self.alpha * frame.astype(np.float32)
                       + (1.0 - self.alpha) * prev.astype(np.float32))
            result = np.rint(blended).astype(frame.dtype)
            # BUGFIX: a pixel that just became valid (previous == 0) must not
            # be averaged with the invalid zero — take the fresh reading as-is.
            newly_valid = (frame > 0) & (prev == 0)
            result[newly_valid] = frame[newly_valid]
            # Pixels with no echo in the current frame stay invalid.
            result[frame == 0] = 0
        self.previous = result
        return result

class OrbbecCamera:
    """
    Unified Orbbec camera handling for color and depth streams.

    Wraps a pyorbbecsdk ``Pipeline``: stream profiles are configured at
    construction; use :meth:`start`, then poll :meth:`read_frames` and
    query metric depth with :meth:`get_depth_at`; call :meth:`stop` when done.
    """
    def __init__(
        self,
        color: bool = True,
        depth: bool = True,
        width: int = 640,
        height: int = 480,
        fps: int = 60,
        depth_timeout: int = 10,
        temporal_alpha: float = 0.5
    ):
        """
        :param color: enable the color stream
        :param depth: enable the depth stream
        :param width: requested frame width in pixels
        :param height: requested frame height in pixels
        :param fps: requested frame rate
        :param depth_timeout: timeout passed to wait_for_frames
            (presumably milliseconds — TODO confirm against the SDK)
        :param temporal_alpha: weight of the current frame in temporal smoothing
        """
        self.color_enabled = color
        self.depth_enabled = depth
        self.width = width
        self.height = height
        self.fps = fps
        self.depth_timeout = depth_timeout
        self.temporal_filter = TemporalFilter(alpha=temporal_alpha)
        # Latest smoothed depth map in millimetres (uint16); None until
        # read_frames() has decoded at least one depth frame.
        self.latest_depth = None

        self.pipeline = Pipeline()
        self.config = Config()
        self._configure_streams()

    def _configure_streams(self):
        """Enable the requested streams on ``self.config``, falling back to
        each sensor's default profile when the exact width/height/format/fps
        combination is not supported by the device."""
        # Enable color stream
        if self.color_enabled:
            profiles = self.pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
            try:
                color_prof = profiles.get_video_stream_profile(
                    self.width, self.height, OBFormat.RGB, self.fps
                )
            except OBError:
                color_prof = profiles.get_default_video_stream_profile()
            self.config.enable_stream(color_prof)

        # Enable depth stream
        if self.depth_enabled:
            # NOTE: an unused ThresholdFilter was removed here — it was built
            # but never applied to any frame. Depth range clamping is done in
            # software in read_frames() via MIN_DEPTH/MAX_DEPTH instead.
            profiles = self.pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
            try:
                depth_prof = profiles.get_video_stream_profile(
                    self.width, self.height, OBFormat.Y16, self.fps
                )
            except OBError:
                depth_prof = profiles.get_default_video_stream_profile()
            self.config.enable_stream(depth_prof)

    def start(self):
        """Start the configured streams."""
        self.pipeline.start(self.config)

    def read_frames(self):
        """
        Retrieve the latest color and depth frames (if enabled).

        Side effect: updates ``self.latest_depth`` with the smoothed metric
        depth map (mm, uint16) so get_depth_at() can answer point queries.

        :return: tuple (color_img, depth_img) where either may be None;
                 depth_img is an 8-bit JET-colormapped visualization only.
        """
        color_img = None
        depth_img = None
        frames: FrameSet = self.pipeline.wait_for_frames(self.depth_timeout)
        if not frames:
            return None, None

        # Color
        if self.color_enabled:
            cf = frames.get_color_frame()
            if cf:
                color_img = frame_to_bgr_image(cf)

        # Depth
        if self.depth_enabled:
            df = frames.get_depth_frame()
            if df and df.get_format() == OBFormat.Y16:
                w = df.get_width()
                h = df.get_height()
                scale = df.get_depth_scale()
                raw = np.frombuffer(df.get_data(), dtype=np.uint16).reshape((h, w))

                # Convert raw units to millimetres and zero out readings
                # outside the trusted (MIN_DEPTH, MAX_DEPTH) range.
                data = raw.astype(np.float32) * scale
                data = np.where((data > MIN_DEPTH) & (data < MAX_DEPTH), data, 0).astype(np.uint16)
                # Apply temporal smoothing; invalid zeros are preserved.
                data = self.temporal_filter.process(data)
                # Keep the metric map for get_depth_at() queries.
                self.latest_depth = data
                # Normalize & colormap for display purposes only.
                norm = cv2.normalize(data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
                depth_img = cv2.applyColorMap(norm, cv2.COLORMAP_JET)

        return color_img, depth_img

    def get_depth_at(self, x: int, y: int) -> float:
        """
        Get the measured depth (in mm) at pixel (x, y).
        Requires depth stream enabled and read_frames() called at least once.

        :raises RuntimeError: if no depth frame has been read yet
        :raises ValueError: if (x, y) lies outside the depth image
        """
        depth = getattr(self, 'latest_depth', None)
        if depth is None:
            raise RuntimeError("No depth frame available. Call read_frames() first.")
        h, w = depth.shape
        if x < 0 or x >= w or y < 0 or y >= h:
            raise ValueError(f"Coordinates out of bounds: (0,0) to ({w-1},{h-1})")
        return float(depth[y, x])

    def stop(self):
        """Stop the streams and cleanup."""
        self.pipeline.stop()

# # Example usage:
# if __name__ == '__main__':
#     cam = OrbbecCamera(color=True, depth=True)
#     cam.start()
#     last_print = time.time()
#     try:
#         while True:
#             color_img, depth_img = cam.read_frames()
#             if color_img is not None:
#                 cv2.imshow('Color', color_img)
#             if depth_img is not None:
#                 cv2.imshow('Depth', depth_img)
#                 # Print the center-pixel depth once per second
#                 if time.time() - last_print >= PRINT_INTERVAL:
#                     h, w = cam.latest_depth.shape
#                     d = cam.get_depth_at(w//2, h//2)
#                     print(f"Center depth: {d:.1f} mm")
#                     last_print = time.time()

#             key = cv2.waitKey(1)
#             if key in (ord('q'), ESC_KEY):
#                 break
#     except KeyboardInterrupt:
#         pass
#     finally:
#         cam.stop()
#         cv2.destroyAllWindows()
