# camera.py

import cv2
from pyorbbecsdk import Pipeline, Config, OBSensorType, OBFormat, OBError
from utils import frame_to_bgr_image
import time
import os
import numpy as np

ESC_KEY = 27


class Camera:
    """Thin wrapper around an Orbbec color-camera pipeline (pyorbbecsdk)."""

    def __init__(self):
        self.config = Config()
        self.pipeline = Pipeline()

    def configure_pipeline(self):
        """Enable a 640-wide RGB@30fps color stream, falling back to the
        device's default profile if that exact mode is unavailable.

        Returns:
            bool: True on success, False if the pipeline could not be configured.
        """
        try:
            profile_list = self.pipeline.get_stream_profile_list(
                OBSensorType.COLOR_SENSOR
            )
            try:
                # Height 0 lets the SDK choose any height for a 640-wide profile.
                color_profile = profile_list.get_video_stream_profile(
                    640, 0, OBFormat.RGB, 30
                )
            except OBError as e:
                print(e)
                color_profile = profile_list.get_default_video_stream_profile()
                print("Using default color profile:", color_profile)
            self.config.enable_stream(color_profile)
        except Exception as e:
            print("Failed to configure pipeline:", e)
            return False
        return True

    def start_pipeline(self):
        """Start streaming with the configured profiles.

        Returns:
            bool: True on success, False if the pipeline failed to start.
        """
        try:
            self.pipeline.start(self.config)
        except Exception as e:
            print("Failed to start pipeline:", e)
            return False
        return True

    def stop_pipeline(self):
        """Stop the streaming pipeline."""
        self.pipeline.stop()

    def get_rgb_cam_param(self):
        """Return the RGB camera's intrinsics in OpenCV layout.

        Returns:
            dict: {"mtx": 3x3 camera matrix as nested lists,
                   "dist": [k1, k2, p1, p2, k3] distortion coefficients}.
        """
        camera_param = self.pipeline.get_camera_param()
        intr = camera_param.rgb_intrinsic
        dist = camera_param.rgb_distortion
        # Camera intrinsic matrix (OpenCV convention).
        camera_matrix = np.array(
            [[intr.fx, 0.0, intr.cx],
             [0.0, intr.fy, intr.cy],
             [0.0, 0.0, 1.0]], dtype=np.float32
        )
        # Distortion coefficients in OpenCV order: k1, k2, p1, p2, k3.
        dist_coeffs = np.array(
            [dist.k1, dist.k2, dist.p1, dist.p2, dist.k3], dtype=np.float32
        )
        return {"mtx": camera_matrix.tolist(), "dist": dist_coeffs.tolist()}

    def get_frame(self):
        """Grab one color frame and convert it to a BGR image.

        Returns:
            numpy.ndarray | None: BGR image, or None on timeout, missing
            color frame, or conversion failure.
        """
        frames = self.pipeline.wait_for_frames(100)
        if frames is None:
            return None
        color_frame = frames.get_color_frame()
        if color_frame is None:
            return None

        color_image = frame_to_bgr_image(color_frame)
        if color_image is None:
            print("Failed to convert frame to image")
            return None

        return color_image

    def save_frame(self, frame, directory="img"):
        """Write *frame* to *directory* as frame_<timestamp>.jpg."""
        # exist_ok avoids the exists()/makedirs() TOCTOU race of the original.
        os.makedirs(directory, exist_ok=True)

        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = os.path.join(directory, f"frame_{timestamp}.jpg")
        cv2.imwrite(filename, frame)
        # BUG FIX: original printed the literal "(unknown)" — the {filename}
        # placeholder had been lost from the f-string.
        print(f"Saved frame as {filename}")

if __name__ == "__main__":
    camera = Camera()
    if not camera.configure_pipeline():
        exit()
    if not camera.start_pipeline():
        exit()

    start_time = time.time()
    frame_count = 0
    try:
        while True:
            color_image = camera.get_frame()
            if color_image is not None:
                cv2.imshow("Color Viewer", color_image)

                # Measure FPS over roughly one-second windows.
                frame_count += 1
                elapsed_time = time.time() - start_time
                if elapsed_time > 1.0:
                    fps = frame_count / elapsed_time
                    print(f"FPS: {fps:.2f}")
                    start_time = time.time()
                    frame_count = 0

            # BUG FIX: always pump the GUI event loop. The original did
            # `continue` on a None frame, skipping waitKey entirely, so the
            # window froze and q/ESC stopped working whenever frames were
            # unavailable (timeout, camera unplugged).
            key = cv2.waitKey(1)
            if key == ord("q") or key == ESC_KEY:
                break
            elif key == ord("w") and color_image is not None:
                camera.save_frame(color_image)  # save current frame to default dir
    except KeyboardInterrupt:
        pass
    finally:
        camera.stop_pipeline()
        cv2.destroyAllWindows()
