import json
import logging
from pathlib import Path
from typing import Optional, Tuple

import cv2
import numpy as np
import pyrealsense2 as rs

log = logging.getLogger(__name__)


class CameraBaseTransforamtion:
    """Camera-to-robot-base coordinate transform loaded from calibration JSON.

    NOTE(review): the class name keeps the historical misspelling
    ("Transforamtion") for backward compatibility with existing callers.
    """

    def __init__(self, calib_dir: str, suffix: str = ""):
        """Load the homogeneous transform matrix from ``<calib_dir>/cali_matrix<suffix>.json``.

        Args:
            calib_dir: Directory containing the calibration file.
            suffix: Optional filename suffix selecting among calibrations.

        Raises:
            FileNotFoundError: If the calibration file does not exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        calib_file = Path(calib_dir) / f"cali_matrix{suffix}.json"

        with open(calib_file, "r") as f:
            data = json.load(f)
        # Expected to be a 4x4 homogeneous matrix — TODO confirm against the
        # calibration writer.
        self.camera_base_matrix = np.array(data)

        # BUG FIX: previously logged the (closed) file object `f` instead of
        # the calibration file path.
        log.info(f"loading CameraBaseTransforamtion from {calib_file}")

    def __call__(self, camera_xyz: np.ndarray) -> np.ndarray:
        """Transform a 3D camera-frame point into the base frame.

        Appends a homogeneous 1 and right-multiplies as a row vector:
        ``[x, y, z, 1] @ M``. Returns the length-4 homogeneous result
        (not de-homogenized).
        """
        return np.append(camera_xyz, 1) @ self.camera_base_matrix


class Camera:
    """Intel RealSense camera wrapper.

    Starts a color + depth pipeline, aligns depth to the color stream, and
    provides helpers to capture frames, annotate/save images, and deproject
    pixel coordinates to 3D points in the camera frame.
    """

    # Max milliseconds wait_for_frames() blocks before pyrealsense2 errors out.
    wait_timeout: int = 10000

    def __init__(self, model: str = "L515"):
        """Load intrinsics for *model* and start the RealSense pipeline.

        Args:
            model: Camera model name; selects ``camera/<model>/intrinsics.npz``.
        """
        # BUG FIX: previously printed a hard-coded "L515" regardless of the
        # `model` argument.
        print(f"Use camera model: {model}")

        intrinsic_file = f"camera/{model}/intrinsics.npz"
        self.model = model

        # Load camera intrinsics (matrix + distortion coefficients).
        self.camera_matrix, self.dist_coeffs = self._load_camera_intrinsics(intrinsic_file)
        log.info("\n已加载相机内参：")
        log.info("\n相机内参矩阵:")
        log.info(self.camera_matrix)
        log.info("\n畸变系数:")
        log.info(self.dist_coeffs)

        # Initialize the camera: image data can be read from the pipeline.
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.profile = self.pipeline.start(self.config)

        # Align depth frames to the color stream's viewpoint so pixel
        # coordinates match between the two streams.
        self.align = rs.align(rs.stream.color)

    @staticmethod
    def _load_calibration(calib_dir: Path) -> np.ndarray:
        """Load the camera-to-base calibration matrix from ``cali_matrix.json``."""
        calib_file = Path(calib_dir) / "cali_matrix.json"
        with open(calib_file, "r") as f:
            data = json.load(f)
        return np.array(data)

    @staticmethod
    def _load_camera_intrinsics(intrinsic_file: str) -> Tuple[np.ndarray, np.ndarray]:
        """Load the intrinsic matrix and distortion coefficients from an .npz file."""
        print(f"load camera intrinsics from {intrinsic_file}")
        data = np.load(intrinsic_file)
        return data["camera_matrix"], data["dist_coeffs"]

    def capture_current_frame(self) -> np.ndarray:
        """Block until a frame set arrives and return the aligned color image.

        Returns:
            BGR image as an (H, W, C) uint8 array.
        """
        frames = self.pipeline.wait_for_frames(self.wait_timeout)
        aligned_frames = self.align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        return color_image  # (HxWxC)

    def annotate_image(
        self,
        color_image: np.ndarray,
        pixel_xy: Tuple[int, int],
        text: str,
        color: Tuple[int, int, int],
    ) -> np.ndarray:
        """Draw a filled circle at *pixel_xy* and a label above-left of it.

        Mutates *color_image* in place and also returns it for chaining.
        """
        x, y = pixel_xy
        cv2.circle(color_image, pixel_xy, 5, color, -1)
        cv2.putText(color_image, text, (x - 20, y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
        return color_image

    def save_current_image(self, filename: str = "/home/knowin/codes/knowin-robot/imgs/current_view.jpg") -> Tuple[str, np.ndarray]:
        """Capture the current color frame and save it to *filename*.

        Returns:
            The filename and the captured BGR image.
        """
        color_image = self.capture_current_frame()

        cv2.imwrite(filename, color_image)
        # BUG FIX: the f-string had no placeholder and always logged "(unknown)".
        log.info(f"Save current frame to: {filename}")
        return filename, color_image

    def save_annotated_image(
        self,
        pick_point_pixel: Tuple[int, int],
        place_point_pixel: Tuple[int, int],
        save_file: str = "/home/knowin/codes/knowin-robot/imgs/annotated_current_view.jpg",
        src_file: Optional[str] = None,
        tag: str = "",
    ) -> str:
        """Annotate PICK (green) and PLACE (blue) points on an image and save it.

        Args:
            pick_point_pixel: (x, y) of the pick point; falsy to skip.
            place_point_pixel: (x, y) of the place point; falsy to skip.
            save_file: Destination path for the annotated image.
            src_file: Optional image path to annotate; captures a live frame
                when None.
            tag: Prefix prepended to the "PICK"/"PLACE" labels.

        Returns:
            The path the annotated image was saved to.

        Raises:
            FileNotFoundError: If *src_file* is given but cannot be read.
        """
        if src_file is None:
            color_image = self.capture_current_frame()
        else:
            color_image = cv2.imread(src_file)
            # cv2.imread returns None (no exception) on a missing/unreadable
            # file; fail loudly instead of crashing later inside cv2.circle.
            if color_image is None:
                raise FileNotFoundError(f"cannot read image: {src_file}")

        # Reuse the shared annotation helper instead of duplicating the
        # circle/putText drawing code (same parameters as before).
        if pick_point_pixel:
            # annotate PICK point in green color
            self.annotate_image(color_image, pick_point_pixel, f"{tag}PICK", (0, 255, 0))

        if place_point_pixel:
            # annotate PLACE point in blue color
            self.annotate_image(color_image, place_point_pixel, f"{tag}PLACE", (255, 0, 0))

        # save images
        cv2.imwrite(save_file, color_image)
        log.info(f"Saved annotated image to: {save_file}")
        return save_file

    def get_3d_point_from_pixel(self, x: int, y: int) -> Optional[np.ndarray]:
        """Deproject pixel (x, y) to a 3D point in the camera coordinate frame.

        Returns:
            A length-3 array [X, Y, Z] in meters, or None when the input is
            incomplete, frames are unavailable, or the depth reading is 0
            (invalid).
        """
        if x is None or y is None:
            return None

        # Grab a fresh, aligned frame set.
        frames = self.pipeline.wait_for_frames(self.wait_timeout)
        aligned_frames = self.align.process(frames)

        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            print("无法获取帧")
            return None

        # Depth at the requested pixel; 0 means the sensor has no valid
        # reading there.
        depth_value = depth_frame.get_distance(x, y)
        if depth_value == 0:
            print("无法获取有效深度值")
            return None

        # Use the depth stream's own intrinsics for deprojection.
        intrinsics = depth_frame.profile.as_video_stream_profile().get_intrinsics()

        # Back-project the pixel into 3D space.
        point_3d = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], depth_value)
        return np.array(point_3d)


if __name__ == "__main__":
    # Smoke test: open the camera and grab one frame.
    # BUG FIX: the variable was named `self` at module level, which is
    # misleading (shadows the conventional method-receiver name).
    camera = Camera()
    current_view = camera.capture_current_frame()
