from pyorbbecsdk import *
import cv2
import numpy as np
from utils import frame_to_bgr_image
import sys
import time
import copy
import argparse
from PIL import Image

sys.path.append("yolox")
# print(sys.path)
from yolo import YOLO

ESC_KEY = 27
RATE_INTERVAL = 30  # frames between FPS printouts
MIN_DEPTH = 20  # 20mm
MAX_DEPTH = 10000  # 10000mm


class TemporalFilter:
    """Exponential moving-average smoother for successive depth frames.

    Each call blends the incoming frame with the previously emitted one:
    ``out = alpha * frame + (1 - alpha) * previous``.
    """

    def __init__(self, alpha):
        # alpha: weight of the newest frame (0..1); higher = less smoothing.
        self.alpha = alpha
        self.previous_frame = None

    def process(self, frame):
        """Blend ``frame`` with the last output and return the result.

        The very first frame is passed through unchanged (nothing to blend
        against yet). The emitted frame becomes the new blending reference.
        """
        previous = self.previous_frame
        if previous is None:
            blended = frame
        else:
            blended = cv2.addWeighted(frame, self.alpha, previous, 1 - self.alpha, 0)
        self.previous_frame = blended
        return blended


def _str_to_bool(value):
    """Parse a boolean-ish CLI string; "1"/"true"/"yes"/"y"/"on" -> True."""
    return str(value).strip().lower() in ("1", "true", "yes", "y", "on")


def main(argv):
    """Run an aligned color+depth viewer with YOLO object detection.

    Opens the Orbbec pipeline, enables color and depth streams with the
    requested depth-to-color alignment, runs YOLO on every color frame, and
    overlays each detection's 3D position (camera coordinates, in mm)
    back-projected from the aligned depth map via the pinhole camera model.

    Args:
        argv: command-line argument list (e.g. ``sys.argv[1:]``).
    """
    pipeline = Pipeline()
    device = pipeline.get_device()
    device_info = device.get_device_info()
    device_pid = device_info.get_pid()
    config = Config()
    temporal_filter = TemporalFilter(alpha=0.5)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--mode",
        help="align mode, HW=hardware mode,SW=software mode,NONE=disable align",
        type=str,
        default="HW",
    )
    # BUG FIX: the original used type=bool, but bool("False") is True in
    # Python, so "-s False" could never disable sync. Parse the string
    # explicitly; the default (True) is unchanged.
    parser.add_argument(
        "-s", "--enable_sync", help="enable sync", type=_str_to_bool, default=True
    )
    # BUG FIX: argv was previously ignored (parse_args() re-read sys.argv).
    args = parser.parse_args(argv)
    align_mode = args.mode
    enable_sync = args.enable_sync
    try:
        profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
        color_profile = profile_list.get_default_video_stream_profile()
        config.enable_stream(color_profile)
        profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
        assert profile_list is not None
        depth_profile = profile_list.get_default_video_stream_profile()
        assert depth_profile is not None
        print(
            "color profile : {}x{}@{}_{}".format(
                color_profile.get_width(),
                color_profile.get_height(),
                color_profile.get_fps(),
                color_profile.get_format(),
            )
        )
        print(
            "depth profile : {}x{}@{}_{}".format(
                depth_profile.get_width(),
                depth_profile.get_height(),
                depth_profile.get_fps(),
                depth_profile.get_format(),
            )
        )
        config.enable_stream(depth_profile)
    except Exception as e:
        print(e)
        return
    if align_mode == "HW":
        if device_pid == 0x066B:
            # Femto Mega does not support hardware D2C, and it is changed to software D2C
            config.set_align_mode(OBAlignMode.SW_MODE)
        else:
            config.set_align_mode(OBAlignMode.HW_MODE)
    elif align_mode == "SW":
        config.set_align_mode(OBAlignMode.SW_MODE)
    else:
        config.set_align_mode(OBAlignMode.DISABLE)
    if enable_sync:
        try:
            pipeline.enable_frame_sync()
        except Exception as e:
            print(e)
    try:
        pipeline.start(config)
    except Exception as e:
        print(e)
        return
    yolo = YOLO()
    rec_frames = 0
    last_time = time.time()
    # =============================================================
    # Fetch the RGB camera intrinsics (focal lengths fx/fy and principal
    # point cx/cy) needed to back-project pixels into 3D camera coordinates.
    camera_param: OBCameraParam = pipeline.get_camera_param()
    fx = camera_param.rgb_intrinsic.fx
    fy = camera_param.rgb_intrinsic.fy
    cx = camera_param.rgb_intrinsic.cx
    cy = camera_param.rgb_intrinsic.cy
    print(f"camera's parameter fx:{fx}, fy:{fy}, cx:{cx}, cy:{cy}")
    # =============================================================

    while True:
        try:
            frames: FrameSet = pipeline.wait_for_frames(100)
            if frames is None:
                continue
            # Grab the color frame and convert it to a BGR numpy image.
            color_frame = frames.get_color_frame()
            if color_frame is None:
                continue
            color_image = frame_to_bgr_image(color_frame)
            if color_image is None:
                print("failed to convert frame to image")
                continue

            # Grab the depth frame; raw data is uint16, scaled to millimeters.
            depth_frame = frames.get_depth_frame()
            if depth_frame is None:
                continue
            width = depth_frame.get_width()
            height = depth_frame.get_height()
            scale = depth_frame.get_depth_scale()
            depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
            depth_data = depth_data.reshape((height, width))
            depth_data = depth_data.astype(np.float32) * scale
            # Clamp out-of-range readings to 0 and temporally smooth the map.
            depth_data = np.where(
                (depth_data > MIN_DEPTH) & (depth_data < MAX_DEPTH), depth_data, 0
            )
            depth_data = depth_data.astype(np.uint16)
            depth_data = temporal_filter.process(depth_data)
            # Run YOLO detection on the RGB image (the wrapper wants PIL RGB).
            frame = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            frame = Image.fromarray(np.uint8(frame))
            frame, image_boxes = yolo.detect_image(frame)
            frame = np.array(frame)
            color_image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            # Back-project every pixel to camera coordinates with the pinhole
            # model: X = Z*(u-cx)/fx, Y = Z*(v-cy)/fy.
            # NOTE(review): pixel indices here are 1-based; the standard
            # pinhole model uses 0-based u/v, so this is off by one pixel —
            # confirm whether that was intended.
            row, column, _ = color_image.shape
            u, v = np.meshgrid(np.arange(1, column + 1), np.arange(1, row + 1))
            # .copy() is equivalent to (and cheaper than) deepcopy for ndarrays.
            im_Z = depth_data.copy()
            im_X = im_Z * (u - cx) / fx
            im_Y = im_Z * (v - cy) / fy
            # Annotate each detected box with the 3D position of its center.
            # NOTE(review): image_box appears to be (top, left, bottom, right,
            # ..., color) from the YOLO wrapper — confirm against yolo.py.
            for image_box in image_boxes:
                y = int((image_box[0] + image_box[2]) / 2)
                x = int((image_box[1] + image_box[3]) / 2)
                point_x = im_X[y, x]
                point_y = im_Y[y, x]
                point_z = im_Z[y, x]
                # Euclidean distance from the camera origin to the point.
                dis = (point_x**2 + point_y**2 + point_z**2) ** 0.5
                text_cxy = "*"
                cv2.putText(
                    color_image,
                    text_cxy,
                    (int(x), int(y)),
                    cv2.FONT_ITALIC,
                    1.2,
                    (0, 0, 255),
                    3,
                )
                text_x = "x:%.1fmm" % (point_x)
                text_y = "y:%.1fmm" % (point_y)
                text_z = "z:%.1fmm" % (point_z)
                text_dis = "dis:%.1fmm" % (dis)
                # Filled background rectangle for the label text, placed just
                # to the right of the detection box.
                rectangle_width = 180
                rectangle_height = 110
                cv2.rectangle(
                    color_image,
                    (
                        int(image_box[1] + (image_box[3] - image_box[1])),
                        int(image_box[0]),
                    ),
                    (
                        int(
                            image_box[1]
                            + (image_box[3] - image_box[1])
                            + rectangle_width
                        ),
                        int(image_box[0] + rectangle_height),
                    ),
                    image_box[6],
                    -1,
                )
                cv2.putText(
                    color_image,
                    text_x,
                    (
                        int(image_box[1] + (image_box[3] - image_box[1]) + 5),
                        int(image_box[0] + 25),
                    ),
                    cv2.FONT_ITALIC,
                    0.8,
                    (255, 255, 255),
                    2,
                )
                cv2.putText(
                    color_image,
                    text_y,
                    (
                        int(image_box[1] + (image_box[3] - image_box[1]) + 5),
                        int(image_box[0] + 50),
                    ),
                    cv2.FONT_ITALIC,
                    0.8,
                    (255, 255, 255),
                    2,
                )
                cv2.putText(
                    color_image,
                    text_z,
                    (
                        int(image_box[1] + (image_box[3] - image_box[1]) + 5),
                        int(image_box[0] + 75),
                    ),
                    cv2.FONT_ITALIC,
                    0.8,
                    (255, 255, 255),
                    2,
                )
                # putText coordinates are the BOTTOM-LEFT corner of the text.
                cv2.putText(
                    color_image,
                    text_dis,
                    (
                        int(image_box[1] + (image_box[3] - image_box[1]) + 5),
                        int(image_box[0] + 100),
                    ),
                    cv2.FONT_ITALIC,
                    0.8,
                    (255, 255, 255),
                    2,
                )

            # Print the observed frame rate every RATE_INTERVAL frames.
            rec_frames += 1
            if rec_frames > RATE_INTERVAL:
                current_time = time.time()
                print(f"camera rate: {rec_frames / (current_time - last_time)}")
                rec_frames = 0
                last_time = time.time()

            # Normalize the depth map to 8-bit and colorize it for display.
            depth_image = cv2.normalize(
                depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U
            )
            depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
            # NOTE(review): the depth weight here is 0, so only the annotated
            # color image is actually displayed; raise the second weight to
            # truly blend the depth colormap in.
            depth_image = cv2.addWeighted(color_image, 1, depth_image, 0, 0)

            cv2.imshow("SyncAlignViewer ", depth_image)
            key = cv2.waitKey(1)
            if key == ord("q") or key == ESC_KEY:
                break
        except KeyboardInterrupt:
            break
    pipeline.stop()


if __name__ == "__main__":
    # Entry point: forward CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
