import cv2
import numpy as np
import pyrealsense2 as rs
from ultralytics import YOLO
import os
from time import time, sleep
from datetime import datetime
from flask import Flask, request, jsonify

# Flask application exposing the camera/detection service over HTTP (see /detect below).
app = Flask(__name__)


class CameraService:
    """RealSense camera + YOLO object-detection service.

    Captures color/depth frames aligned to the color stream, detects a named
    target with a YOLO model, and converts the detection into a position in
    the gripper frame using a fixed hand-eye calibration (R/T below).
    """

    def __init__(self):
        # Directory where captured frames are archived for debugging.
        self.image_save_dir = "captured_images"
        os.makedirs(self.image_save_dir, exist_ok=True)

        # --- RealSense pipeline setup ---
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        # Both streams at 640x480 @ 30 fps; color delivered as RGB (not BGR).
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.profile = self.pipeline.start(self.config)

        # Depth-to-color alignment object, created once and reused per frame
        # (previously a new rs.align was constructed on every get_frames call).
        self.align = rs.align(rs.stream.color)

        # Depth scale converts raw z16 units to meters.
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        print(f"深度比例系数: {self.depth_scale}")

        # Color-camera pinhole intrinsics, used for back-projection to 3D.
        color_profile = self.profile.get_stream(rs.stream.color)
        color_intr = color_profile.as_video_stream_profile().get_intrinsics()
        self.color_intr = {
            "fx": color_intr.fx,
            "fy": color_intr.fy,
            "ppx": color_intr.ppx,
            "ppy": color_intr.ppy
        }
        print("相机内参:", self.color_intr)

        # Hand-eye calibration: rotation from camera frame to gripper frame...
        self.R_cam2gripper = np.array(
            [[0, -1, 0],
             [1, 0, 0],
             [0, 0, 1]]
        )
        # ...and translation (meters). Values are machine-specific calibration.
        self.T_cam2gripper = np.array([0.092, -0.017, -0.121])

        # Load YOLO weights (machine-specific absolute path).
        self.model = YOLO("/home/rm/Orsen/New_Retail/best.pt")
        self._first_predict_done = False  # set after one warm-up inference
        print("系统初始化完成")

    def get_frames(self):
        """Return one aligned (color, depth) frame pair as numpy arrays.

        Returns:
            tuple: (color RGB uint8 HxWx3, depth z16 HxW) arrays.
        Raises:
            RuntimeError: if either frame is unavailable.
        """
        frames = self.pipeline.wait_for_frames()
        # Align the depth frame into the color camera's viewpoint.
        aligned_frames = self.align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()
        if not color_frame or not depth_frame:
            raise RuntimeError("无法获取相机帧")
        return (
            np.asanyarray(color_frame.get_data()),
            np.asanyarray(depth_frame.get_data())
        )

    def save_images(self, color_image, box, target_name):
        """Save the raw frame and, if a box is given, a copy with the box drawn.

        NOTE(review): in stream mode predict() re-captures frames, so the box
        may come from a later frame than `color_image` — confirm acceptable.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename_base = f"{timestamp}_{target_name}"

        # Save the unmarked frame (convert RGB -> BGR for OpenCV writing).
        original_path = os.path.join(self.image_save_dir, f"{filename_base}_original.jpg")
        cv2.imwrite(original_path, cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))

        # If a detection box exists, save a second copy with it drawn in green.
        if box is not None:
            marked_img = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            cv2.rectangle(marked_img,
                          (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])),
                          (0, 255, 0), 2)
            marked_path = os.path.join(self.image_save_dir, f"{filename_base}_marked.jpg")
            cv2.imwrite(marked_path, marked_img)

    def predict(self, color_image, depth_image, target_name, stream_mode=False, timeout=60):
        """Detect `target_name` and pick the nearest matching instance.

        In stream mode, fresh frames are grabbed and retried until a match is
        found or `timeout` seconds elapse; otherwise only the supplied frame
        is examined.

        Returns:
            (center, box, depth_image) on success, or (None, None, None).
        """
        start_time = time()

        while True:
            if stream_mode:
                try:
                    # Grab a fresh aligned frame pair for this attempt.
                    color_image, depth_image = self.get_frames()
                    color_image_bgr = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
                except Exception as e:
                    print(f"帧获取失败: {str(e)}")
                    return None, None, None
            else:
                color_image_bgr = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

            # One-time warm-up so the first timed inference isn't slowed by
            # lazy model initialization.
            if not self._first_predict_done:
                _ = self.model.predict(color_image_bgr, conf=0.5, imgsz=640, verbose=False)
                self._first_predict_done = True

            # Run inference on the BGR frame.
            results = self.model.predict(color_image_bgr, conf=0.5, imgsz=640, verbose=False)

            boxes = results[0].boxes
            # No detections at all: retry in stream mode until timeout.
            if boxes is None or len(boxes) == 0:
                end_time = time()
                if not stream_mode or (end_time - start_time) > timeout:
                    return None, None, None
                sleep(0.1)
                continue

            # Keep only detections whose class label matches the target name.
            labels = [self.model.names[int(cls)] for cls in boxes.cls.cpu().numpy()]
            matched_indices = [i for i, lbl in enumerate(labels) if lbl == target_name]

            if not matched_indices:
                if not stream_mode or (time() - start_time) > timeout:
                    return None, None, None
                sleep(0.1)
                continue

            # Among matches, prefer the one closest to the camera.
            xyxy = boxes.xyxy.cpu().numpy()
            confidences = boxes.conf.cpu().numpy()
            candidates = []

            for idx in matched_indices:
                box = xyxy[idx]
                # Box center in pixel coordinates.
                center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
                # Raw depth at the center pixel (depth image is row, col indexed).
                depth = depth_image[int(center[1]), int(center[0])]

                if depth > 0:  # zero means no depth reading at that pixel
                    candidates.append({
                        "center": center,
                        "box": box,
                        "depth": depth,
                        "confidence": confidences[idx]
                    })

            if candidates:
                candidates.sort(key=lambda x: x["depth"])  # nearest first
                best = candidates[0]
                print(
                    f"检测到 {target_name}, 深度: {best['depth'] * self.depth_scale:.3f}m, 置信度: {best['confidence']:.2f}")
                return best["center"], best["box"], depth_image

            # All matches had invalid (zero) depth: retry or give up.
            if not stream_mode or (time() - start_time) > timeout:
                return None, None, None
            sleep(0.1)

    def robust_depth_sampling(self, depth_img, center_x, center_y):
        """Robust depth lookup around (center_x, center_y).

        Samples a cross-shaped neighborhood, rejects values far from the
        center reading, and returns the 10th-percentile survivor so a few
        background/noise pixels cannot dominate.

        Returns:
            Depth in raw sensor units (mm-scale for z16) or None if no
            sample survives filtering.
        """
        # Cross-shaped sampling (the center pixel appears in both arms; the
        # duplicate slightly weights it, which is harmless here).
        y, x = int(center_y), int(center_x)
        h_samples = depth_img[y, max(x - 3, 0):x + 4]  # horizontal arm
        v_samples = depth_img[max(y - 3, 0):y + 4, x]  # vertical arm
        samples = np.concatenate([h_samples, v_samples])

        # Accept only samples within +/-200mm of the center reading, clamped
        # to the sensor's plausible working range.
        rough_depth = depth_img[y, x]
        valid_range = (
            max(100, rough_depth - 200),  # lower bound (mm)
            min(5000, rough_depth + 200)  # upper bound (mm)
        )

        valid_depths = samples[
            (samples > valid_range[0]) &
            (samples < valid_range[1])
            ]

        return np.percentile(valid_depths, 10) if len(valid_depths) > 0 else None

    def dynamic_diameter_estimation(self, box, z_c):
        """Estimate the bottle diameter (mm) from box width and depth z_c (m)."""
        # Pinhole model: physical width = pixel width * depth / focal length.
        pixel_width = box[2] - box[0]
        estimated_mm = (pixel_width * z_c * 1000) / self.color_intr["fx"]
        return np.clip(estimated_mm, 30, 100)  # clamp to plausible bottle sizes

    def compute_gripper_pose(self, center, depth_image, box):
        """Full gripper-pose computation pipeline.

        Args:
            center: (x, y) pixel coordinates of the detection center.
            depth_image: aligned depth frame (raw units).
            box: xyxy detection box.
        Returns:
            [x, y, z] in the gripper frame (meters), or None on failure.
        """
        # Guard against missing upstream results. NOTE: the previous
        # `None in (center, depth_image, box)` triggered an ambiguous
        # elementwise comparison (`ndarray == None`) and raised ValueError
        # whenever depth_image was a numpy array; explicit checks are safe.
        if center is None or depth_image is None or box is None:
            return None

        # 1. Robust depth sampling around the detection center.
        depth_value = self.robust_depth_sampling(
            depth_image, center[0], center[1])
        if depth_value is None:
            print("深度采样失败: 无有效深度值")
            return None
        z_c = depth_value * self.depth_scale  # raw units -> meters
        print(f"有效深度: {depth_value}mm -> {z_c:.3f}m")

        # 2. Dynamic diameter estimate for the cylindrical target.
        diameter_mm = self.dynamic_diameter_estimation(box, z_c)
        radius_m = diameter_mm / 2000  # mm diameter -> m radius
        print(f"动态瓶径: {diameter_mm:.1f}mm")

        # 3. Cylinder compensation: the sampled depth is the front surface,
        # so move in by one radius (projected along the viewing ray).
        fx, ppx = self.color_intr["fx"], self.color_intr["ppx"]
        theta = np.arctan((center[0] - ppx) / fx)
        z_corrected = z_c - radius_m * np.cos(theta)
        print(f"深度补偿: {z_c:.3f}m -> {z_corrected:.3f}m")

        # 4. Back-project to camera coordinates, then map into the gripper
        # frame with the hand-eye calibration.
        fy, ppy = self.color_intr["fy"], self.color_intr["ppy"]
        x_c = (center[0] - ppx) * z_corrected / fx
        y_c = (center[1] - ppy) * z_corrected / fy
        p_gripper = self.R_cam2gripper @ np.array([x_c, y_c, z_corrected]) + self.T_cam2gripper
        # Diagnostic dump before returning.
        print(f"最终位姿: X={p_gripper[0]:.3f}, Y={p_gripper[1]:.3f}, Z={p_gripper[2]:.3f}m")
        print(f"=== 位姿诊断 ===")
        print(f"深度采样值: {depth_value}mm")
        print(f"估算直径: {diameter_mm:.1f}mm")
        print(f"补偿角度: {np.degrees(theta):.1f}°")
        print(f"原始Z: {z_c:.3f}m → 补偿后: {z_corrected:.3f}m")

        return p_gripper.tolist()

    def detect(self, target_name, stream_mode=True):
        """Detect the target and compute the gripper pose.

        :param target_name: class name of the object to find.
        :param stream_mode: keep grabbing frames until found/timeout. The
            default changed False -> True because the previous code ignored
            this parameter and always streamed; True preserves that observed
            behavior while making the flag effective.
        :return: [X, Y, Z] in the gripper frame (meters), or None on failure.
        """
        try:
            # Initial frame pair (predict may re-capture in stream mode).
            color_image, depth_image = self.get_frames()
            center, box, depth_image = self.predict(
                color_image, depth_image, target_name, stream_mode=stream_mode)
            # Archive the frame (with the box, if any) for offline inspection.
            self.save_images(color_image, box, target_name)
            if center is not None:
                gripper_pose = self.compute_gripper_pose(center, depth_image, box)
                if gripper_pose is not None:
                    print(
                        f"夹爪坐标系位置: X={gripper_pose[0]:.3f}, Y={gripper_pose[1]:.3f}, Z={gripper_pose[2]:.3f} 米")
                    return gripper_pose
                else:
                    print("警告：计算出的夹爪位姿无效（可能因深度值异常）")
                    return None
            else:
                print(f"警告：未检测到目标物体 '{target_name}' 或检测失败")
                return None
        except Exception as e:
            print(f"检测过程中发生异常: {str(e)}")
            return None


# Module-level singleton: constructing it opens the RealSense pipeline and
# loads YOLO weights at import time (side effect; requires attached hardware).
camera = CameraService()


@app.route('/detect', methods=['GET'])
def detect():
    """HTTP endpoint: detect ?target_name=<name> and return the gripper pose.

    Responses: 400 when the query parameter is missing, 500 on unexpected
    errors, 200 with either a success payload or a failure status otherwise.
    """
    target_name = request.args.get("target_name")
    print(f" /detect   ----  target_name = {target_name}")
    # Guard: the query parameter is mandatory.
    if not target_name:
        return jsonify({"error": "没有找到target_name"}), 400
    try:
        pose = camera.detect(target_name)
        # Guard: detection ran but produced no usable pose.
        if pose is None:
            return jsonify({"status": "failure", "message": "获取失败"}), 200
        payload = {
            "status": "success",
            "gripper_pose": {
                "x": pose[0],
                "y": pose[1],
                "z": pose[2]
            }
        }
        return jsonify(payload)
    except Exception as e:
        return jsonify({"error": str(e)}), 500


if __name__ == '__main__':
    # Serve on all interfaces, port 6687; debug disabled (debug mode would
    # re-import the module and re-open the camera in the reloader process).
    app.run(host='0.0.0.0', port=6687, debug=False)
