import base64
import cv2
import numpy as np
from time import *
from datetime import datetime
import pyrealsense2 as rs
from flask import request, Flask, jsonify
from ultralytics import YOLO
from segment_anything import SamPredictor, sam_model_registry
import torch
import os

# Flask application object; routes are registered below.
app = Flask(__name__)

class CameraService:
    """RealSense + YOLO + SAM grasp-detection service.

    Captures aligned color/depth frames, locates a named target with YOLO,
    refines its outline with SAM, and converts the mask centre into a
    gripper-frame XYZ position using the stored hand-eye calibration.
    """

    def __init__(self):
        # Directory for the debug snapshots written by save_images().
        self.image_save_dir = "captured_images"
        os.makedirs(self.image_save_dir, exist_ok=True)

        # Start the RealSense pipeline: 640x480 color (RGB8) + depth (Z16) at 30 FPS.
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.profile = self.pipeline.start(self.config)

        # Factor converting raw z16 depth units to meters.
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        print(f"深度比例: {self.depth_scale}")

        # Depth-to-color aligner, created once and reused by get_frames()
        # (it was previously re-created on every frame).
        self.align = rs.align(rs.stream.color)

        # Color-stream intrinsics, reduced to the four values we use.
        self.color_profile = self.profile.get_stream(rs.stream.color)
        self.color_intr = self.color_profile.as_video_stream_profile().get_intrinsics()
        self.color_intr = {
            "ppx": self.color_intr.ppx,
            "ppy": self.color_intr.ppy,
            "fx": self.color_intr.fx,
            "fy": self.color_intr.fy
        }
        print("相机内参:", self.color_intr)

        # Hand-eye calibration: rotation and translation from the camera
        # frame to the gripper frame (translation in meters).
        self.R_cam2gripper = np.array([
            [0, -1, 0],
            [1, 0, 0],
            [0, 0, 1]
        ])
        self.T_cam2gripper = np.array([0.092, -0.017, -0.121])

        # YOLO detector weights.
        self.yolo_model = YOLO("/home/rm/Orsen/New_Retail/best.pt")

        # SAM segmentation model.  The original literal relied on invalid
        # escape sequences ("\H", "\P", ...) that only work by accident and
        # raise SyntaxWarning on modern Python; the fully escaped form below
        # produces the identical path string.
        # NOTE(review): this Windows path is inconsistent with the Linux YOLO
        # path above — confirm which machine this service actually runs on.
        sam_checkpoint = "C:\\Users\\HUAWEI\\PycharmProjects\\PythonProject\\Pick_and_Put\\sam_vit_h_4b8939.pth"
        model_type = "vit_h"
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device)
        self.sam_predictor = SamPredictor(self.sam)
        print("初始化完成")

    def save_images(self, color_image, box, mask, target_name):
        """Save the raw frame plus optional box-annotated and mask overlays.

        Args:
            color_image: RGB frame from the camera.
            box: (x1, y1, x2, y2) detection box, or None to skip that image.
            mask: binary segmentation mask, or None to skip that image.
            target_name: label embedded in the output filenames.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename_base = f"{timestamp}_{target_name}"

        # Raw frame (converted to BGR because cv2.imwrite expects BGR).
        original_path = os.path.join(self.image_save_dir, f"{filename_base}_original.jpg")
        cv2.imwrite(original_path, cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))

        # Frame with the detection box drawn in green.
        if box is not None:
            marked_img = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            cv2.rectangle(marked_img,
                          (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])),
                          (0, 255, 0), 2)
            marked_path = os.path.join(self.image_save_dir, f"{filename_base}_marked.jpg")
            cv2.imwrite(marked_path, marked_img)

        # Frame with the mask painted red (RGB) before BGR conversion.
        if mask is not None:
            mask_img = color_image.copy()
            mask_img[mask > 0] = [255, 0, 0]
            mask_path = os.path.join(self.image_save_dir, f"{filename_base}_mask.jpg")
            cv2.imwrite(mask_path, cv2.cvtColor(mask_img, cv2.COLOR_RGB2BGR))

    def get_frames(self):
        """Grab one color/depth frame pair, aligned to the color stream.

        Returns:
            (color_image, depth_image) as numpy arrays.

        Raises:
            Exception: if either frame is missing.
        """
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()
        if not color_frame or not depth_frame:
            raise Exception("无法获取相机帧")

        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        print("图像成功获取")
        return color_image, depth_image

    def predict(self, color_image, depth_image, target_name, stream_mode=False, timeout=3):
        """Locate *target_name* with YOLO and segment it with SAM.

        In stream mode, fresh frames are grabbed and the detection is retried
        for up to ``timeout`` seconds; otherwise a single attempt is made on
        the supplied images.  Among multiple matches, the closest (smallest
        depth) detection wins.

        Returns:
            ((cx, cy), box, mask) on success, (None, None, None) otherwise.
        """
        start_time = time()
        while True:
            if stream_mode:
                try:
                    color_image, depth_image = self.get_frames()
                except Exception as e:
                    print(f"获取帧失败: {str(e)}")
                    return None, None, None

            # YOLO is fed BGR here; the camera delivers RGB.
            color_image_bgr = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

            results = self.yolo_model.predict(color_image_bgr, conf=0.5, imgsz=640)
            print("YOLOv11 预测完成")

            boxes = results[0].boxes
            if boxes is None or len(boxes) == 0:
                print("没有检测到任何物体")
                if not stream_mode or (time() - start_time) > timeout:
                    return None, None, None
                sleep(0.1)
                continue

            labels = boxes.cls.cpu().numpy()
            confidences = boxes.conf.cpu().numpy()
            xyxy = boxes.xyxy.cpu().numpy()

            # Keep only detections whose class name matches the target.
            label_names = [self.yolo_model.names[int(cls)] for cls in labels]
            matched_indices = [i for i, label in enumerate(label_names) if label == target_name]
            if not matched_indices:
                print(f"没有检测到目标物体: {target_name}")
                if not stream_mode or (time() - start_time) > timeout:
                    return None, None, None
                sleep(0.1)
                continue

            # Collect candidates that have a valid depth at the box centre.
            candidates = []
            for idx in matched_indices:
                box = xyxy[idx]
                center_x = (box[0] + box[2]) / 2
                center_y = (box[1] + box[3]) / 2
                depth_value = depth_image[int(center_y), int(center_x)] * self.depth_scale
                if depth_value <= 0:
                    continue

                candidates.append({
                    "index": idx,
                    "center": (center_x, center_y),
                    "box": box,
                    "depth": depth_value,
                    "confidence": confidences[idx]
                })

            if not candidates:
                print("检测到的目标物体深度值无效")
                if not stream_mode or (time() - start_time) > timeout:
                    return None, None, None
                sleep(0.1)
                continue

            # Pick the closest matching object.
            candidates.sort(key=lambda x: x["depth"])
            best_candidate = candidates[0]
            print(f"选择最近的 {target_name}, 深度: {best_candidate['depth']:.3f}m, 置信度: {best_candidate['confidence']:.2f}")

            # SAM segmentation prompted by the winning box; keep the
            # highest-scoring of the proposed masks.
            self.sam_predictor.set_image(color_image)
            masks, scores, _ = self.sam_predictor.predict(
                box=np.array(best_candidate["box"]),
                multimask_output=True
            )
            mask = masks[np.argmax(scores)]

            # Post-process: dilate, then keep only the largest connected
            # component.  (cc_labels renamed from "labels", which shadowed
            # the YOLO class labels above.)
            kernel = np.ones((5, 5), np.uint8)
            mask_dilated = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1)
            num_labels, cc_labels, stats, _ = cv2.connectedComponentsWithStats(mask_dilated)
            if num_labels <= 1:
                # Bug fix: an all-background mask leaves stats[1:] empty and
                # np.argmax on it would raise ValueError — bail out instead.
                print("掩码为空，无法计算中心")
                return None, None, None
            largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
            mask_cleaned = (cc_labels == largest_label).astype(np.uint8)

            # Mask centroid in pixel coordinates.
            coords = np.where(mask_cleaned)
            if len(coords[0]) > 0:
                center_x = int(np.mean(coords[1]))
                center_y = int(np.mean(coords[0]))
                print(f"掩码中心坐标: ({center_x}, {center_y})")
                return (center_x, center_y), best_candidate["box"], mask_cleaned
            else:
                print("掩码为空，无法计算中心")
                return None, None, None

    def compute_gripper_pose(self, center, depth_image):
        """Convert a pixel centre plus depth into a gripper-frame position.

        Args:
            center: (x, y) pixel coordinates of the mask centre, or None.
            depth_image: raw z16 depth frame.

        Returns:
            [X, Y, Z] in meters in the gripper frame, or None when *center*
            is None or no valid depth exists around the centre pixel.
        """
        if center is None:
            return None

        center_x, center_y = center
        print(f"掩码中心点坐标: ({center_x:.1f}, {center_y:.1f})")

        # Average the depth over a 5x5 patch around the centre, discarding
        # zeros (no return) and implausibly large raw readings (>= 10000,
        # i.e. ~10 m at the typical 1 mm depth unit — confirm for this sensor).
        y, x = int(center_y), int(center_x)
        depth_patch = depth_image[max(y - 2, 0):y + 3, max(x - 2, 0):x + 3]
        valid_depths = depth_patch[(depth_patch > 0) & (depth_patch < 10000)]
        if len(valid_depths) == 0:
            print("警告：掩码中心周围无有效深度值")
            return None

        depth_value = np.mean(valid_depths)
        Z_c = depth_value * self.depth_scale
        print(f"有效深度值: {depth_value} mm → 相机坐标系Z: {Z_c:.3f} m")

        # Back-project the pixel through the pinhole model.
        fx = self.color_intr["fx"]
        fy = self.color_intr["fy"]
        ppx = self.color_intr["ppx"]
        ppy = self.color_intr["ppy"]

        X_c = (center_x - ppx) * Z_c / fx
        Y_c = (center_y - ppy) * Z_c / fy
        P_camera = np.array([X_c, Y_c, Z_c])
        print(f"相机坐标系位置: X={X_c:.3f}, Y={Y_c:.3f}, Z={Z_c:.3f} m")

        # Apply the hand-eye transform into the gripper frame.
        P_gripper = self.R_cam2gripper @ P_camera + self.T_cam2gripper
        print(f"夹爪坐标系位置: X={P_gripper[0]:.3f}, Y={P_gripper[1]:.3f}, Z={P_gripper[2]:.3f} m")

        return P_gripper.tolist()

    def image_to_base64(self, image):
        """Encode an image as a base64 JPEG string."""
        _, buffer = cv2.imencode('.jpg', image)
        return base64.b64encode(buffer).decode('utf-8')

    def detect(self, target_name, stream_mode=True):
        """Full pipeline: grab frames, detect + segment, compute gripper pose.

        Args:
            target_name: YOLO class name to look for.
            stream_mode: forwarded to predict().  Defaults to True because
                the previous code hard-coded True and ignored this flag; the
                flag is now actually honoured while keeping the old behavior
                for callers that relied on the default.

        Returns:
            [X, Y, Z] gripper-frame position in meters, or None on failure.
        """
        try:
            color_image, depth_image = self.get_frames()
            # Bug fix: stream_mode was hard-coded to True here, silently
            # ignoring the caller's argument.
            center, box, mask = self.predict(color_image, depth_image, target_name, stream_mode=stream_mode)

            # Persist debug images for this attempt.
            self.save_images(color_image, box, mask, target_name)

            if center is not None:
                gripper_pose = self.compute_gripper_pose(center, depth_image)
                if gripper_pose is not None:
                    print(f"夹爪坐标系位置: X={gripper_pose[0]:.3f}, Y={gripper_pose[1]:.3f}, Z={gripper_pose[2]:.3f} 米")
                    return gripper_pose
                else:
                    print("警告：计算出的夹爪位姿无效")
                    return None
            else:
                print(f"警告：未检测到目标物体 '{target_name}' 或分割失败")
                return None

        except Exception as e:
            print(f"检测过程中发生异常: {str(e)}")
            return None

# Instantiate the service at import time.  NOTE: this opens the RealSense
# pipeline and loads the YOLO and SAM models, so importing this module has
# heavy hardware/model side effects.
camera_service = CameraService()

@app.route('/detect', methods=['GET'])
def detect():
    """GET /detect?target_name=<label> -> JSON gripper pose.

    Responds 400 when target_name is missing, 200 with a success/failure
    payload otherwise, and 500 on unexpected errors.
    """
    target = request.args.get("target_name")
    print(f" /detect   ----  target_name = {target}")
    if not target:
        return jsonify({"error": "没有找到target_name"}), 400
    try:
        pose = camera_service.detect(target)
        if pose is None:
            return jsonify({"status": "failure", "message": "获取失败"}), 200
        payload = {
            "status": "success",
            "gripper_pose": {
                "x": pose[0],
                "y": pose[1],
                "z": pose[2]
            }
        }
        return jsonify(payload)
    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # Serve on all interfaces; debug is off so the Flask reloader does not
    # construct a second CameraService (and open the camera twice).
    app.run(host='0.0.0.0', port=6687, debug=False)