import torch
import numpy as np
import cv2
import open3d as o3d
from graspnetAPI import GraspGroup

from pathlib import Path
import sys
PATH_GRASPNET_BASELINE = Path("/root/Coding/graspnet-baseline")
sys.path.append(str(PATH_GRASPNET_BASELINE / "models"))
sys.path.append(str(PATH_GRASPNET_BASELINE / "dataset"))
sys.path.append(str(PATH_GRASPNET_BASELINE / "utils"))

from graspnet import GraspNet, pred_decode
from data_utils import CameraInfo, create_point_cloud_from_depth_image
from collision_detector import ModelFreeCollisionDetector

# === Ultralytics YOLO ===
try:
    from ultralytics import YOLO
except Exception as e:
    raise ImportError("请先安装 ultralytics：pip install -U ultralytics") from e

class GraspNetYoloProcessor:
    """YOLO-guided GraspNet inference.

    Detects the target region in an RGB image with a YOLO model, builds a
    workspace mask from the single best (enlarged) box, samples the depth
    point cloud inside that mask, and runs GraspNet to predict
    collision-filtered 6-DoF grasp poses.
    """

    def __init__(
        self,
        checkpoint_path="/root/Coding/graspnet-baseline/logs/checkpoint-rs.tar",
        num_view=300,
        num_point=20000,
        collision_thresh=0.01,
        voxel_size=0.01,
        yolo_weight_name="/ws_618/src/manipulation/scripts/detection/yolo/models/618_surface_detect_m.pt",   # absolute path, or a file name relative to this script
        yolo_conf=0.30,         # minimum confidence passed to YOLO inference
        conf_keep=0.80,         # only boxes with confidence >= this are kept
        # -- box filtering (rejects full-frame / border-hugging / tiny boxes) --
        min_area_ratio=0.001,   # boxes smaller than 0.1% of the image area are noise
        max_area_ratio=0.35,    # boxes larger than 35% are "full-frame" misdetections
        border_tol=6,           # drop boxes within 6 px of any image border
        # -- box enlargement --
        enlarge_ratio=1.5
    ):
        self.num_point = num_point
        self.collision_thresh = collision_thresh
        self.voxel_size = voxel_size

        self.yolo_conf = yolo_conf
        self.conf_keep = conf_keep
        self.min_area_ratio = min_area_ratio
        self.max_area_ratio = max_area_ratio
        self.border_tol = border_tol
        self.enlarge_ratio = enlarge_ratio

        # Last YOLO visualization (BGR image). Initialized here so the
        # `show` code path can never hit an AttributeError before the
        # first detection has run.
        self.render_img = None

        # ----- GraspNet -----
        self.net = GraspNet(
            input_feature_dim=0,
            num_view=num_view,
            num_angle=12,
            num_depth=4,
            cylinder_radius=0.05,
            hmin=-0.02,
            hmax_list=[0.01, 0.02, 0.03, 0.04],
            is_training=False
        )
        self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.net.to(self.device)
        ckpt = torch.load(checkpoint_path, map_location=self.device)
        self.net.load_state_dict(ckpt['model_state_dict'])
        print(f"[INFO] Loaded GraspNet checkpoint {checkpoint_path} (epoch: {ckpt.get('epoch', 'N/A')})")
        self.net.eval()

        # ----- YOLO -----
        try:
            base_dir = Path(__file__).resolve().parent
        except NameError:
            base_dir = Path.cwd()
        # pathlib: joining with an absolute path ignores base_dir, so both
        # absolute weight paths and script-relative file names are accepted.
        yolo_w = base_dir / yolo_weight_name
        if not yolo_w.exists():
            raise FileNotFoundError(f"未找到 YOLO 权重：{yolo_w}。请放到脚本同目录或传绝对路径。")
        self.detector = YOLO(str(yolo_w))
        print(f"[INFO] Loaded YOLO weights from: {yolo_w}")

    def _detect_boxes(self, color_bgr_uint8, imgsz_w):
        """Run YOLO on a BGR uint8 image.

        Returns:
            (boxes, confs): (N, 4) int xyxy boxes and (N,) float confidences;
            both empty when nothing is detected. Also updates
            ``self.render_img`` on every path so the `show` display is
            never stale.
        """
        results = self.detector.predict(
            source=color_bgr_uint8,
            imgsz=imgsz_w,
            conf=self.yolo_conf,
            device=0 if self.device.startswith('cuda') else 'cpu',
            verbose=False
        )
        if not results:
            self.render_img = color_bgr_uint8
            return np.zeros((0, 4), dtype=int), np.zeros((0,), dtype=float)
        r0 = results[0]
        if (r0.boxes is None) or (len(r0.boxes) == 0):
            # Bug fix: render_img was previously left unset/stale on this
            # path, which could crash the `show` display downstream.
            self.render_img = color_bgr_uint8
            return np.zeros((0, 4), dtype=int), np.zeros((0,), dtype=float)
        boxes = r0.boxes.xyxy.detach().cpu().numpy().astype(int)
        confs = r0.boxes.conf.detach().cpu().numpy().astype(float)
        self.render_img = r0.plot()
        return boxes, confs

    def _filter_multi(self, boxes, confs, W, H):
        """Filter detections by confidence, area and border distance.

        Rejects boxes below ``conf_keep``, degenerate/clamped-empty boxes,
        boxes outside the [min_area_ratio, max_area_ratio] area band, and
        boxes closer than ``border_tol`` px to any image edge.

        Returns:
            (kept_boxes, kept_confs): lists of (x1, y1, x2, y2) tuples and
            their confidences, in input order.
        """
        img_area = W * H
        kept_boxes, kept_confs = [], []
        for (x1, y1, x2, y2), cf in zip(boxes, confs):
            if cf < self.conf_keep:
                continue
            # Clamp to image bounds before any geometry checks.
            x1 = max(0, min(W - 1, int(x1)))
            y1 = max(0, min(H - 1, int(y1)))
            x2 = max(0, min(W - 1, int(x2)))
            y2 = max(0, min(H - 1, int(y2)))
            if x2 <= x1 or y2 <= y1:
                continue
            area = (x2 - x1) * (y2 - y1)
            if area < self.min_area_ratio * img_area:
                continue
            if area > self.max_area_ratio * img_area:
                continue
            # Border-hugging boxes are usually truncated objects; drop them.
            if (x1 <= self.border_tol or y1 <= self.border_tol or
                (W - 1 - x2) <= self.border_tol or (H - 1 - y2) <= self.border_tol):
                continue
            kept_boxes.append((x1, y1, x2, y2))
            kept_confs.append(cf)
        return kept_boxes, kept_confs

    @staticmethod
    def _enlarge_box_centered(box, W, H, ratio=1.5):
        """Scale a box about its center by ``ratio``, clamped to the image.

        Returns an (x1, y1, x2, y2) int tuple with at least 1 px extent in
        each dimension.
        """
        x1, y1, x2, y2 = box
        w = max(1, x2 - x1)
        h = max(1, y2 - y1)
        cx = (x1 + x2) / 2.0
        cy = (y1 + y2) / 2.0

        nw = w * float(ratio)
        nh = h * float(ratio)

        nx1 = int(round(cx - nw / 2.0))
        ny1 = int(round(cy - nh / 2.0))
        nx2 = int(round(cx + nw / 2.0))
        ny2 = int(round(cy + nh / 2.0))

        nx1 = max(0, min(W - 1, nx1))
        ny1 = max(0, min(H - 1, ny1))
        nx2 = max(0, min(W - 1, nx2))
        ny2 = max(0, min(H - 1, ny2))

        # Guarantee a non-degenerate box even after clamping at the border.
        if nx2 <= nx1:
            nx2 = min(W - 1, nx1 + 1)
        if ny2 <= ny1:
            ny2 = min(H - 1, ny1 + 1)
        return (nx1, ny1, nx2, ny2)

    def process(
        self,
        color, depth, intrinsic,
        camera_size=(640, 480),
        show=False
    ):
        """Predict grasps for one RGB-D frame.

        Args:
            color: np.ndarray, (H, W, 3), uint8 or float in [0, 1]
            depth: np.ndarray, (H, W), uint16, millimeters
            intrinsic: np.ndarray, (3, 3), float32 camera matrix
            camera_size: tuple (W, H) used for depth back-projection
                (NOTE(review): assumed to match depth.shape — confirm upstream)
            show: bool, display OpenCV / Open3D debug windows (blocking)

        Returns:
            (gg, cloud_o3d): score-sorted, collision- and width-filtered
            GraspGroup, and the sampled scene point cloud.
        """
        print("in process")

        # Normalize color into float [0,1] (cloud colors) and uint8 (YOLO/display).
        if color.dtype == np.uint8:
            color_norm = color.astype(np.float32) / 255.0
            color_bgr = color.copy()
        else:
            color_norm = (np.clip(color, 0, 1.0)).astype(np.float32)
            color_bgr = (color_norm * 255.0).astype(np.uint8)

        cam_w, cam_h = camera_size
        camera = CameraInfo(cam_w, cam_h, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], scale=1000.0)
        cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)  # (H, W, 3)

        # Align the color image to the depth resolution.
        H, W = depth.shape
        if color_bgr.shape[:2] != (H, W):
            color_bgr = cv2.resize(color_bgr, (W, H), interpolation=cv2.INTER_LINEAR)
            color_norm = cv2.resize((color_norm * 255).astype(np.uint8), (W, H), interpolation=cv2.INTER_LINEAR).astype(np.float32) / 255.0

        # Detect, filter, then keep only the single highest-confidence box.
        boxes_xyxy, confs = self._detect_boxes(color_bgr, imgsz_w=W)
        boxes_kept, confs_kept = self._filter_multi(boxes_xyxy, confs, W, H)
        if boxes_kept:
            top_idx = int(np.argmax(confs_kept))
            boxes_kept = [boxes_kept[top_idx]]
            confs_kept = [confs_kept[top_idx]]

        enlarged_boxes = [
            self._enlarge_box_centered(b, W, H, ratio=self.enlarge_ratio)
            for b in boxes_kept
        ]

        # workspace_mask: union of the (enlarged) kept boxes.
        workspace_mask = np.zeros((H, W), dtype=np.uint8)
        for (x1, y1, x2, y2) in enlarged_boxes:
            workspace_mask[y1:y2, x1:x2] = 1

        if show:
            win = "workspace_mask"
            cv2.namedWindow(win, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(win, W, H)
            cv2.imshow(win, self.render_img)
            cv2.waitKey(0)  # blocks until a key is pressed
            cv2.destroyWindow(win)

        # Select in-mask points with valid depth, then sample num_point points.
        valid = (workspace_mask.astype(bool) & (depth > 0))
        cloud_masked = cloud[valid]
        color_masked = color_norm[valid]

        if len(cloud_masked) >= self.num_point:
            idxs = np.random.choice(len(cloud_masked), self.num_point, replace=False)
        else:
            # Too few points: keep them all and pad by re-sampling with replacement.
            idxs1 = np.arange(len(cloud_masked))
            need = max(0, self.num_point - len(cloud_masked))
            if len(cloud_masked) == 0 and need > 0:
                # Degenerate case (no detection / no valid depth): feed one
                # dummy point so the network input keeps a fixed size.
                cloud_masked = np.zeros((1, 3), dtype=np.float32)
                color_masked = np.zeros((1, 3), dtype=np.float32)
                idxs1 = np.arange(len(cloud_masked))
                need = self.num_point - len(cloud_masked)
            idxs2 = np.random.choice(len(cloud_masked), need, replace=True)
            idxs = np.concatenate([idxs1, idxs2], axis=0)
        cloud_masked = cloud_masked[idxs]
        color_masked = color_masked[idxs]

        # Open3D cloud (collision detection / visualization) and tensor input.
        cloud_o3d = o3d.geometry.PointCloud()
        cloud_o3d.points = o3d.utility.Vector3dVector(cloud_masked.astype(np.float32))
        cloud_o3d.colors = o3d.utility.Vector3dVector(color_masked.astype(np.float32))

        cloud_sampled = torch.from_numpy(cloud_masked[np.newaxis].astype(np.float32)).to(self.device)
        end_points = {'point_clouds': cloud_sampled, 'cloud_colors': color_masked}

        # GraspNet forward pass.
        with torch.no_grad():
            end_points = self.net(end_points)
            grasp_preds = pred_decode(end_points)
        gg_array = grasp_preds[0].detach().cpu().numpy()
        gg = GraspGroup(gg_array)

        # Collision filtering against the sampled scene cloud.
        if self.collision_thresh > 0:
            mfc_detector = ModelFreeCollisionDetector(np.asarray(cloud_o3d.points), voxel_size=self.voxel_size)
            collision_mask = mfc_detector.detect(gg, approach_dist=0.05, collision_thresh=self.collision_thresh)
            gg = gg[~collision_mask]

        gg.nms()
        gg.sort_by_score()

        # Gripper opening-width limits (meters).
        W_MIN, W_MAX = 0.06, 0.088
        width_mask = (gg.widths >= W_MIN) & (gg.widths <= W_MAX)
        gg = gg[width_mask]

        # 3D visualization (Open3D) alongside the YOLO render (OpenCV).
        if show:
            import time
            try:
                gg_vis = gg[:5]
                grippers = gg_vis.to_open3d_geometry_list()

                # --- Open3D window ---
                vis3d = o3d.visualization.Visualizer()
                vis3d.create_window(window_name="GraspNet Grasps", width=960, height=720, visible=True)
                vis3d.add_geometry(cloud_o3d)
                for g in grippers:
                    vis3d.add_geometry(g)

                # --- OpenCV window (YOLO detection render) ---
                win = "workspace_mask"
                cv2.namedWindow(win, cv2.WINDOW_NORMAL)
                cv2.resizeWindow(win, W, H)
                cv2.imshow(win, self.render_img)

                while True:
                    vis3d.poll_events()
                    vis3d.update_renderer()

                    key = cv2.waitKey(10)
                    if key != -1:
                        # Bug fix: destroyWindow() requires the window name;
                        # the bare call raised TypeError on any key press.
                        cv2.destroyWindow(win)
                        break

                    try:
                        if hasattr(vis3d, "get_window_visible"):
                            vis_visible = vis3d.get_window_visible()
                            if vis_visible is not None and not vis_visible:
                                break
                    except Exception:
                        pass

                    time.sleep(0.01)

                try:
                    cv2.destroyWindow(win)
                except Exception:
                    # The window may already be gone (destroyed on key press).
                    pass
                vis3d.destroy_window()
            except Exception as e:
                print(f"[WARN] Open3D visualize failed: {e}")

        return gg, cloud_o3d
        


if __name__ == '__main__':
    # Smoke test: load one recorded RGB-D frame and run the full pipeline.
    data = np.load("/root/Coding/graspnet-baseline/mytest/files/20251003_140157/20251003_140248_204.npz")
    print(data.keys())
    rgb = data['rgb']      # uint8 color image
    depth = data['depth']  # uint16 depth (millimeters)
    intrinsic = np.array([
        [607.8534 ,   0.     , 327.83737],
        [  0.     , 607.93445, 252.96724],
        [  0.     ,   0.     ,   1.     ]
    ], np.float32)

    graspnet_processor = GraspNetYoloProcessor(
        yolo_weight_name="/ws_618/src/manipulation/scripts/detection/yolo/models/618_surface_detect_m.pt",
        yolo_conf=0.30,
        conf_keep=0.80,
        max_area_ratio=0.35,
        min_area_ratio=0.001,
        border_tol=6,
        enlarge_ratio=1.3
    )
    gg, cloud = graspnet_processor.process(rgb, depth, intrinsic, show=True)
    print(gg[0] if len(gg) > 0 else "No grasp found")

    try:
        cv2.destroyAllWindows()
    except Exception:
        # Best-effort cleanup; no GUI backend is a valid state.
        pass

    # `sys` is already imported at module top; no need to re-import here.
    sys.exit(0)

