import pyrealsense2 as rs
import numpy as np
import cv2

import torch
from graspnetAPI import GraspGroup
from models.graspnet import GraspNet, pred_decode


# Absolute path to the pretrained RealSense GraspNet checkpoint.
# NOTE(review): machine-specific hard-coded path -- consider an env var or CLI flag.
ckpt_path = "/home/lin/code/graspnet-baseline/ckpt/checkpoint-rs.tar"



def get_net(ckpt_path):
    """Build a GraspNet model, load weights from *ckpt_path*, and return it in eval mode.

    Parameters
    ----------
    ckpt_path : str
        Path to a torch checkpoint dict containing 'model_state_dict' and 'epoch'.

    Returns
    -------
    GraspNet
        The network on CUDA if available (else CPU), with dropout/BN in eval mode.
    """
    # Init the model (hyper-parameters match the graspnet-baseline RealSense config).
    net = GraspNet(input_feature_dim=0, num_view=300, num_angle=12, num_depth=4,
            cylinder_radius=0.05, hmin=-0.02, hmax_list=[0.01,0.02,0.03,0.04], is_training=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)
    # Load checkpoint. map_location is required so a checkpoint saved on a CUDA
    # machine can still be loaded when only the CPU is available; without it
    # torch.load raises on CPU-only hosts, defeating the device fallback above.
    checkpoint = torch.load(ckpt_path, map_location=device)
    net.load_state_dict(checkpoint['model_state_dict'])
    start_epoch = checkpoint['epoch']
    print("-> loaded checkpoint %s (epoch: %d)"%(ckpt_path, start_epoch))
    # Set model to eval mode for inference.
    net.eval()
    return net


def rs_grasper():
    """Stream depth+color from a RealSense camera, align depth to color,
    and display both side by side until 'q' is pressed.
    """
    # Configure depth and color streams.
    pipeline = rs.pipeline()
    config = rs.config()

    # Request matching 640x480 @ 30 fps streams: 16-bit depth and BGR color.
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming.
    profile = pipeline.start(config)
    device = profile.get_device()
    # NOTE(review): hardware_reset() immediately after pipeline.start() can
    # invalidate the running pipeline on some firmware versions -- confirm this
    # reset is actually needed here.
    device.hardware_reset()
    align = rs.align(rs.stream.color)  # Align depth frames into the color camera frame

    try:
        while True:
            # Wait for a coherent pair of frames and align them.
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            if not aligned_frames:
                continue  # If alignment fails, go back to the beginning of the loop

            # BUG FIX: read frames from the *aligned* frameset. The original code
            # computed the alignment and then read from the raw `frames`,
            # discarding the depth-to-color registration entirely.
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            # Convert frames to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            print(depth_image.shape, color_image.shape)

            # Scale 16-bit depth down to 8 bits and color-map it for visualization.
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            # Show color and depth side by side in one window.
            images = np.hstack((color_image, depth_colormap))
            cv2.imshow('RealSense', images)

            # Exit on 'q'.
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break
    finally:
        # Stop streaming and release the display window.
        pipeline.stop()
        cv2.destroyAllWindows()

# NOTE(review): duplicate of the import at the top of the file; the two
# graspnet_utils imports below are unused in the code shown here -- verify
# against the rest of the project before removing.
from models.graspnet import GraspNet, pred_decode
from graspnet_utils import ModelFreeCollisionDetector
from graspnet_utils import CameraInfo, create_point_cloud_from_depth_image

# Script entry: load the network, then run the live RealSense viewer.
# NOTE(review): `net` is never consumed by rs_grasper() in this file -- the
# grasp-prediction step appears to be unwired here.
net = get_net(ckpt_path)
rs_grasper()