#!/home/agilex/miniconda3/envs/graspnet/bin/python
# -- coding: UTF-8
import pyrealsense2 as rs
import numpy as np
import cv2
import open3d as o3d
from cam_cnofig import image_size, intrinsics_matrix, CameraInfo, factor_depth
import torch
from models.graspnet import GraspNet, pred_decode
from graspnetAPI import GraspGroup
from graspnet_utils import ModelFreeCollisionDetector


import rospy
from sensor_msgs.msg import PointCloud2, PointField
import std_msgs
import struct
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point

import threading


from scipy.spatial.transform import Rotation

import tf
from geometry_msgs.msg import TransformStamped
from tf.transformations import quaternion_from_euler, quaternion_from_matrix, euler_from_matrix

import keyboard

np.set_printoptions(suppress=True)

def rad2deg(rad):
    """Convert an angle (scalar or array) from radians to degrees."""
    return np.degrees(rad)


# Camera-to-object homogeneous transform placeholder (identity until a grasp
# pose is computed; not updated anywhere visible in this file).
T_cam_object = np.eye(4)
# Latest point-cloud message to publish (rebound in the main loop below).
cloud_msg = PointCloud2()

def publish_cloud(msg_cloud):
    """Publish *msg_cloud* on /point_cloud2 at 10 Hz until ROS shuts down."""
    publisher = rospy.Publisher('/point_cloud2', PointCloud2, queue_size=10)
    loop_rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        publisher.publish(msg_cloud)
        loop_rate.sleep()

def publish_tf(frame_id="cam_link", child_fram_id = "object_link", T=np.eye(4)):
    """Continuously broadcast T over TF as the pose of the child frame.

    Args:
        frame_id: Parent frame name.
        child_fram_id: Child frame name (misspelling kept for API compatibility).
        T: 4x4 homogeneous transform of the child frame in the parent frame.
    """
    br = tf.TransformBroadcaster()
    translation = T[:3, 3]
    print(T)
    # Convert the rotation part to a quaternion for sendTransform.
    q = quaternion_from_matrix(T)

    print(euler_from_matrix(T, axes='rzyx'))
    print(translation)

    rate = rospy.Rate(5)
    # Broadcast until shutdown.
    while not rospy.is_shutdown():
        # tf's sendTransform signature is (translation, rotation, time,
        # child, parent) — the original passed the parent frame in the child
        # slot and vice versa, inverting the published transform.
        br.sendTransform(
            translation,
            q,
            rospy.Time.now(),
            child_fram_id,
            frame_id)
        # BUG FIX: sleep INSIDE the loop. The original had rate.sleep()
        # after the while, so the broadcaster busy-spun at 100% CPU.
        rate.sleep()


def pub_cloud_marker(pub_cloud, pub_marker, cloud_msg, marker_msg, hz):
    """Publish the given cloud and marker messages at *hz* Hz until shutdown."""
    loop_rate = rospy.Rate(hz)
    while not rospy.is_shutdown():
        for publisher, message in ((pub_cloud, cloud_msg), (pub_marker, marker_msg)):
            publisher.publish(message)
        loop_rate.sleep()


def create_arrow_marker(start_point, end_point):
    """Build a red ARROW marker in the "map" frame from start_point to end_point."""
    arrow = Marker()
    arrow.header.frame_id = "map"
    arrow.header.stamp = rospy.Time.now()
    arrow.ns = "arrow"
    arrow.id = 0
    arrow.type = Marker.ARROW
    arrow.action = Marker.ADD

    # With two points, RViz draws the arrow from the first to the second.
    arrow.points.extend([start_point, end_point])

    # Opaque red.
    arrow.color.r = 1.0
    arrow.color.g = 0.0
    arrow.color.b = 0.0
    arrow.color.a = 1.0

    # Shaft diameter, head width, head height (meters).
    arrow.scale.x = 0.01
    arrow.scale.y = 0.05
    arrow.scale.z = 0.05

    return arrow

def o3d_vis(object_list):
    """Open a blocking Open3D viewer showing every geometry in *object_list*."""
    o3d.visualization.draw_geometries(object_list)

# Initialize the RealSense pipeline and its configuration object.
pipeline = rs.pipeline()
config = rs.config()

# World-origin coordinate frame mesh used in the Open3D visualization.
coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
    size=0.5,  # axis length
    origin=[0, 0, 0]  # frame origin
)


def create_point_cloud_from_depth_image(color, depth, camera, rgb_flag = False, z_min=0.2, z_max=2.0):
    """Back-project a depth image into a 3D point cloud via the pinhole model.

    Args:
        color: HxWx3 BGR image; only read when rgb_flag is True.
        depth: HxW depth image in raw sensor units (divided by camera.scale).
        camera: Intrinsics holder with width/height/fx/fy/cx/cy/scale attributes.
        rgb_flag: When True append per-point RGB, giving an (N, 6) result;
            otherwise the result is (N, 3).
        z_min, z_max: Depth clipping range in meters (generalizes the
            previously hard-coded 0.2–2.0 m window; defaults preserve it).

    Returns:
        (N, 3) or (N, 6) array of points [x, y, z(, r, g, b)].
    """
    assert(depth.shape[0] == camera.height and depth.shape[1] == camera.width)
    xmap = np.arange(camera.width)
    ymap = np.arange(camera.height)
    xmap, ymap = np.meshgrid(xmap, ymap)  # per-pixel (u, v) grids
    points_z = depth / camera.scale       # depth in meters (broadcasts)

    points_x = (xmap - camera.cx) * points_z / camera.fx  # X = (u - cx) * Z / fx
    points_y = (ymap - camera.cy) * points_z / camera.fy  # Y = (v - cy) * Z / fy (comment fixed)
    cloud = np.stack([points_x, points_y, points_z], axis=-1)  # stack along a new last axis
    if rgb_flag:
        color = color[:, :, ::-1]   # BGR -> RGB
        cloud = np.concatenate((cloud, color), axis=2)
        cloud = cloud.reshape([-1, 6])
    else:
        cloud = cloud.reshape([-1, 3])
    # Drop points outside the valid depth window (also removes zero-depth pixels).
    cloud = cloud[(cloud[:, 2] > z_min) & (cloud[:, 2] < z_max)]
    return cloud

def get_net(ckpt_path):
    """Build GraspNet, load weights from *ckpt_path*, return it in eval mode.

    Args:
        ckpt_path: Path to a checkpoint dict with 'model_state_dict' and 'epoch'.

    Returns:
        The GraspNet module on GPU if available (else CPU), set to eval mode.
    """
    # Init the model (fixed architecture matching the released checkpoint).
    net = GraspNet(input_feature_dim=0, num_view=300, num_angle=12, num_depth=4,
            cylinder_radius=0.05, hmin=-0.02, hmax_list=[0.01,0.02,0.03,0.04], is_training=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)
    # BUG FIX: map_location makes a CUDA-saved checkpoint loadable on a
    # CPU-only machine; the original torch.load would raise there despite
    # the explicit CPU fallback above.
    checkpoint = torch.load(ckpt_path, map_location=device)
    net.load_state_dict(checkpoint['model_state_dict'])
    start_epoch = checkpoint['epoch']
    print("-> loaded checkpoint %s (epoch: %d)"%(ckpt_path, start_epoch))
    # set model to eval mode (disables dropout/batchnorm updates)
    net.eval()
    return net

# Collision filtering of grasp candidates.
def collision_detection(gg, cloud):
    """Remove grasps from *gg* that would collide with the scene *cloud*.

    Uses the model-free detector (fixed finger width/length, no object
    labels), built over a 1 cm voxelization of the input cloud.
    """
    detector = ModelFreeCollisionDetector(cloud, voxel_size=0.01)
    # approach_dist: 5 cm pre-grasp travel; collision_thresh: 1 cm tolerance.
    collision_mask = detector.detect(gg, approach_dist=0.05, collision_thresh=0.01)
    # Keep only the collision-free grasps.
    return gg[~collision_mask]


def preprocess(cloud, rgb_flag = False):
    """Pack a sampled point cloud into the network's input dict.

    Args:
        cloud: (N, 3) or (N, 6) array; columns 0..2 are XYZ, 3..5 optional RGB.
        rgb_flag: When True, also store the RGB columns under 'cloud_colors'.

    Returns:
        dict with 'point_clouds' as a (1, N, 3) float32 tensor on the compute
        device, plus 'cloud_colors' (numpy array) when rgb_flag is True.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    xyz = cloud[:, :3].astype(np.float32)
    # Add a leading batch dimension and move onto the device.
    xyz_tensor = torch.from_numpy(xyz[np.newaxis]).to(device)
    end_points = {'point_clouds': xyz_tensor}
    if rgb_flag:
        end_points['cloud_colors'] = cloud[:, 3:]
    return end_points

# Open3D point cloud -> ROS PointCloud2 message.
def open3d_to_ros(open3d_cloud):
    """Convert an Open3D point cloud into a ROS PointCloud2 (XYZ + packed RGB).

    Each point occupies 16 bytes: x, y, z as float32 plus one float32 whose
    bit pattern is the packed 0x00RRGGBB color. Clouds without colors are
    emitted as white points.
    """
    points = np.asarray(open3d_cloud.points, dtype=np.float32)
    if len(open3d_cloud.colors) > 0:
        colors = (np.asarray(open3d_cloud.colors) * 255).astype(np.uint8)
    else:
        colors = np.full((points.shape[0], 3), 255, dtype=np.uint8)

    # Prepare data for PointCloud2: one row of [x, y, z, rgb] per point.
    cloud_data = np.zeros((points.shape[0], 4), dtype=np.float32)
    cloud_data[:, 0:3] = points

    # Pack RGB into a uint32 and reinterpret the bits as float32 (vectorized).
    # BUG FIX: the original shifted numpy uint8 scalars (r << 16, g << 8),
    # which overflows in uint8 arithmetic and zeroed the red/green channels;
    # widening to uint32 before shifting preserves all three channels.
    rgb = (colors[:, 0].astype(np.uint32) << 16) \
        | (colors[:, 1].astype(np.uint32) << 8) \
        | colors[:, 2].astype(np.uint32)
    cloud_data[:, 3] = rgb.view(np.float32)

    # Flatten data to a byte array (little-endian, matching is_bigendian=False).
    cloud_data = cloud_data.flatten().tobytes()

    # Create the PointCloud2 message in the camera frame.
    header = std_msgs.msg.Header()
    header.stamp = rospy.Time.now()
    header.frame_id = 'cam_link'

    fields = [
        PointField('x', 0, PointField.FLOAT32, 1),
        PointField('y', 4, PointField.FLOAT32, 1),
        PointField('z', 8, PointField.FLOAT32, 1),
        PointField('rgb', 12, PointField.FLOAT32, 1),
    ]

    point_cloud_msg = PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=16,
        row_step=16 * points.shape[0],
        data=cloud_data
    )

    return point_cloud_msg


# ROS node and publishers for the cloud and the grasp-direction marker.
rospy.init_node('open3d_to_ros_pointcloud')
pub_cloud = rospy.Publisher('/point_cloud2', PointCloud2, queue_size=10)
pub_marker = rospy.Publisher('/marker', Marker, queue_size=10)
# 640x480 pinhole intrinsics (fx, fy, cx, cy) from cam_cnofig plus depth scale.
camera = CameraInfo(640, 480, intrinsics_matrix[0][0], intrinsics_matrix[1][1], intrinsics_matrix[0][2], intrinsics_matrix[1][2], factor_depth)

# Enable the RealSense depth and color streams (640x480 @ 30 FPS).
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming and prepare depth-to-color alignment.
pipeline.start(config)
align = rs.align(rs.stream.color)  # Create align object for depth-color alignment




# Load the pretrained GraspNet checkpoint.
ckpt_path = "/home/lin/code/graspnet-baseline/ckpt/checkpoint-rs.tar"
net = get_net(ckpt_path)


# Main loop: grab an aligned RGB-D frame, build a point cloud, run GraspNet,
# keep the top collision-free grasp, then visualize and build ROS messages.
try:
    while not rospy.is_shutdown():
        try:

            # Wait up to 10 s for a frame pair, then align depth to color.
            frames = pipeline.wait_for_frames(timeout_ms=10000)
            aligned_frames = align.process(frames)
            if not aligned_frames:
                continue  # alignment failed; try the next frame pair

            # BUG FIX: read depth/color from the ALIGNED frame set; the
            # original read from the raw `frames`, silently discarding the
            # alignment computed just above.
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            # Raw depth (uint16) buffer.
            depth_data = np.asanyarray(depth_frame.get_data())

            # Raw BGR color buffer.
            color_data = np.asanyarray(color_frame.get_data())

            cloud = create_point_cloud_from_depth_image(color_data, depth_data, camera, rgb_flag=True)

            # Sample exactly num_point points: subsample without replacement
            # when there are enough, otherwise keep all and pad by resampling.
            num_point = 10000
            cloud_size = cloud.shape[0]
            if(cloud_size >= num_point):
                idxs = np.random.choice(cloud.shape[0] , num_point, replace=False)
            else:
                idxs1 = np.arange(cloud_size)
                idxs2 = np.random.choice(cloud_size, num_point - cloud_size, replace=True)
                idxs = np.concatenate([idxs1, idxs2], axis=0)
                # BUG FIX: removed a stray line that overwrote these pad
                # indices with np.random.choice(..., replace=False), which
                # raises ValueError whenever cloud_size < num_point.
            cloud = cloud[idxs]

            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(cloud[:, :3].astype(np.float32))
            pcd.colors = o3d.utility.Vector3dVector(np.array(cloud[:, 3:], dtype=np.float32) / 255.0)

            # Network forward pass (no gradients needed at inference time).
            end_points = preprocess(cloud, rgb_flag=True)
            with torch.no_grad():
                end_points = net(end_points)
                grasp_preds = pred_decode(end_points)  # list with one (num_grasp, 17) tensor
            gg_array = grasp_preds[0].detach().cpu().numpy()

            gg = GraspGroup(gg_array)
            collision_thresh = 0.01
            # Drop grasps that would collide with the observed cloud.
            if collision_thresh > 0:
                gg = collision_detection(gg, np.array(pcd.points))

            gg.nms()
            gg.sort_by_score()  # best grasp first
            if(len(gg) < 1):
                break
            gg = gg[:1]  # keep only the top-scoring grasp

            grippers = gg.to_open3d_geometry_list()

            # Assemble the 4x4 grasp pose in the camera frame.
            R = gg.rotation_matrices
            t = gg.translations
            T = np.eye(4)
            T[:3, :3] = R
            T[:3, 3] = t

            q_ros = quaternion_from_matrix(T)
            eu_ros = euler_from_matrix(T, axes='rzyx')  # intrinsic z-y-x (yaw, pitch, roll)

            print("euler(RPY) : ", rad2deg(eu_ros[2]), rad2deg(eu_ros[1]), rad2deg(eu_ros[0]))
            print("translation: ", t[0][0], t[0][1], t[0][2])

            flattened_eu = np.ravel(eu_ros)
            flattened_t = np.ravel(t)
            flattened_q = np.ravel(q_ros)
            np.set_printoptions(precision=3)
            flattened = np.concatenate((flattened_t, flattened_eu), axis=0)
            print("xyz-ypr:", flattened)
            flattened = np.concatenate((flattened_t, flattened_q), axis=0)
            print("xyz-xyzw:", flattened)

            # Coordinate frame mesh moved to the grasp pose for visualization.
            gripper_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
                size=0.3,  # axis length
                origin=[0, 0, 0]
            )
            gripper_frame.transform(T)

            # Arrow marker from the camera origin to the grasp position.
            marker_msg = create_arrow_marker(Point(0, 0, 0), Point(gg.translations[0][0], gg.translations[0][1], gg.translations[0][2]))
            cloud_msg = open3d_to_ros(pcd)

            object_list = [pcd, *grippers, coordinate_frame, gripper_frame]

            o3d_vis(object_list)
            rate = rospy.Rate(5)
            rate.sleep()
        except RuntimeError as e:
            print(f"等待帧时发生错误: {e}")

finally:
    # Always stop the camera stream and close any OpenCV windows.
    pipeline.stop()
    cv2.destroyAllWindows()