""" Demo to show prediction results.
    Author: chenxi-wang
"""

import os
import sys
import numpy as np
import open3d as o3d
import argparse
import importlib
import scipy.io as scio
from PIL import Image

import torch
from graspnetAPI import GraspGroup

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'dataset'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))


from models.graspnet import GraspNet, pred_decode
from graspnet_utils import ModelFreeCollisionDetector
from graspnet_utils import CameraInfo, create_point_cloud_from_depth_image



import pyrealsense2 as rs
import cv2
from matplotlib import pyplot as plt


# Command-line configuration. Parsed once at import time; the resulting `cfgs`
# is read as a module-level global by get_net, get_and_process_data,
# collision_detection and demo below.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=False,default="ckpt/checkpoint-rs.tar", help='Model checkpoint path')
parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]')
parser.add_argument('--num_view', type=int, default=300, help='View Number [default: 300]')
parser.add_argument('--collision_thresh', type=float, default=0.01, help='Collision Threshold in collision detection [default: 0.01]')
parser.add_argument('--voxel_size', type=float, default=0.01, help='Voxel Size to process point clouds before collision detection [default: 0.01]')
cfgs = parser.parse_args()


def get_net():
    """Build the GraspNet model, load the checkpoint from cfgs.checkpoint_path,
    and return the network in eval mode on the selected device.

    Returns:
        The GraspNet module, moved to cuda:0 when available, otherwise CPU.
    """
    net = GraspNet(input_feature_dim=0, num_view=cfgs.num_view, num_angle=12, num_depth=4,
            cylinder_radius=0.05, hmin=-0.02, hmax_list=[0.01,0.02,0.03,0.04], is_training=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)
    # map_location: without it, a checkpoint saved on GPU cannot be loaded on a
    # CPU-only host, defeating the CPU fallback chosen above.
    checkpoint = torch.load(cfgs.checkpoint_path, map_location=device)
    net.load_state_dict(checkpoint['model_state_dict'])
    start_epoch = checkpoint['epoch']
    print("-> loaded checkpoint %s (epoch: %d)"%(cfgs.checkpoint_path, start_epoch))
    # inference only: freeze dropout / batch-norm statistics
    net.eval()
    return net

def get_and_process_data(data_dir):
    """Load one RGB-D observation from *data_dir* and build GraspNet inputs.

    Expects color.png, depth.png, workspace_mask.png and meta.mat in *data_dir*.

    Args:
        data_dir: directory with the captured example data.

    Returns:
        end_points: dict with 'point_clouds' (1 x num_point x 3 float tensor on
            the compute device) and 'cloud_colors' (sampled RGB, numpy array).
        cloud: open3d PointCloud of ALL valid masked points (not just the
            sample), later used for collision detection and visualization.
    """
    # color normalized to [0, 1]; depth kept in raw integer units
    color = np.array(Image.open(os.path.join(data_dir, 'color.png')), dtype=np.float32) / 255.0
    depth = np.array(Image.open(os.path.join(data_dir, 'depth.png')))
    workspace_mask = np.array(Image.open(os.path.join(data_dir, 'workspace_mask.png')))
    # meta.mat is loaded but its intrinsics are deliberately overridden below
    meta = scio.loadmat(os.path.join(data_dir, 'meta.mat'))

    # Hard-coded camera intrinsics (fx, fy, cx, cy) for the capture camera and
    # a depth scale of 1000 (depth presumably in millimeters — TODO confirm).
    intrinsic = np.array([[631.548, 0., 638.435],
                          [0., 631.207, 366.499],
                          [0., 0., 1.]], dtype=np.float32)
    factor_depth = np.array([[1000.]])

    # back-project the depth map into an organized (H, W, 3) point cloud
    camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], factor_depth)
    cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)

    # keep only pixels inside the workspace mask that have a valid (>0) depth
    mask = (workspace_mask & (depth > 0))
    cloud_masked = cloud[mask]
    color_masked = color[mask]

    # Sample exactly cfgs.num_point points: subsample without replacement when
    # enough points exist, otherwise pad by resampling with replacement.
    if len(cloud_masked) >= cfgs.num_point:
        idxs = np.random.choice(len(cloud_masked), cfgs.num_point, replace=False)
    else:
        idxs1 = np.arange(len(cloud_masked))
        idxs2 = np.random.choice(len(cloud_masked), cfgs.num_point-len(cloud_masked), replace=True)
        idxs = np.concatenate([idxs1, idxs2], axis=0)
    cloud_sampled = cloud_masked[idxs]
    color_sampled = color_masked[idxs]

    # open3d cloud of the FULL masked scene, for collision checks / display
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(cloud_masked.astype(np.float32))
    cloud.colors = o3d.utility.Vector3dVector(color_masked.astype(np.float32))

    end_points = dict()
    # add a batch dimension and move the sampled points to the compute device
    cloud_sampled = torch.from_numpy(cloud_sampled[np.newaxis].astype(np.float32))
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cloud_sampled = cloud_sampled.to(device)
    end_points['point_clouds'] = cloud_sampled
    end_points['cloud_colors'] = color_sampled

    return end_points, cloud

def get_grasps(net, end_points):
    """Run a forward pass and decode the predictions into a GraspGroup.

    Each decoded grasp is a 17-dim row:
    score, width, height, depth, translation(3), rotation(9), object id.

    Args:
        net: GraspNet model in eval mode.
        end_points: input dict containing at least 'point_clouds'.

    Returns:
        GraspGroup built from the decoded prediction array.
    """
    # inference only — no gradients needed
    with torch.no_grad():
        end_points = net(end_points)
        grasp_preds = pred_decode(end_points)
    preds = grasp_preds[0].detach().cpu().numpy()
    return GraspGroup(preds)

def collision_detection(gg, cloud):
    """Filter out grasps from *gg* that collide with the scene point cloud.

    The model-free detector voxelizes *cloud* at cfgs.voxel_size (no object
    labels required; finger width/length are fixed inside the detector).

    Args:
        gg: GraspGroup of candidate grasps.
        cloud: (N, 3) numpy array of scene points.

    Returns:
        GraspGroup with the colliding candidates removed.
    """
    detector = ModelFreeCollisionDetector(cloud, voxel_size=cfgs.voxel_size)
    # approach_dist: 5 cm pre-grasp approach; collision_thresh from the CLI
    collision_mask = detector.detect(gg, approach_dist=0.05, collision_thresh=cfgs.collision_thresh)
    return gg[~collision_mask]

def vis_grasps(gg, cloud):
    """Visualize the two best grasps (after NMS and score sorting) on the cloud."""
    gg.nms()
    gg.sort_by_score()
    top = gg[:2]  # keep only the two highest-scoring grasps for display
    geometries = [cloud] + top.to_open3d_geometry_list()
    o3d.visualization.draw_geometries(geometries)

def demo(data_dir):
    """End-to-end offline pipeline: load data, predict grasps, filter, visualize.

    Args:
        data_dir: directory with the example RGB-D capture (see
            get_and_process_data for the expected files).
    """
    net = get_net()
    # end_points holds the sampled tensor input; cloud is the full masked scene
    end_points, cloud = get_and_process_data(data_dir)
    gg = get_grasps(net, end_points)
    # collision filtering is optional: disabled when the threshold is <= 0
    if cfgs.collision_thresh > 0:
        gg = collision_detection(gg, np.array(cloud.points))
    print(len(gg))
    vis_grasps(gg, cloud)

def rs_grasper(net):
    """Stream aligned color/depth frames from a RealSense camera and display them.

    *net* is currently unused in the loop; it is kept in the signature for the
    intended grasp-prediction step on live frames. Press 'q' in the display
    window to quit. The pipeline and windows are released on exit.

    Args:
        net: GraspNet model (reserved for future live inference).
    """
    pipeline = rs.pipeline()
    config = rs.config()

    # enable 640x480 depth (z16) and color (bgr8) streams at 30 fps
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    pipeline.start(config)
    align = rs.align(rs.stream.color)  # align depth onto the color frame

    try:
        while True:
            # wait for a coherent color/depth pair
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            if not aligned_frames:
                continue  # alignment failed; try the next frame set

            # BUG FIX: the getters were swapped — depth was taken from
            # get_color_frame() and color from get_depth_frame().
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            if not color_frame or not depth_frame:
                continue

            depth = np.asanyarray(depth_frame.get_data())
            color = np.asanyarray(color_frame.get_data())
            print(depth.shape, color.shape)

            # color-map the 16-bit depth so it can sit next to the BGR image
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth, alpha=0.03), cv2.COLORMAP_JET)
            images = np.hstack((color, depth_colormap))
            cv2.imshow('RealSense', images)
            if cv2.waitKey(30) == ord('q'):
                break
    finally:
        # always release the camera and GUI resources
        pipeline.stop()
        cv2.destroyAllWindows()




if __name__ == '__main__':
    # Offline alternative, using the stored example capture:
    #   demo('doc/example_data')
    model = get_net()
    rs_grasper(model)