import argparse
import cv2
import numpy as np

from infer_yolo_om import SampleYOLOV10, image_infer, infer_image, generate_mask
from infer_graspnet_om import get_om_nets, get_cloud_points, get_grasps, transfer_gg_to_list, collision_detection

def main():
    """Run the YOLO + GraspNet OM inference pipeline end to end.

    Detects the target object class in an RGB image with a YOLOv10 OM model,
    builds a segmentation mask for that class, lifts the masked region to a
    point cloud, and predicts grasp poses with the GraspNet OM models.

    Side effects: reads from ``inputs/``, writes the annotated image and mask
    to ``outputs/``. No return value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', default="checkpoint-rs.tar", help='Model checkpoint path')
    parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]')
    parser.add_argument('--num_view', type=int, default=300, help='View Number [default: 300]')
    parser.add_argument('--collision_thresh', type=float, default=0.01, help='Collision Threshold in collision detection [default: 0.01]')
    parser.add_argument('--voxel_size', type=float, default=0.01, help='Voxel Size to process point clouds before collision detection [default: 0.01]')
    cfgs = parser.parse_args()

    # Target class ID (e.g. 46 is "banana" — presumably the COCO label set; verify)
    target_id = 46

    # 1. Create and initialize the YOLO detection model
    yolo_model_path = 'models/yolov10s_310P1.om'
    yolo_model = SampleYOLOV10(yolo_model_path, 640, 640)
    yolo_model.init_resource()
    try:
        # 2. Input / output paths
        rgb_path = 'inputs/color.png'
        depth_path = 'inputs/depth_gp.npy'
        meta_path = 'inputs/camera_intrinsics.json'
        mask_path = f"outputs/mask_{target_id}.png"
        output_path = 'outputs/yolo_color.png'

        # 3. YOLO inference (writes the annotated detection image to output_path)
        image_infer(rgb_path, yolo_model, output_path)

        # 4. Generate the mask for the target class.
        # cv2.imread returns None instead of raising on a missing/unreadable
        # file — fail fast here rather than crash cryptically downstream.
        frame = cv2.imread(rgb_path)
        if frame is None:
            raise FileNotFoundError(f"Failed to read RGB image: {rgb_path}")
        class_ids, boxes, _ = infer_image(yolo_model, frame)
        mask = generate_mask(frame, class_ids, boxes, target_id)
        # Persist the mask for the point-cloud step below
        cv2.imwrite(mask_path, mask)

        # 5. Load GraspNet OM models
        sa1_model, remaining_model, vp_session, grasp_gen_session = get_om_nets()

        # 6. Build the point cloud restricted to the masked region
        end_points, cloud = get_cloud_points(rgb_path, depth_path, meta_path, mask_path, view=False)

        # 7. Predict grasp poses
        gg = get_grasps(sa1_model, remaining_model, vp_session, grasp_gen_session, end_points)
        if cfgs.collision_thresh > 0:
            gg = collision_detection(gg, np.array(cloud.points))

        # 8. Output grasp confidences, widths, and the grasp pose list
        confidences, widths, gg_list = transfer_gg_to_list(gg, cloud, vis=True)
    finally:
        # 9. Release device resources even if a step above failed
        yolo_model.release_resource()


# Script entry point: run the full detection + grasp pipeline once.
if __name__ == '__main__':
    main()