import os
import sys
import time
import argparse
import json

import cv2
import torch
import onnxruntime as ort
import open3d as o3d
import scipy.io as scio
import numpy as np
from PIL import Image
from ais_bench.infer.interface import InferSession
from graspnetAPI import GraspGroup

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
PARENT_DIR = os.path.dirname(ROOT_DIR)
sys.path.append(os.path.join(PARENT_DIR, 'models'))

# sys.path.append("/home/huawei/Ascend-YoloGraspnet/models")

from collision_detector import ModelFreeCollisionDetector
# from models import backbone
from data_utils import CameraInfo, create_point_cloud_from_depth_image, GRASP_MAX_WIDTH, GRASP_MAX_TOLERANCE

# Command-line configuration shared by every function below via the
# module-global `cfgs` namespace (parsed at import time).
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', default="checkpoint-rs.tar", help='Model checkpoint path')
parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]')
parser.add_argument('--num_view', type=int, default=300, help='View Number [default: 300]')
parser.add_argument('--collision_thresh', type=float, default=0.01, help='Collision Threshold in collision detection [default: 0.01]')
parser.add_argument('--voxel_size', type=float, default=0.01, help='Voxel Size to process point clouds before collision detection [default: 0.01]')

cfgs = parser.parse_args()

# Torch sub-models run on CPU; the OM sessions below run on the Ascend NPU.
device = torch.device("cpu")
# device2 = torch.device("npu:0")
def infer_vpmodule_om(session, end_points):
    """Run the view-point module OM model and merge its outputs into end_points.

    The ONNX twin of the OM model is loaded once (and cached on the function)
    purely to recover the expected input/output names and shapes, because the
    ais_bench OM session does not expose them.

    Input:
        session: ais_bench InferSession wrapping vpmodule.om
        end_points: dict of torch tensors keyed by the ONNX input names
    Output:
        end_points, extended with one tensor per ONNX output name
    """
    ort_session = getattr(infer_vpmodule_om, "_ort_session", None)
    if ort_session is None:
        # Created lazily and reused: building an InferenceSession per call is expensive.
        ort_session = ort.InferenceSession("models/vpmodule.onnx", providers=['CPUExecutionProvider'])
        infer_vpmodule_om._ort_session = ort_session
    # Strip the trailing ".1" suffix the ONNX export appends to every key.
    input_names = [inp.name[:-2] for inp in ort_session.get_inputs()]
    om_input = [end_points[name].cpu().numpy() for name in input_names]

    output_tensors = ort_session.get_outputs()

    t1 = time.perf_counter()
    model_output = session.infer(feeds=om_input)
    t2 = time.perf_counter()
    print("infer vpmodule om time: ", (t2 - t1) * 1000, "ms")

    # OM outputs arrive positionally; reshape to the shapes declared in the ONNX.
    for i, o in enumerate(output_tensors):
        end_points[o.name] = torch.tensor(model_output[i].reshape(o.shape), device=device)
    return end_points
def infer_grasp_om(session, end_points):
    """Run the grasp-generator OM model and merge its outputs into end_points.

    Input:
        session: ais_bench InferSession wrapping grasp_generator.om
        end_points: dict of torch tensors; must contain every name listed in
            `input_names` below
    Output:
        end_points, extended with the four grasp prediction heads
    """
    input_names = ['point_clouds', 'input_xyz', 'sa1_inds', 'sa1_xyz', 'sa1_features', 'sa2_inds', 'sa2_xyz',
                   'sa2_features', 'sa3_xyz', 'sa3_features', 'sa4_xyz', 'sa4_features', 'fp2_features',
                   'fp2_xyz', 'fp2_inds', 'objectness_score', 'view_score', 'grasp_top_view_inds',
                   'grasp_top_view_score', 'grasp_top_view_xyz', 'grasp_top_view_rot']
    onnx_input = [end_points[name].cpu().numpy() for name in input_names]

    t1 = time.perf_counter()
    model_output = session.infer(feeds=onnx_input)
    t2 = time.perf_counter()
    print("infer grasp omtime: ", (t2 - t1) * 1000, "ms")

    # The model echoes its 21 inputs and appends the four grasp heads, so the
    # output name list is derived instead of duplicated.
    output_names = input_names + ['grasp_score_pred', 'grasp_angle_cls_pred',
                                  'grasp_width_pred', 'grasp_tolerance_pred']
    for name, out in zip(output_names, model_output):
        end_points[name] = torch.tensor(out, device=device)
    return end_points

def batch_viewpoint_params_to_matrix(batch_towards, batch_angle):
    """ Transform approach vectors and in-plane rotation angles to rotation matrices.

        Input:
            batch_towards: [torch.FloatTensor, (N,3)]
                approach vectors in batch
            batch_angle: [torch.floatTensor, (N,)]
                in-plane rotation angles in batch

        Output:
            batch_matrix: [torch.floatTensor, (N,3,3)]
                rotation matrices in batch
    """
    num = batch_towards.shape[0]
    zeros = batch_towards.new_zeros(num)
    ones = batch_towards.new_ones(num)

    # Orthogonal in-plane axis: rotate the (x, y) components by 90 degrees.
    approach = batch_towards
    ortho = torch.stack([-approach[:, 1], approach[:, 0], zeros], dim=-1)
    # Degenerate case: approach is parallel to the z-axis, pick y as the ortho axis.
    degenerate = (torch.norm(ortho, dim=-1) == 0)
    ortho[degenerate, 1] = 1

    # Normalize the frame and complete it with the cross product.
    approach = approach / torch.norm(approach, dim=-1, keepdim=True)
    ortho = ortho / torch.norm(ortho, dim=-1, keepdim=True)
    binormal = torch.cross(approach, ortho, dim=-1)

    # In-plane rotation about the approach (x) axis.
    sin = torch.sin(batch_angle)
    cos = torch.cos(batch_angle)
    in_plane = torch.stack([ones, zeros, zeros,
                            zeros, cos, -sin,
                            zeros, sin, cos], dim=-1).reshape([-1, 3, 3])

    # Columns of the frame matrix are the three axes.
    frame = torch.stack([approach, ortho, binormal], dim=-1)
    return torch.matmul(frame, in_plane)

def pred_decode(end_points):
    """Decode raw network heads into per-scene grasp arrays.

    Input:
        end_points: dict produced by infer_grasp_om(); must contain
            'objectness_score', 'grasp_score_pred', 'fp2_xyz',
            'grasp_top_view_xyz', 'grasp_angle_cls_pred',
            'grasp_width_pred', 'grasp_tolerance_pred'.

    Output:
        grasp_preds: list (one entry per batch element) of (Ns, 17) tensors
            laid out as [score, width, height, depth, rotation(9), center(3), obj_id]
            — the column layout GraspGroup expects.
    """
    batch_size = len(end_points['point_clouds'])
    grasp_preds = []
    for i in range(batch_size):
        ## load predictions
        objectness_score = end_points['objectness_score'][i].float()
        grasp_score = end_points['grasp_score_pred'][i].float()
        grasp_center = end_points['fp2_xyz'][i].float()
        # Approach vector points opposite to the predicted top view direction.
        approaching = -end_points['grasp_top_view_xyz'][i].float()
        grasp_angle_class_score = end_points['grasp_angle_cls_pred'][i]
        # 1.2 widening factor — presumably a safety margin for the gripper; TODO confirm.
        grasp_width = 1.2 * end_points['grasp_width_pred'][i]
        grasp_width = torch.clamp(grasp_width, min=0, max=GRASP_MAX_WIDTH)
        grasp_tolerance = end_points['grasp_tolerance_pred'][i]

        ## slice preds by angle
        # grasp angle: pick the best of 12 angle bins, spanning [0, pi)
        grasp_angle_class = torch.argmax(grasp_angle_class_score, 0)
        grasp_angle = grasp_angle_class.float() / 12 * np.pi
        # grasp score & width & tolerance: gather the values of the chosen angle bin
        grasp_angle_class_ = grasp_angle_class.unsqueeze(0)
        grasp_score = torch.gather(grasp_score, 0, grasp_angle_class_).squeeze(0)
        grasp_width = torch.gather(grasp_width, 0, grasp_angle_class_).squeeze(0)
        grasp_tolerance = torch.gather(grasp_tolerance, 0, grasp_angle_class_).squeeze(0)

        ## slice preds by score/depth
        # grasp depth: best depth bin, each bin being 1 cm (0.01 m)
        # NOTE(review): numpy-style 'keepdims' — torch's canonical kwarg is
        # 'keepdim'; works on recent torch, verify against the pinned version.
        grasp_depth_class = torch.argmax(grasp_score, 1, keepdims=True)
        grasp_depth = (grasp_depth_class.float()+1) * 0.01
        # grasp score & angle & width & tolerance at the chosen depth bin
        grasp_score = torch.gather(grasp_score, 1, grasp_depth_class)
        grasp_angle = torch.gather(grasp_angle, 1, grasp_depth_class)
        grasp_width = torch.gather(grasp_width, 1, grasp_depth_class)
        grasp_tolerance = torch.gather(grasp_tolerance, 1, grasp_depth_class)

        ## slice preds by objectness: keep only points classified as object (class 1)
        objectness_pred = torch.argmax(objectness_score, 0)
        objectness_mask = (objectness_pred==1)
        grasp_score = grasp_score[objectness_mask]
        grasp_width = grasp_width[objectness_mask]
        grasp_depth = grasp_depth[objectness_mask]
        approaching = approaching[objectness_mask]
        grasp_angle = grasp_angle[objectness_mask]
        grasp_center = grasp_center[objectness_mask]
        grasp_tolerance = grasp_tolerance[objectness_mask]
        # Re-weight the score by the predicted tolerance (normalized to [0, 1]).
        grasp_score = grasp_score * grasp_tolerance / GRASP_MAX_TOLERANCE

        ## convert to rotation matrix (flattened to 9 columns)
        Ns = grasp_angle.size(0)
        approaching_ = approaching.view(Ns, 3)
        grasp_angle_ = grasp_angle.view(Ns)
        rotation_matrix = batch_viewpoint_params_to_matrix(approaching_, grasp_angle_)
        rotation_matrix = rotation_matrix.view(Ns, 9)

        # merge preds; fixed 2 cm gripper finger height, obj_ids = -1 (unknown object)
        grasp_height = 0.02 * torch.ones_like(grasp_score)
        obj_ids = -1 * torch.ones_like(grasp_score)
        grasp_preds.append(torch.cat([grasp_score, grasp_width, grasp_height, grasp_depth, rotation_matrix, grasp_center, obj_ids], axis=-1))
    return grasp_preds

def get_grasps(sa1_model, remaining_model, vpmodule_om, grasp_gen_om, end_points):
    """Run the split pipeline (torch front-end, then two OM sessions) and
    decode the result into a GraspGroup for the first batch element."""
    with torch.no_grad():
        ep = sa1_model(end_points['point_clouds'], end_points)
        ep = remaining_model(ep)
        ep = infer_vpmodule_om(vpmodule_om, ep)
        ep = infer_grasp_om(grasp_gen_om, ep)
        grasp_preds = pred_decode(ep)
    return GraspGroup(grasp_preds[0].detach().cpu().numpy())

def split_infer(sa1_model, remaining_model, vp_session, grasp_gen_session, data_dir=None):
    """Load the scene from data_dir, predict grasps, optionally filter
    collisions, and return (confidences, widths, pose_list)."""
    end_points, cloud = get_data(data_dir, view=False)

    gg = get_grasps(sa1_model, remaining_model, vp_session, grasp_gen_session, end_points)
    print(len(gg))

    # Collision filtering runs against the full (unsampled) masked cloud.
    if cfgs.collision_thresh > 0:
        gg = collision_detection(gg, np.array(cloud.points))
    return transfer_gg_to_list(gg, cloud, vis=True)

def collision_detection(gg, cloud):
    """Remove grasps that collide with the scene cloud (model-free check)."""
    detector = ModelFreeCollisionDetector(cloud, voxel_size=cfgs.voxel_size)
    colliding = detector.detect(gg, approach_dist=0.05, collision_thresh=cfgs.collision_thresh)
    return gg[~colliding]

def transfer_gg_to_list(gg, cloud, vis=False):
    """NMS + score-sort the grasps, keep the top 50, optionally visualize,
    and flatten each grasp into plain Python data.

    Returns (confidences, widths, gg_list) where gg_list contains 4x4
    homogeneous grasp poses as nested lists.
    """
    gg.nms()
    gg.sort_by_score()
    gg = gg[:50]

    # Visualization: show only the ten best grasps to keep the view readable.
    if vis:
        top = gg[:10]
        o3d.visualization.draw_geometries([cloud, *top.to_open3d_geometry_list()])

    confidences = [g.score for g in gg]
    widths = [g.width for g in gg]

    gg_list = []
    bottom_row = np.array([0, 0, 0, 1])
    for g in gg:
        # [R | t] over [0 0 0 1] -> homogeneous transform.
        pose = np.vstack((np.hstack((g.rotation_matrix, g.translation.reshape(-1, 1))), bottom_row))
        gg_list.append(pose.tolist())

    return confidences, widths, gg_list

def get_data(data_dir, view=False):
    """Load the demo scene using the fixed capture-file layout under data_dir.

    Input:
        data_dir: directory containing rgb_yolo_gp.png, depth_gp.npy and
            camera_intrinsics.json (trailing slash expected, e.g. 'inputs/')
        view: when True, pop up an Open3D window showing the masked cloud

    Output:
        end_points: dict with 'point_clouds' -> (1, num_point, 3) float tensor
        cloud: colored o3d.geometry.PointCloud of all masked points

    NOTE(review): the mask is read from the relative path
    'outputs/mask_banana.png', NOT from data_dir — kept as-is for backward
    compatibility, but verify this is intentional.
    """
    rgb_name = "rgb_yolo_gp.png"
    depth_name = "depth_gp.npy"
    mask_name = "outputs/mask_banana.png"
    meta_name = "camera_intrinsics.json"

    # Delegate to get_cloud_points: the loading/sampling logic was previously
    # duplicated verbatim in both functions.
    return get_cloud_points(data_dir + rgb_name,
                            data_dir + depth_name,
                            data_dir + meta_name,
                            mask_name,
                            view=view)

def get_cloud_points(rgb_path, depth_path, meta_path, mask_path, view=False):
    """Build network inputs and an Open3D cloud from RGB-D, mask and intrinsics.

    Input:
        rgb_path: RGB image path (decoded as-is by OpenCV)
        depth_path: .npy depth map path; values are millimetres
        meta_path: JSON file with scalar keys fx, fy, cx, cy
        mask_path: binary workspace mask image (white = keep, black = drop)
        view: when True, pop up an Open3D window showing the masked cloud

    Output:
        end_points: dict with 'point_clouds' -> (1, num_point, 3) float tensor
        cloud: colored o3d.geometry.PointCloud of all masked points
    """
    rgb = cv2.imread(rgb_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    color = rgb.astype(np.float32) / 255.0
    print("color shape",color.shape)

    depth = np.load(depth_path)
    depth = depth.astype(np.uint16)
    print("depth shape",depth.shape)

    mask = cv2.imread(mask_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    mask = np.array(mask, dtype=np.uint8)
    # Mask convention: white pixels = True (keep), black = False.
    workspace_mask = mask.astype(bool)
    print("mask shape",mask.shape)

    # Camera intrinsics; `with` closes the file handle that the previous
    # json.load(open(...)) form leaked.
    with open(meta_path, 'r') as f:
        meta = json.load(f)
    print(workspace_mask.shape)
    intrinsic = np.zeros((3, 3))
    intrinsic[0][0] = meta['fx']
    intrinsic[1][1] = meta['fy']
    intrinsic[0][2] = meta['cx']
    intrinsic[1][2] = meta['cy']
    intrinsic[2][2] = 1.0

    # Depth unit: millimetres (factor 1000 to metres).
    factor_depth = np.zeros((1, 1))
    factor_depth[0][0] = 1000.0

    # Generate an organized (H, W, 3) point cloud from the depth image.
    camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], factor_depth)
    cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)

    # Keep points inside the workspace mask that have a valid (non-zero) depth.
    mask = (workspace_mask & (depth > 0))
    cloud_masked = cloud[mask]
    color_masked = color[mask]

    # Sample exactly cfgs.num_point points; pad with replacement when short.
    if len(cloud_masked) >= cfgs.num_point:
        idxs = np.random.choice(len(cloud_masked), cfgs.num_point, replace=False)
    else:
        idxs1 = np.arange(len(cloud_masked))
        idxs2 = np.random.choice(len(cloud_masked), cfgs.num_point-len(cloud_masked), replace=True)
        idxs = np.concatenate([idxs1, idxs2], axis=0)
    cloud_sampled = cloud_masked[idxs]

    # Full masked cloud (with colors) for visualization / collision checking.
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(cloud_masked.astype(np.float32))
    cloud.colors = o3d.utility.Vector3dVector(color_masked.astype(np.float32))
    end_points = dict()
    cloud_sampled = torch.from_numpy(cloud_sampled[np.newaxis].astype(np.float32))
    end_points['point_clouds'] = cloud_sampled.to(device)

    # Optional point-cloud visualization.
    if view:
        o3d.visualization.draw_geometries([cloud], window_name="点云可视化", width=800, height=600)

    return end_points, cloud

def get_om_nets():
    """Load the two torch sub-models (CPU, eval mode) and the two Ascend OM
    inference sessions on NPU device 0.

    NOTE: torch.load unpickles arbitrary code — only load trusted checkpoints.
    """
    sa1 = torch.load("models/sa1_model.pt", map_location=device).eval()
    remaining = torch.load("models/remaining_model.pt", map_location=device).eval()
    vp = InferSession(device_id=0, model_path="models/vpmodule_linux_x86_64.om")
    grasp_gen = InferSession(device_id=0, model_path="models/grasp_generator.om")
    return sa1, remaining, vp, grasp_gen

if __name__ == '__main__':
    data_dir = 'inputs/'

    # Load the split model pieces once, then run a single inference pass.
    nets = get_om_nets()
    print("------------------load model success-------------")
    confidences, widths, gg_list = split_infer(*nets, data_dir)
