from cv2 import waitKey

from d435 import D435
import cv2
from ultralytics import YOLO
import numpy as np
import o3d_util as o3d
import cv_util as cvu
import open3d as op3d
import spatialmath.base as smb
from  arm import  Rokae
from util_3d import *


# Inputs to the grasp computation:
# 1) base_t_end   : base -> end-effector transform (current arm pose)
# 2) end_t_cam    : end-effector -> camera transform (hand-eye calibration)
# 3) cam_t_tool   : camera -> tool-tip transform
# 4) cam_aim_point: grasp target point in the camera frame (homogeneous 4-vector)

# s1: base_aim_point = base_t_end @ end_t_cam @ cam_aim_point
# s2: base_t_aim_tool = base_t_tool with its translation column replaced by base_aim_point
# s3: base_t_aim_end = base_t_aim_tool @ aim_tool_t_aim_cam @ aim_cam_t_aim_end
#                    = base_t_aim_tool @ trinv(cam_t_tool) @ trinv(end_t_cam)
def grab(base_t_end , end_t_cam, cam_t_tool, cam_aim_point):
    """Compute the base->end-effector pose that places the tool tip on the aim point.

    The tool keeps its current orientation; only its origin is moved onto the
    target point, then the pose is mapped back to the end-effector frame.

    Args:
        base_t_end: 4x4 homogeneous transform, robot base -> current end-effector.
        end_t_cam: 4x4 homogeneous transform, end-effector -> camera (hand-eye).
        cam_t_tool: 4x4 homogeneous transform, camera -> tool tip.
        cam_aim_point: homogeneous (4,) grasp target point in the camera frame.

    Returns:
        4x4 homogeneous transform, base -> target end-effector pose.
    """
    # Target point expressed in the robot base frame (still homogeneous, w == 1).
    base_aim_point = base_t_end @ end_t_cam @ cam_aim_point
    # Current tool pose in the base frame. Copy before mutating the translation
    # column below, so we never write through an alias of the matmul result.
    base_t_aim_tool = (base_t_end @ end_t_cam @ cam_t_tool).copy()
    # Keep the tool orientation, move its origin onto the aim point.
    base_t_aim_tool[:, 3] = base_aim_point
    # Back out to the end-effector frame:
    # base_T_aim_end = base_T_aim_tool @ tool_T_cam @ cam_T_end
    base_t_aim_end = base_t_aim_tool @ smb.trinv(cam_t_tool) @ smb.trinv(end_t_cam)
    return base_t_aim_end

def yolomask_to_pointcloud(color_image, depth_image, yolo_mask):
    """Mask an aligned RGB-D pair with a YOLO segmentation mask and build a point cloud.

    Args:
        color_image: HxWx3 uint8 color image.
        depth_image: HxW uint16 depth image aligned to ``color_image``.
        yolo_mask: YOLO mask tensor as a numpy array, shape (1, h, w) with
            values in {0, 1} at the model's (possibly smaller) resolution
            -- TODO confirm shape against ultralytics version.

    Returns:
        Point cloud of the masked region, as produced by ``o3d.gen_point_cloud``.
    """
    # Drop the leading batch axis: (1, h, w) -> (h, w); float32 for cv2.resize.
    mask_2d = yolo_mask.reshape(yolo_mask.shape[1], -1).astype(np.float32)
    # cv2.resize takes (width, height), i.e. the transposed image shape.
    size_image = (color_image.shape[1], color_image.shape[0])
    mask_resized = cv2.resize(mask_2d, size_image, interpolation=cv2.INTER_LINEAR)
    # Binarize at 0.5 BEFORE casting: a plain uint8 cast floors every
    # interpolated edge value in (0, 1) to 0 and erodes the mask boundary.
    mask_uint8 = np.where(mask_resized > 0.5, 255, 0).astype(np.uint8)
    # cv2.add with a zero image is a masked copy: pixels outside the mask -> 0.
    color_image_masked = cv2.add(color_image, np.zeros(np.shape(color_image), dtype=np.uint8), mask=mask_uint8)
    depth_image_masked = cv2.add(depth_image, np.zeros(np.shape(depth_image), dtype=np.uint16), mask=mask_uint8)
    # NOTE(review): assumed to filter the depth image in place -- confirm
    # cvu.depth_filter does not return a cleaned copy instead.
    cvu.depth_filter(depth_image_masked)
    pcd = o3d.gen_point_cloud(color_image_masked, depth_image_masked)
    return pcd

init_pos = np.array ([0.23553320870561686, 0.13599433438178785, 0.33854353132547865, -3.1411474368438133, -0.00030757519895313585, 2.34705996088468])

if __name__ == '__main__':
    # COCO class id of the object to grasp (47 = "apple" in the standard COCO
    # label set -- TODO confirm against the trained model's names dict).
    TARGET_CLASS_ID = 47

    # --- detection model ---
    model_path = r'./best.pt'
    model = YOLO(model_path)
    # --- camera + hand-eye calibration ---
    camera = D435()
    camera.set_param_outer('./param_outer.txt')
    end_t_cam = halcon_pose_to_T(camera.out_param_halcon_type)
    inr = camera.color_intrinsics
    # --- robot arm: move to the home/observation pose ---
    arm = Rokae()
    arm.init()
    print(arm.getPose())
    arm.setPose2(lambda st: True, 50, 50, 50, init_pos.tolist())

    while True:
        camera.update()
        color_image, depth_image = camera.get_images()
        results = model(color_image)
        annotation_frame = results[0].plot()
        cv2.imshow("result", annotation_frame)
        # Previously the key press was discarded, leaving no way to exit the
        # loop; allow a clean quit with 'q'.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
        if results[0].masks is None:
            continue
        name_dict = results[0].names  # class-id -> label mapping
        for i in range(len(results[0].masks)):
            class_index = int(results[0].boxes.cls[i].item())
            if class_index != TARGET_CLASS_ID:
                continue
            print(f"grasp target detected: {name_dict.get(class_index, class_index)}")
            # Segment the target and locate its centroid in the camera frame.
            mask_np = results[0].masks[i].data.cpu().numpy()
            pcd = yolomask_to_pointcloud(color_image, depth_image, mask_np)
            center = pcd.get_center()
            cam_aim_point = np.zeros(4)
            cam_aim_point[:3] = center
            cam_aim_point[3] = 1  # homogeneous coordinate
            # Compute the end-effector pose that puts the tool on the target.
            current_pose = arm.getPose()
            base_t_end = rokae_pose_to_T(current_pose)
            # Fixed camera -> tool-tip offset, meters.
            cam_t_tool = transl(0.04, 0.08, 0.16)
            base_t_aim_end = grab(base_t_end, end_t_cam, cam_t_tool, cam_aim_point)
            base_t_aim_end_rokae = T_to_rokae_pose(base_t_aim_end)
            # Approach pose: back off 0.15 m along base x before the grasp pose.
            base_t_aim_end_front_rokae = base_t_aim_end_rokae.copy()
            base_t_aim_end_front_rokae[0] = base_t_aim_end_front_rokae[0] - 0.15
            arm.setPose2(lambda st: True, 50, 50, 50, base_t_aim_end_front_rokae)
            arm.setPose2(lambda st: True, 50, 50, 50, base_t_aim_end_rokae)
            # Return to the observation pose after the grasp attempt.
            arm.setPose2(lambda st: True, 50, 50, 50, init_pos.tolist())
