|
import torch |
|
import torch.nn as nn |
|
import numpy as np |
|
import os |
|
import cv2 |
|
import dvrk |
|
import PyKDL |
|
from PIL import Image |
|
import matplotlib.pyplot as plt |
|
import yaml |
|
import math |
|
from scipy.spatial.transform import Rotation as R |
|
from easydict import EasyDict as edict |
|
import sys |
|
sys.path.append('IGEV/core') |
|
sys.path.append('IGEV') |
|
from igev_stereo import IGEVStereo |
|
from IGEV.core.utils.utils import InputPadder |
|
from rl.agents.ddpg import DDPG |
|
import rl.components as components |
|
|
|
import argparse |
|
from FastSAM.fastsam import FastSAM, FastSAMPrompt |
|
import ast |
|
from FastSAM.utils.tools import convert_box_xywh_to_xyxy |
|
|
|
import torch.nn.functional as F |
|
import queue, threading |
|
|
|
from vmodel import vismodel |
|
from config import opts |
|
|
|
from rectify import my_rectify |
|
|
|
from surrol.robots.ecm import Ecm |
|
import pybullet as p |
|
from surrol.utils.pybullet_utils import * |
|
|
|
class Sim_ECM:
|
ACTION_SIZE = 3 |
|
ACTION_MODE = 'cVc' |
|
DISTANCE_THRESHOLD = 0.005 |
|
POSE_ECM = ((0.15, 0.0, 0.7524), (0, 20 / 180 * np.pi, 0)) |
|
QPOS_ECM = (0, 0.6, 0.04, 0) |
|
WORKSPACE_LIMITS = ((0.45, 0.55), (-0.05, 0.05), (0.60, 0.70)) |
|
SCALING = 1. |
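    # Connect to the PyBullet GUI once at class-definition time; the class
    # attribute `p` holds the returned client id.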
|
p = p.connect(p.GUI) |
|
def __init__(self, render_mode: str = None, cid = -1): |
|
|
|
self.workspace_limits = np.asarray(self.WORKSPACE_LIMITS) |
|
self.workspace_limits *= self.SCALING |
|
|
|
|
|
self.use_camera = False |
|
|
|
|
|
self.has_object = False |
|
self.obj_id = None |
|
|
|
|
|
|
|
|
|
self._duration = 0.1 |
|
|
|
|
|
self.distance_threshold = self.DISTANCE_THRESHOLD * self.SCALING |
|
|
|
|
|
self._view_matrix = p.computeViewMatrixFromYawPitchRoll( |
|
cameraTargetPosition=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING), |
|
distance=1.80 * self.SCALING, |
|
yaw=150, |
|
pitch=-30, |
|
roll=0, |
|
upAxisIndex=2 |
|
) |
|
|
|
def reset_env(self): |
|
assert self.ACTION_MODE in ('cVc', 'dmove', 'droll') |
|
|
|
|
|
reset_camera(yaw=150.0, pitch=-30.0, dist=1.50 * self.SCALING, |
|
target=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING)) |
|
|
|
|
|
self.ecm = Ecm(self.POSE_ECM[0], p.getQuaternionFromEuler(self.POSE_ECM[1]), |
|
scaling=self.SCALING) |
|
|
|
|
|
def SetPoints(windowname, img): |
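    """Show `img` and collect left-click points; Enter (13) confirms, Esc (27) aborts."""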
|
|
|
points = [] |
|
|
|
def onMouse(event, x, y, flags, param): |
|
if event == cv2.EVENT_LBUTTONDOWN: |
|
cv2.circle(temp_img, (x, y), 10, (102, 217, 239), -1) |
|
points.append([x, y]) |
|
cv2.imshow(windowname, temp_img) |
|
|
|
temp_img = img.copy() |
|
cv2.namedWindow(windowname) |
|
cv2.imshow(windowname, temp_img) |
|
cv2.setMouseCallback(windowname, onMouse) |
|
key = cv2.waitKey(0) |
|
    if key == 13:  # Enter confirms the selection
|
print('select point: ', points) |
|
del temp_img |
|
cv2.destroyAllWindows() |
|
return points |
|
    elif key == 27:  # Esc aborts
|
print('quit!') |
|
del temp_img |
|
cv2.destroyAllWindows() |
|
return |
|
else: |
|
|
|
print('retry') |
|
return SetPoints(windowname, img) |
|
|
|
def resize_with_pad(image, target_width, target_height): |
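    """Resize keeping the aspect ratio, then pad bottom/right with a constant colour to reach the target size."""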
|
|
|
|
|
|
|
|
|
height, width = image.shape[:2] |
|
scale = min(target_width / width, target_height / height) |
|
|
|
|
|
resized_image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) |
|
|
|
|
|
pad_height = target_height - resized_image.shape[0] |
|
pad_width = target_width - resized_image.shape[1] |
|
|
|
|
|
    padded_image = cv2.copyMakeBorder(resized_image, 0, pad_height, 0, pad_width, cv2.BORDER_CONSTANT, value=[154, 149, 142])
|
|
|
return padded_image |
|
|
|
def crop_img(img):
    # Trim 100 px from the left and right edges, then resize to the network input size.
    cropped = img[:, 100:]
    cropped = cropped[:, :-100]

    cropped = cv2.resize(cropped, (256, 256))
    return cropped
|
|
|
|
|
class VideoCapture: |
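    """Buffer-free video capture: a daemon thread keeps only the newest frame and also records every frame to test_record/<name>.mp4."""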
|
|
|
def __init__(self, name): |
|
self.cap = cv2.VideoCapture(name) |
|
video_name='test_record/{}.mp4'.format(name.split('/')[-1]) |
|
self.output_video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 30, (800, 600)) |
|
|
|
self.q = queue.Queue() |
|
t = threading.Thread(target=self._reader) |
|
t.daemon = True |
|
t.start() |
|
|
|
|
|
|
|
def _reader(self): |
|
while True: |
|
ret, frame = self.cap.read() |
|
if not ret: |
|
break |
|
self.output_video.write(frame) |
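            # Keep only the most recent frame: discard any unread one first.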
|
if not self.q.empty(): |
|
try: |
|
self.q.get_nowait() |
|
except queue.Empty: |
|
pass |
|
self.q.put(frame) |
|
|
|
def read(self): |
|
return self.q.get() |
|
|
|
def release(self): |
|
|
|
self.cap.release() |
|
self.output_video.release() |
|
|
|
|
|
def transf_DH_modified(alpha, a, theta, d): |
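    """Homogeneous transform for one link in the modified (Craig) DH convention."""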
|
trnsf = np.array([[math.cos(theta), -math.sin(theta), 0., a], |
|
[math.sin(theta) * math.cos(alpha), math.cos(theta) * math.cos(alpha), -math.sin(alpha), -d * math.sin(alpha)], |
|
[math.sin(theta) * math.sin(alpha), math.cos(theta) * math.sin(alpha), math.cos(alpha), d * math.cos(alpha)], |
|
[0., 0., 0., 1.]]) |
|
return trnsf |
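# Hand-eye calibration result: rigid transform taking points from the camera
# frame to the PSM base (RCM) frame.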
|
|
|
|
|
|
|
basePSM_T_cam =np.array([[-0.89330132, 0.3482998 , -0.28407746, -0.0712333 ], |
|
[ 0.44895017, 0.72151095, -0.52712968, 0.08994234], |
|
[ 0.02136583, -0.59842226, -0.80089594, -0.06478026], |
|
[ 0. , 0. , 0. , 1. ]]) |
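# Inverse of basePSM_T_cam (rotation transposed, translation = -R^T t).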
|
|
|
|
|
|
|
cam_T_basePSM =np.array([[-0.89330132, 0.44895017, 0.02136583, -0.10262834], |
|
[ 0.3482998 , 0.72151095, -0.59842226, -0.07884979], |
|
[-0.28407746, -0.52712968, -0.80089594, -0.02470674], |
|
[ 0. , 0. , 0. , 1. ]]) |
|
|
|
|
|
class VisPlayer(nn.Module): |
|
def __init__(self): |
|
super().__init__() |
|
|
|
self.device='cuda:0' |
|
|
|
|
|
self._init_rcm() |
|
        self.img_size=(320,240)  # (width, height), as consumed by cv2.resize
|
self.scaling=1. |
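        # Stereo calibration: baseline in metres, focal lengths in pixels.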
|
self.calibration_data = { |
|
'baseline': 0.005500, |
|
'focal_length_left': 916.367081, |
|
'focal_length_right': 918.730361 |
|
} |
|
self.threshold=0.013 |
|
|
|
|
|
def _init_rcm(self): |
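        """Define the fixed tool->tip transform and a hard-coded initial RCM pose."""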
|
|
|
self.tool_T_tip=np.array([[0.0, 1.0, 0.0, 0.0], |
|
[-1.0, 0.0, 0.0, 0.0], |
|
[0.0, 0.0, 1.0, 0.0], |
|
[0.0, 0.0, 0.0, 1.0]]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        # Hard-coded initial RCM orientation ('xyz' Euler) of the tool tip.
        self.rcm_init_eul=np.array([-2.94573084, 0.15808114, 1.1354972])
|
|
|
|
|
self.rcm_init_pos=np.array([ -0.0617016, -0.00715477, -0.0661465]) |
|
|
|
def _load_depth_model(self, checkpoint_path='pretrained_models/sceneflow.pth'): |
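        """Build IGEV-Stereo with its default inference arguments and load the checkpoint."""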
|
args=edict() |
|
args.restore_ckpt=checkpoint_path |
|
args.save_numpy=False |
|
args.mixed_precision=False |
|
args.valid_iters=32 |
|
args.hidden_dims=[128]*3 |
|
args.corr_implementation="reg" |
|
args.shared_backbone=False |
|
args.corr_levels=2 |
|
args.corr_radius=4 |
|
args.n_downsample=2 |
|
args.slow_fast_gru=False |
|
args.n_gru_layers=3 |
|
args.max_disp=192 |
|
|
|
self.depth_model = torch.nn.DataParallel(IGEVStereo(args), device_ids=[0]) |
|
|
|
self.depth_model.load_state_dict(torch.load(args.restore_ckpt)) |
|
|
|
self.depth_model = self.depth_model.module |
|
self.depth_model.to("cuda") |
|
self.depth_model.eval() |
|
|
|
|
|
def _load_policy_model(self, vmodel_file, filepath='./pretrained_models/state_dict.pt'): |
|
        with open('rl/configs/agent/ddpg.yaml', "r") as f:
            agent_params = edict(yaml.load(f, Loader=yaml.FullLoader))
|
env_params = edict( |
|
obs=19, |
|
achieved_goal=3, |
|
goal=3, |
|
act=7, |
|
max_timesteps=10, |
|
max_action=1, |
|
act_rand_sampler=None, |
|
) |
|
|
|
|
|
self.agent=DDPG(env_params=env_params,agent_cfg=agent_params) |
|
checkpt_path=filepath |
|
checkpt = torch.load(checkpt_path, map_location=self.device) |
|
self.agent.load_state_dict(checkpt) |
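        # Restore the normalizer statistics saved as tensors in the checkpoint.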
|
|
|
|
|
|
|
|
|
self.agent.g_norm.std=self.agent.g_norm_v.numpy() |
|
self.agent.g_norm.mean=self.agent.g_norm_mean.numpy() |
|
self.agent.o_norm.std=self.agent.o_norm_v.numpy() |
|
self.agent.o_norm.mean=self.agent.o_norm_mean.numpy() |
|
|
|
|
|
|
|
        '''
        self.agent.depth_norm.std=self.agent.d_norm_v.numpy()
        self.agent.depth_norm.mean=self.agent.d_norm_mean.numpy()
        '''
|
self.agent.cuda() |
|
self.agent.eval() |
|
|
|
opts.device='cuda:0' |
|
self.v_model=vismodel(opts) |
|
ckpt=torch.load(vmodel_file, map_location=opts.device) |
|
self.v_model.load_state_dict(ckpt['state_dict']) |
|
self.v_model.to(opts.device) |
|
self.v_model.eval() |
|
|
|
def convert_disparity_to_depth(self, disparity, baseline, focal_length): |
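        """depth = baseline * focal_length / disparity (units follow the baseline)."""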
|
        depth = baseline * focal_length / disparity
|
return depth |
|
|
|
|
|
def _get_depth(self, limg, rimg): |
|
|
|
        '''
        limg, rimg: HxWx3 uint8 RGB arrays (rectified left/right frames).
        Returns a depth map in the units of the calibration baseline (metres).
        '''
|
limg=torch.from_numpy(limg).permute(2, 0, 1).float().to(self.device).unsqueeze(0) |
|
rimg=torch.from_numpy(rimg).permute(2, 0, 1).float().to(self.device).unsqueeze(0) |
|
|
|
with torch.no_grad(): |
|
|
|
padder = InputPadder(limg.shape, divis_by=32) |
|
image1, image2 = padder.pad(limg, rimg) |
|
disp = self.depth_model(image1, image2, iters=32, test_mode=True) |
|
disp = disp.cpu().numpy() |
|
|
|
disp = padder.unpad(disp).squeeze() |
|
depth_map = self.convert_disparity_to_depth(disp, self.calibration_data['baseline'], self.calibration_data['focal_length_left']) |
|
|
|
return depth_map |
|
|
|
def _load_fastsam(self, model_path="./FastSAM/weights/FastSAM-x.pt"): |
|
|
|
self.seg_model=FastSAM(model_path) |
|
|
|
|
|
    def _seg_with_fastsam(self, image, object_point):
        # Foreground prompt at the selected object, background prompt at (200, 200).
        point_prompt = [object_point, [200, 200]]
        point_label = [1, 0]
        everything_results = self.seg_model(
            image,
            device=self.device,
            retina_masks=True,
            imgsz=608,
            conf=0.25,
            iou=0.7
        )

        prompt_process = FastSAMPrompt(image, everything_results, device=self.device)
        ann = prompt_process.point_prompt(
            points=point_prompt, pointlabel=point_label
        )

        return ann[0]
|
|
|
    def _seg_with_red(self, grid_RGB):
        # Threshold red-ish hues in HSV space.
        grid_HSV = cv2.cvtColor(grid_RGB, cv2.COLOR_RGB2HSV)

        lower1 = np.array([0, 59, 25])
        upper1 = np.array([20, 255, 255])
        mask1 = cv2.inRange(grid_HSV, lower1, upper1)

        return mask1
|
|
|
def _get_visual_state(self, seg, depth, robot_pos, robot_rot, jaw, goal): |
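        # NOTE: the reshape below treats img_size as (rows, cols) while cv2.resize
        # treats it as (width, height); for a non-square input the orderings differ.
        # The robot_pos/robot_rot/jaw/goal tensors built here are currently unused:
        # only the stacked seg+depth pair feeds the visual processor, whose
        # output[3:] holds the predicted waypoint position (3) and rotation (3).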
|
seg_d=np.concatenate([seg.reshape(1, self.img_size[0], self.img_size[1]), \ |
|
depth.reshape(1, self.img_size[0], self.img_size[1])],axis=0) |
|
|
|
inputs=torch.tensor(seg_d).unsqueeze(0).float().to(self.device) |
|
robot_pos=torch.tensor(robot_pos).to(self.device) |
|
robot_rot=torch.tensor(robot_rot).to(self.device) |
|
jaw=torch.tensor(jaw).to(self.device) |
|
goal=torch.tensor(goal).to(self.device) |
|
|
|
with torch.no_grad(): |
|
|
|
v_output=self.agent.v_processor(inputs).squeeze() |
|
|
|
waypoint_pos_rot=v_output[3:] |
|
|
|
return waypoint_pos_rot[:3].cpu().data.numpy().copy(), waypoint_pos_rot[3:].cpu().data.numpy().copy() |
|
|
|
    def _get_action(self, seg, depth, robot_pos, robot_rot, ecm_wz, goal):

        '''
        input: seg (h,w); depth (h,w); robot_pos 3; robot_rot 3; ecm_wz 1; goal 3
        '''

        seg=torch.from_numpy(seg).float().to(self.device)
        depth=torch.from_numpy(depth).float().to(self.device)

        robot_pos=torch.tensor(robot_pos).float().to(self.device)
        robot_rot=torch.tensor(robot_rot).float().to(self.device)

        # The original referenced an undefined `jaw` here; the ECM signal declared
        # in the signature is what the observation below actually needs.
        ecm_wz=torch.tensor(ecm_wz).float().reshape(-1).to(self.device)
        goal=torch.tensor(goal).float().to(self.device)
|
|
|
with torch.no_grad(): |
|
|
|
v_output=self.v_model.get_obs(seg.unsqueeze(0), depth.unsqueeze(0))[0] |
|
assert v_output.shape == (2,) |
|
|
|
            o_new=torch.cat([
                robot_pos, robot_rot, torch.tensor([0.0, 0.0], device=self.device),
                v_output, ecm_wz
            ])
|
print('o_new: ',o_new) |
|
o_norm=self.agent.o_norm.normalize(o_new,device=self.device) |
|
|
|
g_norm=self.agent.g_norm.normalize(goal, device=self.device) |
|
|
|
input_tensor=torch.cat((o_norm, g_norm), axis=0).to(torch.float32) |
|
|
|
action = self.agent.actor(input_tensor).cpu().data.numpy().flatten() |
|
return action |
|
|
|
def get_euler_from_matrix(self, mat): |
|
""" |
|
:param mat: rotation matrix (3*3) |
|
:return: rotation in 'xyz' euler |
|
""" |
|
rot = R.from_matrix(mat) |
|
return rot.as_euler('xyz', degrees=False) |
|
|
|
def get_matrix_from_euler(self, ori): |
|
""" |
|
:param ori: rotation in 'xyz' euler |
|
:return: rotation matrix (3*3) |
|
""" |
|
rot = R.from_euler('xyz', ori) |
|
return rot.as_matrix() |
|
|
|
def wrap_angle(self, theta): |
|
return (theta + np.pi) % (2 * np.pi) - np.pi |
|
|
|
def convert_pos(self,pos,matrix): |
|
        '''
        input: pos, a 3-vector, and `matrix`, a 4x4 homogeneous transform
        output: the position expressed in the target frame (R @ pos + t)
        '''
|
return np.matmul(matrix[:3,:3],pos)+matrix[:3,3] |
|
|
|
|
|
|
|
|
|
|
|
def convert_rot(self, euler_angles, matrix): |
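        """
        Re-express the rotation given by 'xyz' Euler angles in the frame defined
        by the inverse of `matrix` (a 4x4 homogeneous transform); returns a 3x3
        rotation matrix.
        """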
|
|
|
|
|
roll, pitch, yaw = euler_angles |
|
R_x = np.array([[1, 0, 0], [0, np.cos(roll), -np.sin(roll)], [0, np.sin(roll), np.cos(roll)]]) |
|
R_y = np.array([[np.cos(pitch), 0, np.sin(pitch)], [0, 1, 0], [-np.sin(pitch), 0, np.cos(pitch)]]) |
|
R_z = np.array([[np.cos(yaw), -np.sin(yaw), 0], [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]]) |
|
rotation_matrix = np.matmul(R_z, np.matmul(R_y, R_x)) |
|
|
|
|
|
extrinsic_matrix_inv = np.linalg.inv(matrix) |
|
|
|
|
|
rotation_matrix_inv = extrinsic_matrix_inv[:3, :3] |
|
|
|
|
|
        rotation_converted = np.matmul(rotation_matrix_inv, rotation_matrix)

        return rotation_converted
|
|
|
def get_bPSM_T_j6(self, joint_value): |
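        """dVRK PSM forward kinematics: base -> distal wrist (joint 6) frame via modified DH."""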
|
LRcc = 0.4318 |
|
LTool = 0.4162 |
|
LPitch2Yaw = 0.0091 |
|
|
|
base_T_j1 = transf_DH_modified( np.pi*0.5, 0. , joint_value[0] + np.pi*0.5 , 0. ) |
|
j1_T_j2 = transf_DH_modified(-np.pi*0.5, 0. , joint_value[1] - np.pi*0.5 , 0. ) |
|
j2_T_j3 = transf_DH_modified( np.pi*0.5, 0. , 0.0 , joint_value[2]-LRcc ) |
|
j3_T_j4 = transf_DH_modified( 0. , 0. , joint_value[3] , LTool ) |
|
j4_T_j5 = transf_DH_modified(-np.pi*0.5, 0. , joint_value[4] - np.pi*0.5 , 0. ) |
|
j5_T_j6 = transf_DH_modified(-np.pi*0.5 , LPitch2Yaw , joint_value[5] - np.pi*0.5 , 0. ) |
|
|
|
j6_T_j6f = np.array([[ 0.0, -1.0, 0.0, 0.0], |
|
[ 0.0, 0.0, 1.0, 0.0], |
|
[-1.0, 0.0, 0.0, 0.0], |
|
[ 0.0, 0.0, 0.0, 1.0]]) |
|
|
|
bPSM_T_j2 = np.matmul(base_T_j1, j1_T_j2) |
|
bPSM_T_j3 = np.matmul(bPSM_T_j2, j2_T_j3) |
|
bPSM_T_j4 = np.matmul(bPSM_T_j3, j3_T_j4) |
|
bPSM_T_j5 = np.matmul(bPSM_T_j4, j4_T_j5) |
|
bPSM_T_j6 = np.matmul(bPSM_T_j5, j5_T_j6) |
|
bPSM_T_j6f = np.matmul(bPSM_T_j6, j6_T_j6f) |
|
return bPSM_T_j6f |
|
|
|
def rcm2tip(self, rcm_action): |
|
return np.matmul(rcm_action, self.tool_T_tip) |
|
|
|
def _set_action(self, action, robot_pos, rot): |
|
|
|
        '''
        robot_pos: tip position in the camera frame
        rot: tip rotation in the RCM frame (3x3 matrix)
        '''
        # Limit the per-step translation to 1 cm (times the workspace scaling).
        action[:3] *= 0.01 * self.scaling
|
|
|
|
|
|
|
ecm_pos=robot_pos+action[:3] |
|
print('aft robot pos tip ecm: ',ecm_pos) |
|
psm_pose=np.zeros((4,4)) |
|
|
|
psm_pose[3,3]=1 |
|
psm_pose[:3,:3]=rot |
|
|
|
rcm_pos=self.convert_pos(ecm_pos,basePSM_T_cam) |
|
print('aft robot pos tip rcm: ',rcm_pos) |
|
psm_pose[:3,3]=rcm_pos |
|
|
|
|
|
|
|
|
|
return psm_pose |
|
|
|
|
|
''' |
|
def _set_action(self, action, rot, robot_pos): |
|
""" |
|
delta_position (6), delta_theta (1) and open/close the gripper (1) |
|
in the ecm coordinate system |
|
input: robot_rot, robot_pos in ecm |
|
""" |
|
# TODO: need to ensure to use this scaling |
|
action[:3] *= 0.01 * self.scaling # position, limit maximum change in position |
|
#ecm_pose=self.rcm2ecm(psm_pose) |
|
#ecm_pos=self.convert_pos(robot_pos, cam_T_basePSM) |
|
ecm_pos=robot_pos+action[:3] |
|
#ecm_pos[2]=ecm_pos[2]-2*action[2] |
|
|
|
#ecm_pose[:3,3]=ecm_pose[:3,3]+action[:3] |
|
#rot=self.get_euler_from_matrix(ecm_pose[:3,:3]) |
|
#rot=self.convert_rot(robot_rot, cam_T_basePSM) |
|
#rot=self.get_euler_from_matrix(robot_rot) |
|
|
|
#action[3:6] *= np.deg2rad(20) |
|
#rot =(self.wrap_angle(rot[0]+action[3]),self.wrap_angle(rot[1]+action[4]),self.wrap_angle(rot[2]+action[5])) |
|
#rcm_action_matrix=self.convert_rot(rot,basePSM_T_cam) # ecm2rcm rotation |
|
|
|
rcm_pos=self.convert_pos(ecm_pos,basePSM_T_cam) # ecm2rcm position |
|
|
|
rot_matrix=self.get_matrix_from_euler(rot) |
|
#rcm_action_matrix=self.convert_rot(ecm_pose) #self.ecm2rcm(ecm_pose) |
|
|
|
#rcm_action_eul=self.get_euler_from_matrix(rcm_action_matrix) |
|
#rcm_action_eul=(self.rcm_init_eul[0], self.rcm_init_eul[1],rcm_action_eul[2]) |
|
#rcm_action_matrix=self.get_matrix_from_euler(rcm_action_eul) |
|
|
|
psm_pose=np.zeros((4,4)) |
|
psm_pose[3,3]=1 |
|
psm_pose[:3,:3]=rot_matrix |
|
psm_pose[:3,3]=rcm_pos |
|
|
|
# TODO: use get_bPSM_T_j6 function |
|
rcm_action=self.rcm2tip(psm_pose) |
|
rcm_action=psm_pose |
|
|
|
return rcm_action |
|
''' |
|
def convert_point_to_camera_axis(self, x, y, depth, intrinsics_matrix): |
|
''' |
|
# Example usage |
|
x = 100 |
|
y = 200 |
|
depth = 5.0 |
|
intrinsics_matrix = np.array([[500, 0, 320], [0, 500, 240], [0, 0, 1]]) |
|
|
|
xc, yc, zc = convert_point_to_camera_axis(x, y, depth, intrinsics_matrix) |
|
print(f"Camera axis coordinates: xc={xc}, yc={yc}, zc={zc}") |
|
''' |
|
|
|
fx, fy, cx, cy = intrinsics_matrix[0, 0], intrinsics_matrix[1, 1], intrinsics_matrix[0, 2], intrinsics_matrix[1, 2] |
|
|
|
|
|
xn = (x - cx) / fx |
|
yn = (y - cy) / fy |
|
|
|
|
|
xc = xn * depth |
|
yc = yn * depth |
|
zc = depth |
|
|
|
return np.array([xc, yc, zc]) |
|
|
|
def goal_distance(self,goal_a, goal_b): |
|
assert goal_a.shape==goal_b.shape |
|
return np.linalg.norm(goal_a-goal_b,axis=-1) |
|
|
|
    def is_success(self, curr_pos, desired_goal):
        d=self.goal_distance(curr_pos, desired_goal)
        d3=np.abs(curr_pos[2]-desired_goal[2])
        print('distance: ',d)
        print('distance z-axis: ',d3)
        # Success as soon as the z-axis error is below 3 mm; the combined check
        # below is kept from the original logic (it can only fire if the
        # thresholds change).
        if d3<0.003:
            return True
        return bool(d<self.threshold and d3<0.003)
|
|
|
def init_run(self): |
|
        # Left-camera intrinsics (kept for reference; see convert_point_to_camera_axis).
        intrinsics_matrix=np.array([[916.367081, 1.849829, 381.430393], [0.000000, 918.730361, 322.704614], [0.000000, 0.000000, 1.000000]])
        self.ecm = dvrk.ecm('ECM')
        # Assumption: run_step drives a PSM through self.p, which the original
        # script never initializes; 'PSM1' is a guessed arm name.
        self.p = dvrk.psm('PSM1')
        self._finished=False
|
|
|
|
|
self._load_depth_model() |
|
|
|
self._load_policy_model(vmodel_file='/home/kj/kj_demo/active/pretrained_models/best_model.pt',filepath='/home/kj/kj_demo/active/pretrained_models/s80_DDPG_demo0_traj_best_kj.pt') |
|
self._load_fastsam() |
|
|
|
self.cap_0=VideoCapture("/dev/video8") |
|
self.cap_2=VideoCapture("/dev/video6") |
|
|
|
|
|
        # Flush a few startup frames so exposure and focus settle.
        for i in range(10):
|
frame1=self.cap_0.read() |
|
frame2=self.cap_2.read() |
|
|
|
self.fs = cv2.FileStorage("/home/kj/ar/EndoscopeCalibration/calibration_new.yaml", cv2.FILE_STORAGE_READ) |
|
|
|
frame1, frame2 = my_rectify(frame1, frame2, self.fs) |
|
|
|
|
|
frame1=cv2.resize(frame1, self.img_size) |
|
frame2=cv2.resize(frame2, self.img_size) |
|
|
|
point=SetPoints("Goal Selection", frame1) |
|
|
|
self.object_point=point[0] |
|
|
|
|
|
frame1=cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB) |
|
frame2=cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB) |
|
|
|
        # Placeholder goal in the camera frame; replace with the goal computed
        # from the selected pixel and its depth when available.
        goal= np.array([0.0,0.0,0.0])

        self.goal=goal
        # run_step compares the commanded RCM-frame position against this goal,
        # so convert it once here (assumption: the goal is given in the camera frame).
        self.rcm_goal=self.convert_pos(self.goal, basePSM_T_cam)
|
|
|
|
|
self.count=0 |
|
|
|
self.sim_ecm = Sim_ECM('human') |
|
self.sim_ecm.reset_env() |
|
|
|
current_dvrk_jp = self.ecm.measured_jp() |
|
self.sim_ecm.ecm.reset_joint(np.array(current_dvrk_jp)) |
|
|
|
def run_step(self): |
|
if self._finished: |
|
return True |
|
|
|
|
|
self.count+=1 |
|
print("--------step {}----------".format(self.count)) |
|
|
|
|
|
frame1=self.cap_0.read() |
|
frame2=self.cap_2.read() |
|
|
|
|
|
|
|
frame1, frame2 = my_rectify(frame1, frame2, self.fs) |
|
|
|
frame1=cv2.resize(frame1, self.img_size) |
|
frame2=cv2.resize(frame2, self.img_size) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
frame1=cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB) |
|
frame2=cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB) |
|
|
|
|
|
plt.imsave( 'test_record/frame1_{}.png'.format(self.count),frame1) |
|
plt.imsave( 'test_record/frame2_{}.png'.format(self.count),frame2) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        # Empirically determined 9 cm offset added to the predicted depth map.
        depth=self._get_depth(frame1, frame2)+0.09
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
plt.imsave('test_record/pred_depth_{}.png'.format(self.count),depth) |
|
|
|
seg=self._seg_with_fastsam(frame1,self.object_point) |
|
|
|
|
|
        seg=np.array(seg==True).astype(int)  # boolean mask -> {0, 1} integers
|
|
|
np.save('test_record/seg.npy',seg) |
|
plt.imsave('test_record/seg_{}.png'.format(self.count),seg) |
|
|
|
print("finish seg") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
robot_pose=self.p.measured_cp() |
|
robot_pos=robot_pose.p |
|
print("pre action pos rcm: ",robot_pos) |
|
robot_pos=np.array([robot_pos[0],robot_pos[1],robot_pos[2]]) |
|
|
|
pre_robot_pos=np.array([robot_pos[0],robot_pos[1],robot_pos[2]]) |
|
|
|
|
|
transform_2=robot_pose.M |
|
np_m=np.array([[transform_2[0,0], transform_2[0,1], transform_2[0,2]], |
|
[transform_2[1,0], transform_2[1,1], transform_2[1,2]], |
|
[transform_2[2,0], transform_2[2,1], transform_2[2,2]]]) |
|
|
|
        tip_rcm_pose=np.zeros((4,4))

        tip_rcm_pose[3,3]=1
        tip_rcm_pose[:3,:3]=np_m
        tip_rcm_pose[:3,3]=robot_pos

        tip_rcm_pose=self.rcm2tip(tip_rcm_pose)

        np_m=tip_rcm_pose[:3,:3]
        # _set_action and the policy expect the tip position in the camera frame,
        # so the RCM-frame position is converted here (assumption: the original
        # omitted this step; the rotation stays in the RCM frame).
        robot_pos=self.convert_pos(tip_rcm_pose[:3,3], cam_T_basePSM)
|
|
|
|
|
|
|
|
|
robot_rot=self.get_euler_from_matrix(np_m) |
|
|
|
|
|
|
|
print("pre action pos tip ecm: ",robot_pos) |
|
|
|
|
|
        # The policy also expects an ECM w_z input, which this script never
        # computes; a zero placeholder is passed here (assumption).
        ecm_wz=np.array([0.0])
        action=self._get_action(seg, depth, robot_pos, robot_rot, ecm_wz, self.goal)
|
print("finish get action") |
|
print("action: ",action) |
|
|
|
|
|
|
|
|
|
|
|
state=self._set_action(action, robot_pos, np_m) |
|
print("finish set action") |
|
print("state: ",state) |
|
|
|
|
|
|
|
|
|
        # Rotation from the policy output (currently unused: the commanded frame
        # below keeps the arm's present orientation).
        PSM2_rotate = PyKDL.Rotation(state[0,0], state[0,1], state[0,2],
                                     state[1,0], state[1,1], state[1,2],
                                     state[2,0], state[2,1], state[2,2])

        PSM2_pose = PyKDL.Vector(state[0,-1], state[1,-1], state[2,-1])
        curr_robot_pos=np.array([state[0,-1], state[1,-1], state[2,-1]])

        # Command only the translation; orientation is held at the current pose.
        move_goal=PyKDL.Frame(robot_pose.M, PSM2_pose)
|
|
|
|
|
|
|
self.p.move_cp(move_goal).wait() |
|
print("finish move") |
|
        success=self.is_success(curr_robot_pos, self.rcm_goal)
        print('is success: ',success)
        if success or self.count>9:
|
|
|
self._finished=True |
|
|
|
return self._finished |
|
''' |
|
if action[3] < 0: |
|
# close jaw |
|
p.jaw.move_jp(np.array(-0.5)).wait() |
|
else: |
|
# open jaw |
|
p.jaw.move_jp(np.array(0.5)).wait() |
|
''' |
|
|
|
|
|
|
|
def record_video(self, out1, out2): |
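        """Grab ten frames from each camera and append them to the given writers."""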
|
for i in range(10): |
|
frame1=self.cap_0.read() |
|
frame2=self.cap_2.read() |
|
out1.write(frame1) |
|
out2.write(frame2) |
|
return |
|
|
|
|
|
|
|
if __name__=="__main__": |
|
|
|
|
|
player=VisPlayer() |
|
player.init_run() |
|
finished=False |
|
while not finished: |
|
|
|
finished=player.run_step() |
|
|
|
player.cap_0.release() |
|
player.cap_2.release() |
|
|