#! /usr/bin/env python
'''
Move follow: repeatedly query the perception service for a detected mug and
bowl, visualize their poses in rviz, and command the arm toward the midpoint
between the two objects whenever that target moves.
'''
from math import sqrt
import sys, os, time

from numpy.lib.function_base import average
src_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..')
sys.path.append(src_dir)
import rospy
import ros_np_multiarray as ros_nm
from visualization_msgs.msg import Marker, MarkerArray
import tf2_ros
############# perception module #################
sys.path.insert(0, '{}/perception/vision_perception/src/'.format(src_dir))
# from perception.vision_perception.src.perception_client import Perception_Client
from perception_client import Perception_Client
sys.path.pop(0)
############# planning module ###################
from planning.scripts.action_client import *
from utilities.tools.vis_utils import *
from utilities.tools.tf_utils import *



def get_pos_from_RT(RT, frame_name, tgt, tf):
    '''
    Convert a pose multiarray into a position expressed in a target frame.

    args:
        RT: multiarray holding the object pose (converted via ros_np_multiarray)
        frame_name: frame the pose is currently expressed in
        tgt: name of the frame to transform the pose into
        tf: tf2 buffer used for the transform lookup
    return:
        pos: [x, y, z] list, the position in the target frame
    '''
    pose_np = ros_nm.to_numpy_f32(RT)
    position, orientation = get_pos_and_orient_list(pose_np)
    stamped = get_posestamped_msg(position, orientation, frame_name)
    stamped = transform_posestamped_to_tgt(stamped, tgt, tf)
    point = stamped.pose.position
    return [point.x, point.y, point.z]

if __name__ == '__main__':

    vis = True          # publish rviz markers for the detected objects
    move_flag = True    # actually command the arm (False = perception only)
    robot_prefix = 'j2n6s300_'
    camera_frame = 'camera_color_optical_frame'
    src_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..')
    rospy.init_node('move_follow_node')
    initial_joint_pose(robot_prefix)

    # vis topics
    obj_pose_topic = rospy.Publisher('/pour_water/multi_obj_pose', PoseArray, queue_size=1)
    obj_topic = rospy.Publisher('/pour_water/multi_obj_marker', MarkerArray, queue_size=1)
    gripper_grasp_pose_topic = rospy.Publisher('/pour_water/grasp_pose_marker', Marker, queue_size=1)
    gripper_pour_pose_topic = rospy.Publisher('/pour_water/pour_pose_marker', Marker, queue_size=1)

    # FIX: construct the tf2 buffer/listener and the perception client ONCE,
    # before the loop. A tf2 Buffer needs time to accumulate transforms, so a
    # fresh buffer every iteration is empty and transform lookups fail/block.
    tfBuffer = tf2_ros.Buffer()
    tf_listener = tf2_ros.TransformListener(tfBuffer)
    perception_client = Perception_Client()

    last_average_pos = [0.0, 0.0, 0.0]

    # FIX: respect ROS shutdown instead of looping forever.
    while not rospy.is_shutdown():
        start_time = time.time()
        perception_results = perception_client.client()  # msg type: SingleObjPose[]
        rospy.loginfo('2D+6D use time : {}'.format(time.time() - start_time))

        if perception_results is None:
            rospy.loginfo('WARNING! No Object detected! Please check again!')
            continue

        # locate the mug and bowl instances among the detections
        # (if a category appears more than once, the last detection wins)
        bowl_idx = -1
        mug_idx = -1
        for i, perception_result in enumerate(perception_results.obj_pose_list):
            obj_cate = perception_result.category_name
            if obj_cate == 'mug':
                mug_idx = i
            elif obj_cate == 'bowl':
                bowl_idx = i

        if mug_idx == -1 or bowl_idx == -1:
            rospy.loginfo('The mug or bowl has not been detected!')
            continue

        # vis in rviz
        if vis:
            obj_pose_list = []
            marker_list = []
            for i in [mug_idx, bowl_idx]:
                perception_result = perception_results.obj_pose_list[i]  # msg type: SingleObjPose
                obj_pose = ros_nm.to_numpy_f32(perception_result.RT)
                obj_cate = perception_result.category_name
                rospy.loginfo('Detect Obj Category:  {}'.format(obj_cate))

                obj_position, obj_orientation = get_pos_and_orient_list(obj_pose)
                obj_pose_list.append(get_pose_msg(obj_position, obj_orientation))
                mesh_resource = 'file://{}/perception/vision_grasp_est/src/lib/shape_encoder/recon_meshes/{}_mesh.ply'.format(src_path, obj_cate)
                scale = perception_result.scale * 0.5
                marker_list.append(get_marker_msg(obj_position, obj_orientation, scale, i, mesh_resource, camera_frame, color=[0, 0, 1, 1]))

            obj_pose_topic.publish(get_posearray_msg(obj_pose_list, camera_frame))
            obj_topic.publish(get_markerarray_msg(marker_list))

        mug_result = perception_results.obj_pose_list[mug_idx]
        bowl_result = perception_results.obj_pose_list[bowl_idx]

        if not move_flag:
            rospy.loginfo('WARNING! No Grasp estimated!')
            continue

        # move: target the midpoint between the mug and the bowl, expressed in
        # the robot base frame, with a fixed offset (presumably to keep the
        # gripper back and above the objects — TODO confirm offsets).
        base_frame = robot_prefix + 'link_base'
        mug_pos = get_pos_from_RT(mug_result.RT, camera_frame, base_frame, tfBuffer)
        bowl_pos = get_pos_from_RT(bowl_result.RT, camera_frame, base_frame, tfBuffer)
        average_pos = [(m + b) / 2 for m, b in zip(mug_pos, bowl_pos)]
        average_pos[1] -= 0.25
        average_pos[2] += 0.20

        # only re-command the arm when the target moved by more than 2 cm
        delta_norm = sqrt(sum((a - b) ** 2 for a, b in zip(average_pos, last_average_pos)))
        if delta_norm > 0.02:
            try:
                # fixed orientation quaternion appended to the xyz target
                arm_action(average_pos + [0.208926, 0.924167, 0.319505, -0.0134641], robot_prefix, relative=False)
            except rospy.ROSInterruptException:
                print('program interrupted before completion')
        last_average_pos = average_pos