import rclpy
from nav2_simple_commander.robot_navigator import BasicNavigator,TaskResult
from geometry_msgs.msg import PoseStamped
from rclpy.node import Node
import rclpy.time
from tf2_ros import TransformListener,Buffer
from tf_transformations import euler_from_quaternion,quaternion_from_euler
import math
from autopartol_interfaces.srv import SpeachText
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2

class Partol_Node(BasicNavigator):
    """Patrol node.

    Drives the robot through a configurable list of waypoints, announces
    progress through a speech-synthesis service, and saves a camera snapshot
    at each waypoint.
    """

    def __init__(self, name):
        super().__init__(name)
        # Parameters:
        #   initial_point: [x, y, yaw] pose used to initialize localization.
        #   target_point:  flattened waypoint list [x1, y1, yaw1, x2, y2, yaw2, ...].
        #   img_save_path: directory prefix for saved snapshots.
        self.declare_parameter('initial_point', [0.0, 0.0, 0.0])
        self.declare_parameter('target_point', [0.0, 0.0, 0.0, 1.0, 1.0, 1.57])
        self.declare_parameter('img_save_path', '')
        self.initial_point_ = self.get_parameter('initial_point').value
        self.target_point_ = self.get_parameter('target_point').value
        self.img_save_path_ = self.get_parameter('img_save_path').value
        # TF buffer/listener used to look up the robot pose in the map frame.
        self.buffer = Buffer()
        self.listener = TransformListener(self.buffer, self)
        self.speaker_client_ = self.create_client(SpeachText, "speach_text")
        # Single reusable bridge (was re-created on every save_img call).
        self.cv_bridge_ = CvBridge()
        # Latest camera frame; None until the first image arrives.
        self.last_img = None
        self.img_sub_ = self.create_subscription(
            Image, "/camera_sensor/image_raw", self.img_callback, 1)

    def img_callback(self, msg):
        """Cache the most recent camera frame."""
        self.last_img = msg

    def save_img(self):
        """Save the latest camera frame, named after the current map pose.

        Skips (with a warning) when no image has been received yet instead of
        crashing on a None frame.
        """
        if self.last_img is None:
            self.get_logger().warn('尚未收到图像，跳过保存')
            return
        pose = self.get_current_pose()
        cv_image = self.cv_bridge_.imgmsg_to_cv2(self.last_img)
        cv2.imwrite(
            f'{self.img_save_path_}img_{pose.translation.x:.2f}_{pose.translation.y:.2f}.png',
            cv_image)

    def get_pose_by_xyyaw(self, x, y, yaw):
        """Build a map-frame PoseStamped from x, y and a yaw angle (radians).

        Returns:
            geometry_msgs.msg.PoseStamped with frame_id 'map'.
        """
        pose = PoseStamped()
        pose.header.frame_id = 'map'
        pose.pose.position.x = x
        pose.pose.position.y = y
        # quaternion_from_euler returns the components in (x, y, z, w) order.
        quat = quaternion_from_euler(0, 0, yaw)
        pose.pose.orientation.x = quat[0]
        pose.pose.orientation.y = quat[1]
        pose.pose.orientation.z = quat[2]
        pose.pose.orientation.w = quat[3]
        return pose

    def init_robot_pose(self):
        """Publish the initial pose from the 'initial_point' parameter and
        block until the Nav2 stack is active."""
        self.initial_point_ = self.get_parameter('initial_point').value
        init_pose = self.get_pose_by_xyyaw(self.initial_point_[0],
                                           self.initial_point_[1],
                                           self.initial_point_[2])
        self.setInitialPose(init_pose)
        self.waitUntilNav2Active()  # wait until navigation is available

    def get_target_point(self):
        """Read the 'target_point' parameter and return it as a list of
        [x, y, yaw] waypoints (trailing incomplete triples are ignored)."""
        points = []
        self.target_point_ = self.get_parameter('target_point').value
        for index in range(len(self.target_point_) // 3):
            x = self.target_point_[index * 3]
            y = self.target_point_[index * 3 + 1]
            yaw = self.target_point_[index * 3 + 2]
            points.append([x, y, yaw])
            self.get_logger().info(f'获取到目标点的位置{index}->{x},{y},{yaw}')
        return points

    def nav_to_pose(self, target_point):
        """Navigate to the given PoseStamped, logging remaining distance."""
        self.goToPose(target_point)
        while not self.isTaskComplete():
            feedback = self.getFeedback()
            # getFeedback() can return None before the first feedback message.
            if feedback is not None:
                self.get_logger().info(f'剩余距离{feedback.distance_remaining}')
        result = self.getResult()
        self.get_logger().info(f'导航结果：{result}')

    def get_current_pose(self):
        """Look up and return the current map->base_footprint transform.

        Retries until the lookup succeeds (or rclpy is shut down).
        """
        while rclpy.ok():
            try:
                result = self.buffer.lookup_transform(
                    "map", "base_footprint",
                    rclpy.time.Time(seconds=0.0),
                    rclpy.time.Duration(seconds=1.0))
                transform = result.transform
                self.get_logger().info(f'平移：{transform.translation}')
                self.get_logger().info(f'旋转：{transform.rotation}')
                rotation_euler = euler_from_quaternion(
                    [
                        transform.rotation.x,
                        transform.rotation.y,
                        transform.rotation.z,
                        transform.rotation.w,
                    ])
                self.get_logger().info(f'旋转RPY:{rotation_euler}')
                return transform
            except Exception as e:
                # TF may not be available yet; log and retry.
                self.get_logger().info(f'获取旋转矩阵失败的原因{str(e)}')

    def speach_text(self, text):
        """Synchronously request the speech service to speak *text*."""
        while not self.speaker_client_.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('语音合成服务还未上线，等待中...')

        request = SpeachText.Request()
        request.text = text
        future = self.speaker_client_.call_async(request)
        rclpy.spin_until_future_complete(self, future)
        response = future.result()
        # Fix: original compared against the imported `Node` class
        # (`is not Node`), which is always true and masked failed calls.
        if response is not None:
            # NOTE(review): assumes SpeachText.srv declares `bool result` —
            # the original compared the Response object itself to True, which
            # is always False. Confirm the field name against the .srv file.
            if getattr(response, 'result', False):
                self.get_logger().info(f'请求语言合成{text}成功')
            else:
                self.get_logger().info(f'请求语言合成{text}失败')
        else:
            self.get_logger().info('语音合成服务响应失败')

def main(args=None):
    """Entry point: initialize ROS, set the initial pose, then patrol the
    configured waypoints forever, speaking and saving an image at each stop.

    Shuts down cleanly on Ctrl-C (the patrol loop otherwise never exits).
    """
    rclpy.init(args=args)
    node = Partol_Node("partol_node")
    try:
        node.speach_text('正在初始化位置')
        node.init_robot_pose()
        node.speach_text('初始化位置完成')
        while rclpy.ok():
            for x, y, yaw in node.get_target_point():
                target_pose = node.get_pose_by_xyyaw(x, y, yaw)
                node.speach_text(f'正在导航前往{x},{y}目标点')
                node.nav_to_pose(target_pose)
                node.speach_text(f'已经到达{x},{y}目标点，正在准备采集图像')
                node.save_img()
                node.speach_text(f'已经保存图像')
    except KeyboardInterrupt:
        pass  # Ctrl-C: fall through to clean shutdown
    finally:
        # Original left destroy_node commented out and shutdown unreachable.
        node.destroy_node()
        rclpy.shutdown()