import rospy
from low_state_listener import LowStateListener
from config import Config
from publish_aliengo_real_state import publisher
import numpy as np
import time
from  onnx_model import onnx_model
from action_clip import extract_joint_limits
import json

def get_axis_params(value, axis_idx, x_value=0., dtype=np.float64, n_dims=3):
    """Construct arguments to `Vec` according to axis index.

    Builds an ``n_dims``-long vector holding ``value`` at ``axis_idx`` and
    zeros elsewhere, then overwrites element 0 with ``x_value`` (so when
    ``axis_idx == 0``, ``x_value`` wins).

    Args:
        value: scalar placed at position ``axis_idx``.
        axis_idx: axis to set; must be strictly less than ``n_dims``.
        x_value: value forced into element 0.
        dtype: numpy dtype of the returned array.
        n_dims: length of the returned vector.

    Returns:
        ``np.ndarray`` of shape ``(n_dims,)`` with dtype ``dtype``.
    """
    assert axis_idx < n_dims, "the axis dim should be within the vector dimensions"
    # Direct index assignment replaces the original np.where over a one-hot
    # mask; the list()/np.array() round-trip on return was redundant.
    params = np.zeros((n_dims,))
    params[axis_idx] = value
    params[0] = x_value
    return params.astype(dtype)

import numpy as np

def quat_rotate(q, v):
    """Rotate a batch of vectors by a batch of quaternions.

    NumPy port of the common torch ``quat_rotate``: quaternions use the
    (x, y, z, w) convention with the scalar part last.

    Args:
        q: ``(N, 4)`` array of unit quaternions, scalar part in column 3.
        v: ``(N, 3)`` array of vectors.

    Returns:
        ``(N, 3)`` array where row i is ``v[i]`` rotated by ``q[i]``.
    """
    q_w = q[:, -1]
    q_vec = q[:, :3]

    # Term a: scalar-part contribution.
    a = v * (2.0 * q_w ** 2 - 1.0)[:, np.newaxis]

    # Term b: cross-product contribution (np.cross replaces torch.cross).
    b = np.cross(q_vec, v, axis=-1) * (q_w[:, np.newaxis] * 2.0)

    # Term c: projection onto the vector part; the row-wise dot product
    # emulates torch.bmm on (N,1,3) x (N,3,1).
    dot_product = np.sum(q_vec * v, axis=-1)[:, np.newaxis]
    c = q_vec * dot_product * 2.0

    return a + b + c

def quat_rotate_inverse(q, v):
    """Rotate a batch of vectors by the inverse of a batch of quaternions.

    NumPy port of the common torch ``quat_rotate_inverse``: quaternions use
    the (x, y, z, w) convention with the scalar part last. Identical to
    :func:`quat_rotate` except the cross-product term is subtracted.

    Args:
        q: ``(N, 4)`` array of unit quaternions, scalar part in column 3.
        v: ``(N, 3)`` array of vectors.

    Returns:
        ``(N, 3)`` array where row i is ``v[i]`` rotated by ``q[i]^-1``.
    """
    q_w = q[:, -1]
    q_vec = q[:, :3]

    # Term a: scalar-part contribution.
    a = v * (2.0 * q_w ** 2 - 1.0)[:, np.newaxis]

    # Term b: cross-product contribution (np.cross replaces torch.cross).
    b = np.cross(q_vec, v, axis=-1) * (q_w[:, np.newaxis] * 2.0)

    # Term c: projection onto the vector part (row-wise dot replaces torch.bmm).
    c = q_vec * np.sum(q_vec * v, axis=-1)[:, np.newaxis] * 2.0

    return a - b + c

def clip(value, min_val, max_val):
    """Clamp *value* into the inclusive range [min_val, max_val]."""
    if value < min_val:
        return min_val
    if value > max_val:
        return max_val
    return value


def write2txt(file_name, data_list):
    """Append *data_list* to *file_name* as one space-separated line.

    Opens the file in append mode (creating it if missing) and writes
    every element of ``data_list``, stringified, joined by single spaces
    and terminated with a newline.
    """
    line = ' '.join(map(str, data_list))
    with open(file_name, 'a') as out:
        out.write(line + '\n')




import rosbag
from unitree_legged_msgs.msg import LowCmd
from unitree_legged_msgs.msg import LowState
class  Runner:
    """ROS node that drives an Aliengo robot with an ONNX locomotion policy.

    Workflow: read joint/model state through ``LowStateListener``, build the
    clipped policy observation, run ONNX inference, scale the actions into
    joint position targets, and publish them.  All traffic on
    ``/low_state_info`` and ``/low_cmd_info`` is also recorded into a rosbag.
    """
    def __init__(self) -> None:
        # self.init_config()
        self.init_buffer()
        rospy.init_node('motor_command_publisher',anonymous=True)
        self.low_state_listener=LowStateListener()
        self.position_publisher=publisher()
        # URDF file path (URDF-based joint-limit clipping currently disabled)
        # urdf_file_path = "/home/xjz/catkin_ws/src/unitree_ros/robots/aliengo_description/urdf/aliengo.urdf"
        # self.position_limits=extract_joint_limits(urdf_file_path,Config.policy_joint_index)
        # Create a bag file for recording.
        # NOTE(review): the bag is never closed; consider closing it on
        # shutdown so buffered messages are flushed.
        self.bag = rosbag.Bag('recorded_data.bag', 'w')
        
        # Create subscribers whose callbacks log messages into the bag.
        sub_low_state = rospy.Subscriber('/low_state_info', LowState, self.low_state_callback)
        sub_low_cmd_info = rospy.Subscriber('/low_cmd_info', LowCmd, self.low_cmd_info_callback)
        pass

    def low_state_callback(self,data):
        # Write the received LowState message into the bag file.
        self.bag.write('/low_state_info', data, rospy.Time.now())
        pass

    def low_cmd_info_callback(self,data):
        # Write the received LowCmd message into the bag file.
        self.bag.write('/low_cmd_info', data, rospy.Time.now())
        pass

    def init_buffer(self):
            # Velocity commands and default joint angles, ordered by
            # Config.policy_joint_index (the policy's joint ordering).
            self.commands= Config.commands
            self.default_dof_pos=[ Config.defaultJointAngles[joint_name]  for joint_name,index in Config.policy_joint_index.items()]
            self.default_dof_pos=np.array(self.default_dof_pos)[None,:]            
            # Unit gravity direction (-z), shape (1, 3).
            self.gravity_vec = np.repeat(get_axis_params(-1., 2)[np.newaxis, :],1,axis=0)
            # Last policy action, fed back into the observation.
            self.actions = np.zeros((1,12),dtype=np.float32)            
            # Observation/action scaling constants.
            # NOTE(review): presumably these mirror the training-time
            # config — confirm against the simulator settings.
            self.lin_vel_scale=2.0
            self.ang_vel_scale=0.25
            self.dof_pos_scale=1.0
            self.dof_vel_scale=0.05
            self.action_scale=0.5
            self.clip_obs=5.0
            self.action_clip=1.0
            self.onnx_model_path = Config.onnx_model_path
            self.model=onnx_model(self.onnx_model_path)

    def get_jointState(self):
        # Returns (positions_dict, velocities_dict) from the listener.
        return self.low_state_listener.get_joint_states()
    
    def get_modelState(self):
        # Returns the robot root state from the listener.
        return self.low_state_listener.get_model_states()

    def publish_postion(self,position_dict):
        # Forward a {joint_name: target_angle} dict to the publisher.
        return  self.position_publisher.publish_topics(position_dict)

    def start_aliengo(self):
        # Initialize the robot at its current measured pose.
        self.set_start_postion()
        # self.stand()

    def  set_start_postion(self):
        # Load the starting position into the robot: poll until joint
        # states are available, then latch them as the initial targets.
        positions_ditc, velocities_dict=None,None
        while positions_ditc is None or len(positions_ditc)==0:
            positions_ditc, velocities_dict = self.low_state_listener.get_joint_states()
        self.position_publisher.set_position(positions_ditc)        

    def stand(self):
        # Command the robot to stand up by smoothly interpolating to the
        # default joint angles.
        # joint_command_dict = {}
        # for joint_name,index in Config.joint2MotorIndex.items():
        #     joint_command_dict[joint_name]=Config.defaultJointAngles[joint_name]
        self.position_publisher.publish_topics_smooth(Config.defaultJointAngles,duration=5000)
        # self.position_publisher.publish_topics(Config.defaultJointAngles)

    def get_observation(self):
        """Build the clipped policy observation from the current robot state."""
        root_state=np.array(self.get_modelState())[None, :]
        dof_pos,dof_vel=self.get_jointState()
        # Re-order joint dicts into the policy's joint ordering.
        dof_pos=[dof_pos[joint_name]  for joint_name,index in Config.policy_joint_index.items()]
        dof_pos=np.array(dof_pos)[None,:]
        dof_vel=[dof_vel[joint_name]  for joint_name,index in Config.policy_joint_index.items()]
        dof_vel=np.array(dof_vel)[None,:]
        obs=self.compute_aliengo_observations(root_state,
                                    self.commands,
                                    dof_pos,
                                    self.default_dof_pos,
                                    dof_vel,
                                    self.gravity_vec,
                                    self.actions,
                                    self.lin_vel_scale,
                                    self.ang_vel_scale,
                                    self.dof_pos_scale,
                                    self.dof_vel_scale)
        # Clamp to the training-time observation range and match dtype.
        obs=np.clip(obs, -self.clip_obs, self.clip_obs).astype(np.float32)
        return obs
    
    def compute_aliengo_observations(self,
                                    root_states,
                                    commands,
                                    dof_pos,
                                    default_dof_pos,
                                    dof_vel,
                                    gravity_vec,
                                    actions,
                                    lin_vel_scale,
                                    ang_vel_scale,
                                    dof_pos_scale,
                                    dof_vel_scale
                                    ):
        """Assemble the observation vector the policy was trained on.

        ``root_states`` is assumed to be laid out as
        [pos(0:3), quat xyzw(3:7), lin_vel(7:10), ang_vel(10:13)]
        — TODO confirm against get_modelState's producer.
        """
        # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> Tensor
        base_quat = root_states[:, 3:7]
        # Linear velocity expressed in the base frame.
        base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10]) * lin_vel_scale
        # base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13]) * ang_vel_scale
        # Angular velocity is used raw (world/sensor frame), not rotated.
        base_ang_vel =  root_states[:, 10:13] * ang_vel_scale
        # NOTE(review): quat_rotate (not quat_rotate_inverse) is used here,
        # unlike the commented alternative below — confirm this matches the
        # training-time observation.
        projected_gravity = quat_rotate(base_quat, gravity_vec)
        # projected_gravity = quat_rotate_inverse(base_quat, gravity_vec)


        dof_pos_scaled = (dof_pos - default_dof_pos) * dof_pos_scale

        commands_scaled = commands*np.array([lin_vel_scale, lin_vel_scale, ang_vel_scale])

        # xjz
        # Linear-velocity observation deliberately zeroed out (the onboard
        # estimate is presumably not trusted — verify).
        base_lin_vel=base_lin_vel*0
        #

        obs = np.concatenate((base_lin_vel,
                        base_ang_vel,
                        projected_gravity,
                        commands_scaled,
                        dof_pos_scaled,
                        dof_vel*dof_vel_scale,
                        actions
                        ), axis=-1)

        return obs

    def get_action(self,obs):
        # Run ONNX inference and clamp the raw action.
        # (spelling 'infference' matches the onnx_model API)
        action=self.model.infference(obs)
        action=  np.clip(action , -self.action_clip , self.action_clip)
        return action
    
    def take_action(self,action):
        """Scale the policy action per joint group and publish position targets."""
        self.actions = action
        targets=np.zeros_like(self.default_dof_pos,dtype=self.default_dof_pos.dtype)
        # targets = (self.action_scale * self.actions + self.default_dof_pos)[0]
        # NOTE(review): assumes Config.*_indices are 1-based numpy index
        # arrays (hence the -1 offset) — confirm against Config.
        targets[:,(Config.hip_indices-1)]=  Config.hip_scale * self.actions[:,(Config.hip_indices-1)] + self.default_dof_pos[:,(Config.hip_indices-1)]
        targets[:,(Config.thigh_indices-1)]=  Config.thigh_scale * self.actions[:,(Config.thigh_indices-1)] + self.default_dof_pos[:,(Config.thigh_indices-1)]
        targets[:,(Config.calf_indices-1)]=  Config.calf_scale * self.actions[:,(Config.calf_indices-1)] + self.default_dof_pos[:,(Config.calf_indices-1)]

        targets=targets[0]
        targets_angles_dict={}
        for key,index in Config.policy_joint_index.items():
            # targets_angles_dict[key]=   targets[index]
            # Clip actions to the URDF joint range (currently disabled):
            # targets_angles_dict[key]=  clip(targets[index],self.position_limits[key].lower,self.position_limits[key].upper)
            targets_angles_dict[key]=  targets[index]

        self.publish_postion(targets_angles_dict)

    def inferrence(self,obs):
        # Unused placeholder (name misspelled; inference happens in get_action).
        return None

    def work_flow(self):
        """Main control loop: stand the robot up, then run the policy at Config.frequency."""
        # #  Stand the lying-down robot up.
        self.start_aliengo()
        # self.stand()
        rate = rospy.Rate(Config.frequency)  # 60Hz    
        count=0    
        step=1
        while not rospy.is_shutdown():           

            # First 100 ticks: warm-up/settling period, no policy commands.
            if count<=100:
                # self.stand()
                count=count+1
                print("count:{}".format(count))
            else:
                obs=self.get_observation()
                action=self.get_action(obs)
                self.take_action(action)
                # write2txt(file_name="obs_data.txt",data_list=obs[0].tolist())
                # write2txt(file_name="action_data.txt",data_list=action[0].tolist())
                # if  step>=2000:
                #     break
                # step=step+1
                pass

            #  control  logic
            # Sleep to maintain the loop frequency.
            rate.sleep()
        pass

if __name__ == "__main__":
    # Entry point: construct the runner node and start its control loop.
    runner = Runner()
    runner.work_flow()