import sys
sys.path.append("/home/mark/ChenHan/my_project/AMO/")
import os



from typing import Union
import numpy as np
import time
import torch
import types

from unitree_sdk2py.core.channel import ChannelPublisher, ChannelFactoryInitialize
from unitree_sdk2py.core.channel import ChannelSubscriber, ChannelFactoryInitialize
from unitree_sdk2py.idl.default import unitree_hg_msg_dds__LowCmd_, unitree_hg_msg_dds__LowState_
from unitree_sdk2py.idl.default import unitree_go_msg_dds__LowCmd_, unitree_go_msg_dds__LowState_
from unitree_sdk2py.idl.unitree_hg.msg.dds_ import LowCmd_ as LowCmdHG
from unitree_sdk2py.idl.unitree_go.msg.dds_ import LowCmd_ as LowCmdGo
from unitree_sdk2py.idl.unitree_hg.msg.dds_ import LowState_ as LowStateHG
from unitree_sdk2py.idl.unitree_go.msg.dds_ import LowState_ as LowStateGo
from unitree_sdk2py.utils.crc import CRC

from common.command_helper import create_damping_cmd, create_zero_cmd, init_cmd_hg, init_cmd_go, MotorMode
from common.rotation_helper import get_gravity_orientation, transform_imu_data
from common.remote_controller import RemoteController, KeyMap
from config import Config

from collections import deque
import mujoco, mujoco_viewer
import glfw
LEGGED_GYM_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


def quatToEuler(quat):
    # 初始化欧拉角向量 [横滚, 俯仰, 偏航]，quat是一个四元数数组 [w, x, y, z]
    eulerVec = np.zeros(3)
    #  分解四元数分量（MuJoCo的IMU传感器数据使用wxyz顺序）
    qw = quat[0] # 标量部分（实部）
    qx = quat[1] # x轴向量部分（虚部）
    qy = quat[2] # y轴向量部分（虚部）
    qz = quat[3] # z轴向量部分（虚部）
    # 计算横滚角（Roll，X轴旋转）
    # 使用arctan2确保角度范围在[-π, π]之间
    # roll (x-axis rotation)
    sinr_cosp = 2 * (qw * qx + qy * qz) # sin(roll)*cos(pitch)
    cosr_cosp = 1 - 2 * (qx * qx + qy * qy) # cos(roll)*cos(pitch)
    eulerVec[0] = np.arctan2(sinr_cosp, cosr_cosp)

    # pitch (y-axis rotation)
    # 计算俯仰角（Pitch，Y轴旋转）
    # 处理奇异值情况（当俯仰角接近±90度时）
    sinp = 2 * (qw * qy - qz * qx)  # sin(pitch)
    if np.abs(sinp) >= 1:
        # 当sin值超出[-1,1]范围时，取符号对应的90度值
        eulerVec[1] = np.copysign(np.pi / 2, sinp)  # use 90 degrees if out of range
    else:
        eulerVec[1] = np.arcsin(sinp)

    # yaw (z-axis rotation)
    # 计算偏航角（Yaw，Z轴旋转）
    # 使用arctan2确保角度范围正确
    siny_cosp = 2 * (qw * qz + qx * qy) # sin(yaw)*cos(pitch)
    cosy_cosp = 1 - 2 * (qy * qy + qz * qz) # cos(yaw)*cos(pitch)
    eulerVec[2] = np.arctan2(siny_cosp, cosy_cosp)
    
    return eulerVec # 返回弧度制的欧拉角向量

def _key_callback(self, window, key, scancode, action, mods):
    """GLFW keyboard handler bound onto the MuJoCo viewer as a method.

    Updates ``self.commands`` in place on key PRESS (labels follow the
    status printout below):
      [0] vx            W/S  (+/-0.05)
      [1] yaw           A/D  (+/-0.1)
      [2] vy            Q/E  (+/-0.05)
      [3] height offset Z/X  (+/-0.05), printed as 0.75 + offset
      [4] torso yaw     J/U  (+/-0.1)
      [5] torso pitch   K/I  (+/-0.05)
      [6] torso roll    L/O  (+/-0.05)
      [7] arm-control toggle (T)
    ESC requests the viewer window to close.
    """
    if action != glfw.PRESS:
        return
    if key == glfw.KEY_S:
        self.commands[0] -= 0.05
    elif key == glfw.KEY_W:
        self.commands[0] += 0.05
    elif key == glfw.KEY_A:
        self.commands[1] += 0.1
    elif key == glfw.KEY_D:
        self.commands[1] -= 0.1
    elif key == glfw.KEY_Q:
        self.commands[2] += 0.05
    elif key == glfw.KEY_E:
        self.commands[2] -= 0.05
    elif key == glfw.KEY_Z:
        self.commands[3] += 0.05
    elif key == glfw.KEY_X:
        self.commands[3] -= 0.05
    elif key == glfw.KEY_J:
        self.commands[4] += 0.1
    elif key == glfw.KEY_U:
        self.commands[4] -= 0.1
    elif key == glfw.KEY_K:
        self.commands[5] += 0.05
    elif key == glfw.KEY_I:
        self.commands[5] -= 0.05
    elif key == glfw.KEY_L:
        self.commands[6] += 0.05
    elif key == glfw.KEY_O:
        # Bug fix: decrement matched to KEY_L's +0.05 increment (was -0.1,
        # which made the torso-roll adjustment asymmetric — every other
        # command pair in this handler uses equal +/- step sizes).
        self.commands[6] -= 0.05
    elif key == glfw.KEY_T:
        self.commands[7] = not self.commands[7]
        if self.commands[7]:
            print("Toggled arm control ON")
        else:
            print("Toggled arm control OFF")
    elif key == glfw.KEY_ESCAPE:
        print("Pressed ESC")
        print("Quitting.")
        glfw.set_window_should_close(self.window, True)
        return
    print(
        f"vx: {self.commands[0]:<{8}.2f}"
        f"vy: {self.commands[2]:<{8}.2f}"
        f"yaw: {self.commands[1]:<{8}.2f}"
        f"height: {(0.75 + self.commands[3]):<{8}.2f}"
        f"torso yaw: {self.commands[4]:<{8}.2f}"
        f"torso pitch: {self.commands[5]:<{8}.2f}"
        f"torso roll: {self.commands[6]:<{8}.2f}"
    )

class HumanoidEnv:
    """MuJoCo simulation harness for a humanoid driven by a TorchScript policy.

    Physics steps at sim_dt = 2 ms (500 Hz); the observation build, policy
    inference and viewer render run every sim_decimation = 10 physics steps
    (50 Hz). Velocity/posture commands are edited interactively through the
    viewer keyboard callback bound in __init__ (see _key_callback).
    """

    def __init__(self, policy_jit, robot_type="g1", device="cuda"):
        """Set up the MuJoCo model, viewer, history buffers and adapter network.

        Args:
            policy_jit: TorchScript module called as policy_jit(obs, extra_hist).
            robot_type: robot identifier; only "g1" is supported.
            device: torch device string for adapter/policy tensors.

        Raises:
            ValueError: if robot_type is not "g1".
        """
        self.robot_type = robot_type
        self.device = device
        
        if robot_type == "g1":
            model_path = "g1.xml" # MuJoCo model file, resolved relative to CWD
            # PD stiffness gains per joint (Nm/rad), in dof_names order.
            self.stiffness = np.array([
                150, 150, 150, 300, 80, 20, # left leg (hip x3, knee, ankle x2)
                150, 150, 150, 300, 80, 20, # right leg (hip x3, knee, ankle x2)
                400, 400, 400, # waist (yaw/roll/pitch)
                80, 80, 40, 60, # left arm (shoulder x3, elbow)
                80, 80, 40, 60, # right arm (shoulder x3, elbow)
            ])
            # PD damping gains per joint (Nm*s/rad).
            self.damping = np.array([
                2, 2, 2, 4, 2, 1, # left leg
                2, 2, 2, 4, 2, 1, # right leg
                15, 15, 15, # waist
                2, 2, 1, 1, # left arm
                2, 2, 1, 1, # right arm
            ])
            # Policy action dimension: first 15 DoF (legs + waist); the 8 arm
            # DoF are driven separately by the blend logic in run().
            self.num_actions = 15
            # Total actuated DoF: 12 leg + 3 waist + 8 arm.
            self.num_dofs = 23
            # Default joint positions (standing pose), in dof_names order.
            self.default_dof_pos = np.array([
                -0.1, 0.0, 0.0, 0.3, -0.2, 0.0, # left leg
                -0.1, 0.0, 0.0, 0.3, -0.2, 0.0, # right leg
                0.0, 0.0, 0.0,# waist neutral
                0.5, 0.0, 0.2, 0.3, # left arm
                0.5, 0.0, -0.2, 0.3, # right arm
            ])
            # Per-joint torque limits (Nm) used to clip the PD output in run().
            self.torque_limits = np.array([
                88, 139, 88, 139, 50, 50, # left leg
                88, 139, 88, 139, 50, 50, 
                88, 50, 50,         # waist
                25, 25, 25, 25,     # left arm
                25, 25, 25, 25,
            ])
            # Sampling range for randomized arm targets: +/-0.4 rad.
            self.arm_dof_lower_range = -0.4 * np.ones(8)
            self.arm_dof_upper_range = 0.4* np.ones(8)
            # Joint names (for debugging / visualization).
            self.dof_names = ["left_hip_pitch", "left_hip_roll", "left_hip_yaw", "left_knee", "left_ankle_pitch", "left_ankle_roll",
                              "right_hip_pitch", "right_hip_roll", "right_hip_yaw", "right_knee", "right_ankle_pitch", "right_ankle_roll",
                              "waist_yaw", "waist_roll", "waist_pitch",
                              "left_shoulder_pitch", "left_shoulder_roll", "left_shoulder_yaw", "left_elbow",
                              "right_shoulder_pitch", "right_shoulder_roll", "right_shoulder_yaw", "right_elbow"]

        else:
            raise ValueError(f"Robot type {robot_type} not supported!")
        
        self.obs_indices = np.arange(self.num_dofs)
        # Simulation timing ------------------------------------------------
        # Total simulated duration: 2000 s.
        self.sim_duration = 100 * 20.0  
        self.sim_dt = 0.002         # physics step: 2 ms (500 Hz)
        self.sim_decimation = 10    # policy runs every 10 physics steps (50 Hz)
        self.control_dt = self.sim_dt * self.sim_decimation
        # MuJoCo model initialization.
        self.model = mujoco.MjModel.from_xml_path(model_path)
        self.model.opt.timestep = self.sim_dt   # override model timestep
        self.data = mujoco.MjData(self.model) # simulation state
        # Reset to keyframe 0 and advance one step so sensor fields are populated.
        mujoco.mj_resetDataKeyframe(self.model, self.data, 0)
        mujoco.mj_step(self.model, self.data)                   
        
        # Viewer setup; `commands` is attached to the viewer so the keyboard
        # callback can mutate it (see _key_callback for the slot meanings).
        self.viewer = mujoco_viewer.MujocoViewer(self.model, self.data)
        self.viewer.commands = np.zeros(8, dtype=np.float32)
        self.viewer.cam.distance = 2.5 # alternative: 5.0
        self.viewer.cam.elevation = 0.0
        # Bind the module-level _key_callback as a bound method of the viewer
        # and register it with GLFW.
        self.viewer._key_callback = types.MethodType(_key_callback, self.viewer)
        glfw.set_key_callback(self.viewer.window, self.viewer._key_callback)
        
        # NOTE(review): this 15-dim last_action is superseded by the 23-dim
        # reassignment a few lines below.
        self.last_action = np.zeros(self.num_actions, dtype=np.float32)
        self.action_scale = 0.25 # action-to-joint-offset scaling
        # NOTE(review): these slices are views into default_dof_pos (no copy);
        # run() later rebinds them to fresh arrays, so no in-place writes occur,
        # but confirm this aliasing is intentional.
        self.arm_action = self.default_dof_pos[15:]
        self.prev_arm_action = self.default_dof_pos[15:]
        self.arm_blend = 0.0 # arm interpolation coefficient, ramped in run()
        self.toggle_arm = False # mirrors viewer.commands[7] edge transitions


        # Observation scaling factors.
        self.scales_ang_vel = 0.25
        self.scales_dof_vel = 0.05
        
        self.nj = 23
        self.n_priv = 3
        # Proprio obs size: 3 ang vel + 2 rpy + 2 yaw-err sin/cos
        # + 23 dof pos + 23 dof vel + 23 last action + 2 gait + 15 adapter = 93.
        self.n_proprio = 3 + 2 + 2 + 23 * 3 + 2 + 15
        self.history_len = 10
        self.extra_history_len = 25
        self._n_demo_dof = 8
        
        self.dof_pos = np.zeros(self.nj, dtype=np.float32)
        self.dof_vel = np.zeros(self.nj, dtype=np.float32)
        self.quat = np.zeros(4, dtype=np.float32)
        self.ang_vel = np.zeros(3, dtype=np.float32)
        self.last_action = np.zeros(self.nj)

        # Demo observation template (17 dims): 8 arm dof + 3 velocity/cmd slots
        # + 3 torso orientation cmds + 3 height slots (filled with 0.75 here).
        self.demo_obs_template = np.zeros((8 + 3 + 3 + 3, ))
        self.demo_obs_template[:self._n_demo_dof] = self.default_dof_pos[15:]
        self.demo_obs_template[self._n_demo_dof+6:self._n_demo_dof+9] = 0.75

        self.target_yaw = 0.0 

        # Gait state: per-leg phase in [0, 1); both legs at 0.25 = standing.
        self._in_place_stand_flag = True
        self.gait_cycle = np.array([0.25, 0.25])
        self.gait_freq = 1.3

        # Rolling proprio histories, pre-filled with zeros.
        self.proprio_history_buf = deque(maxlen=self.history_len)
        self.extra_history_buf = deque(maxlen=self.extra_history_len)
        for i in range(self.history_len):
            self.proprio_history_buf.append(np.zeros(self.n_proprio))
        for i in range(self.extra_history_len):
            self.extra_history_buf.append(np.zeros(self.n_proprio))
    
        self.policy_jit = policy_jit

        # Adapter network (TorchScript), inference-only.
        self.adapter = torch.jit.load("adapter_jit.pt", map_location=self.device)
        self.adapter.eval()
        for param in self.adapter.parameters():
            param.requires_grad = False
        
        # Normalization statistics recorded at training time.
        # NOTE: weights_only=False deserializes arbitrary pickled objects --
        # only load stats files from a trusted source.
        norm_stats = torch.load("adapter_norm_stats.pt",weights_only=False)
        self.input_mean = torch.tensor(norm_stats['input_mean'], device=self.device, dtype=torch.float32)
        self.input_std = torch.tensor(norm_stats['input_std'], device=self.device, dtype=torch.float32)
        self.output_mean = torch.tensor(norm_stats['output_mean'], device=self.device, dtype=torch.float32)
        self.output_std = torch.tensor(norm_stats['output_std'], device=self.device, dtype=torch.float32)

        # Placeholders; both are rebound each call in get_observation().
        self.adapter_input = torch.zeros((1, 8 + 4), device=self.device, dtype=torch.float32)
        self.adapter_output = torch.zeros((1, 15), device=self.device, dtype=torch.float32)

    def extract_data(self):
        """Snapshot joint and IMU state from the MuJoCo buffers.

        Takes the trailing num_dofs entries of qpos/qvel as the actuated
        joints (the leading entries are assumed to be the floating base --
        TODO confirm against the model's joint layout).
        """
        self.dof_pos = self.data.qpos.astype(np.float32)[-self.num_dofs:]
        self.dof_vel = self.data.qvel.astype(np.float32)[-self.num_dofs:]
        self.quat = self.data.sensor('orientation').data.astype(np.float32)
        self.ang_vel = self.data.sensor('angular-velocity').data.astype(np.float32)
        
    def get_observation(self):
        """Assemble the policy observation vector.

        Returns:
            np.ndarray of 1043 dims: proprio (93) + demo (17) + priv (3,
            zeros) + stacked proprio history (10 x 93 = 930).

        Side effects: runs the adapter network, appends to both history
        buffers, and refreshes _in_place_stand_flag from the forward command.
        """
        # Orientation: quaternion -> roll/pitch/yaw.
        rpy = quatToEuler(self.quat) # Convert quaternion to roll, pitch, yaw
        # Yaw error relative to the commanded yaw, wrapped to [-pi, pi).
        self.target_yaw = self.viewer.commands[1] # Get target yaw from viewer commands
        dyaw = rpy[2] - self.target_yaw
        dyaw = np.remainder(dyaw + np.pi, 2 * np.pi) - np.pi
        if self._in_place_stand_flag:
            dyaw = 0.0
        # NOTE(review): obs_dof_vel zeroes ankle (4,5,10,11) and waist
        # roll/pitch (13,14) velocities but is never used below -- obs_prop
        # takes self.dof_vel directly. Confirm which was intended.
        obs_dof_vel = self.dof_vel.copy()
        obs_dof_vel[[4, 5, 10, 11, 13, 14]] = 0.0
        # Periodic gait phase signal, one entry per leg.
        gait_obs = np.sin(self.gait_cycle * 2 * np.pi)

        # Adapter input: [height, 3 torso cmds, 8 arm joint angles] = 12 dims.
        self.adapter_input = np.concatenate([np.zeros(4), self.dof_pos[15:]])

        self.adapter_input[0] = 0.75 + self.viewer.commands[3] # commanded base height
        # Torso orientation commands from viewer keys J/U, K/I, L/O.
        # NOTE(review): the key-callback printout labels commands[4]/[5]/[6]
        # as yaw/pitch/roll, while earlier comments here said roll/pitch/yaw
        # -- verify the true axis order against the adapter's training data.
        self.adapter_input[1] = self.viewer.commands[4]
        self.adapter_input[2] = self.viewer.commands[5]
        self.adapter_input[3] = self.viewer.commands[6]
        # Adapter inference: normalize, forward pass, de-normalize.
        self.adapter_input = torch.tensor(self.adapter_input).to(self.device, dtype=torch.float32).unsqueeze(0)
            
        self.adapter_input = (self.adapter_input - self.input_mean) / (self.input_std + 1e-8) # normalize (12 dims)
        self.adapter_output = self.adapter(self.adapter_input.view(1, -1)) # forward pass -> (1, 15)
        self.adapter_output = self.adapter_output * self.output_std + self.output_mean # de-normalize output
        # Proprioceptive observation (93 dims).
        obs_prop = np.concatenate([
                    self.ang_vel * self.scales_ang_vel, # scaled angular velocity (3)
                    rpy[:2], # roll and pitch (2)
                    (np.sin(dyaw), # yaw error as sin/cos (2)
                    np.cos(dyaw)), 
                    (self.dof_pos - self.default_dof_pos),# joint position offsets (23)
                    self.dof_vel * self.scales_dof_vel,  # scaled joint velocities (23)
                    self.last_action, # previous 23-dim action (23)
                    gait_obs, # gait phase signal (2)
                    self.adapter_output.cpu().numpy().squeeze(), # adapter prediction (15)
        ])
        # Privileged observation placeholder (3 dims, zeros at deploy time).
        obs_priv = np.zeros((self.n_priv, ))
        obs_hist = np.array(self.proprio_history_buf).flatten()
        # Demo observation (17 dims): arm pose + commands.
        obs_demo = self.demo_obs_template.copy()
        obs_demo[:self._n_demo_dof] = self.dof_pos[15:] # measured arm joint positions
        obs_demo[self._n_demo_dof] = self.viewer.commands[0] # forward velocity command
        obs_demo[self._n_demo_dof+1] = self.viewer.commands[2] # lateral velocity command
        # Standing in place when the forward command is near zero.
        self._in_place_stand_flag = np.abs(self.viewer.commands[0]) < 0.1
        # Torso orientation commands (axis labels: see NOTE above).
        obs_demo[self._n_demo_dof+3] = self.viewer.commands[4]
        obs_demo[self._n_demo_dof+4] = self.viewer.commands[5]
        obs_demo[self._n_demo_dof+5] = self.viewer.commands[6]
        obs_demo[self._n_demo_dof+6:self._n_demo_dof+9] = 0.75 + self.viewer.commands[3]# commanded height (x3 slots)
        # Push the new proprio frame into both history buffers.
        self.proprio_history_buf.append(obs_prop)
        self.extra_history_buf.append(obs_prop)
        # Final observation: 93 + 17 + 3 + 930 = 1043 dims.
        return np.concatenate((obs_prop, obs_demo, obs_priv, obs_hist))

    def run(self):        
        """Main loop: policy at 50 Hz, PD torque control at 500 Hz.

        Each control tick builds the observation, queries the policy with the
        extra history buffer, blends arm targets, and updates the gait phase;
        every physics step applies clipped PD torques toward the latest
        pd_target (defined from iteration 0 since 0 % sim_decimation == 0).
        """
        for i in range(int(self.sim_duration / self.sim_dt)):
            self.extract_data()
            
            if i % self.sim_decimation == 0:
                # Control tick (every 10 physics steps = 20 ms).
                obs = self.get_observation()
                
                obs_tensor = torch.from_numpy(obs).float().unsqueeze(0).to(self.device)
                
                with torch.no_grad():
                    extra_hist = torch.tensor(np.array(self.extra_history_buf).flatten().copy(), dtype=torch.float).view(1, -1).to(self.device)
                    raw_action = self.policy_jit(obs_tensor, extra_hist).cpu().numpy().squeeze()
                
                raw_action = np.clip(raw_action, -40., 40.)
                # last_action = 15 policy outputs + 8 normalized arm offsets = 23.
                self.last_action = np.concatenate([raw_action.copy(), (self.dof_pos - self.default_dof_pos)[15:] / self.action_scale])
                scaled_actions = raw_action * self.action_scale
                
                # Arm target handling: when arm control is toggled on, resample
                # a random arm pose every 300 physics steps (0.6 s); when
                # toggled off, blend back to the default arm pose once.
                if i % 300 == 0 and i > 0 and self.viewer.commands[7]:
                    self.arm_blend = 0
                    self.prev_arm_action = self.dof_pos[15:].copy()
                    self.arm_action = np.random.uniform(0, 1, 8) * (self.arm_dof_upper_range - self.arm_dof_lower_range) + self.arm_dof_lower_range
                    self.toggle_arm = True
                elif not self.viewer.commands[7]:
                    if self.toggle_arm:
                        self.toggle_arm = False
                        self.arm_blend = 0
                        self.prev_arm_action = self.dof_pos[15:].copy()
                        self.arm_action = self.default_dof_pos[15:]
                # Legs/waist from the policy; arms linearly blended toward the
                # current arm target (blend ramps 0 -> 1 in 0.01 increments).
                pd_target = np.concatenate([scaled_actions, np.zeros(8)]) + self.default_dof_pos
                pd_target[15:] = (1 - self.arm_blend) * self.prev_arm_action + self.arm_blend * self.arm_action
                self.arm_blend = min(1.0, self.arm_blend + 0.01)
                

                # Advance gait phase; pin both legs to 0.25 when standing in
                # place, and offset them by half a cycle when walking resumes.
                self.gait_cycle = np.remainder(self.gait_cycle + self.control_dt * self.gait_freq, 1.0)
                if self._in_place_stand_flag and ((np.abs(self.gait_cycle[0] - 0.25) < 0.05) or (np.abs(self.gait_cycle[1] - 0.25) < 0.05)):
                    self.gait_cycle = np.array([0.25, 0.25])
                if (not self._in_place_stand_flag) and ((np.abs(self.gait_cycle[0] - 0.25) < 0.05) and (np.abs(self.gait_cycle[1] - 0.25) < 0.05)):
                    self.gait_cycle = np.array([0.25, 0.75])
                
                # Keep the camera tracking the base position.
                # self.viewer.cam.azimuth += 0.1
                self.viewer.cam.lookat = self.data.qpos.astype(np.float32)[:3]
                self.viewer.render()
                
            # PD torque control at every physics step, clipped to joint limits.
            torque = (pd_target - self.dof_pos) * self.stiffness - self.dof_vel * self.damping
            torque = np.clip(torque, -self.torque_limits, self.torque_limits)
            
            self.data.ctrl = torque
            
            mujoco.mj_step(self.model, self.data)
        
        self.viewer.close()

class Controller:
    """Real-robot deployment controller.

    Bridges a TorchScript locomotion policy to Unitree hardware over DDS
    low-level command/state topics ("hg" for G1/H1-2, "go" for H1). One call
    to run() is a single control tick; the __main__ block loops it.
    """

    def __init__(self, config: Config) -> None:
        """Load the policy, set up DDS pub/sub, and wait for the first state.

        Args:
            config: deployment configuration (topics, gains, scales, joints).

        Raises:
            ValueError: if config.msg_type is neither "hg" nor "go".
        """
        self.config = config
        self.remote_controller = RemoteController()# wireless remote interface

        # Pre-trained policy network (TorchScript).
        self.policy = torch.jit.load(config.policy_path)
        # Per-tick working buffers.
        self.qj = np.zeros(config.num_actions, dtype=np.float32)# measured joint positions
        self.dqj = np.zeros(config.num_actions, dtype=np.float32)# measured joint velocities
        self.action = np.zeros(config.num_actions, dtype=np.float32)# last policy output
        self.target_dof_pos = config.default_angles.copy()# commanded joint positions
        self.obs = np.zeros(config.num_obs, dtype=np.float32)# observation vector
        self.cmd = np.array([0.0, 0, 0])# velocity command [forward, lateral, turn]
        self.counter = 0 # control-tick counter (drives the gait phase)
        # Set up the DDS interface matching the robot's message protocol.
        if config.msg_type == "hg":
            # g1 and h1_2 use the hg msg type
            self.low_cmd = unitree_hg_msg_dds__LowCmd_() # low-level command message
            self.low_state = unitree_hg_msg_dds__LowState_() # low-level state message
            self.mode_pr_ = MotorMode.PR # position/torque hybrid control mode
            self.mode_machine_ = 0 # machine mode, echoed from incoming state
            # DDS publisher for commands, subscriber for state.
            self.lowcmd_publisher_ = ChannelPublisher(config.lowcmd_topic, LowCmdHG)
            self.lowcmd_publisher_.Init() # command publisher

            self.lowstate_subscriber = ChannelSubscriber(config.lowstate_topic, LowStateHG)
            self.lowstate_subscriber.Init(self.LowStateHgHandler, 10) # callback + queue depth

        elif config.msg_type == "go":
            # h1 uses the go msg type
            self.low_cmd = unitree_go_msg_dds__LowCmd_()
            self.low_state = unitree_go_msg_dds__LowState_()

            self.lowcmd_publisher_ = ChannelPublisher(config.lowcmd_topic, LowCmdGo)
            self.lowcmd_publisher_.Init()

            self.lowstate_subscriber = ChannelSubscriber(config.lowstate_topic, LowStateGo)
            self.lowstate_subscriber.Init(self.LowStateGoHandler, 10)

        else:
            raise ValueError("Invalid msg_type")

        # wait for the subscriber to receive data
        self.wait_for_low_state()

        # Initialize the command msg
        if config.msg_type == "hg":
            init_cmd_hg(self.low_cmd, self.mode_machine_, self.mode_pr_)
        elif config.msg_type == "go":
            init_cmd_go(self.low_cmd, weak_motor=self.config.weak_motor)
            
        # History buffer of the last 10 proprio frames.
        # NOTE(review): this buffer and the adapter below are initialized but
        # not used by run() as written -- presumably for another policy
        # variant; confirm before removing.
        self.history_len = 10
        self.proprio_history_buf = deque(maxlen=self.history_len)
        for _ in range(self.history_len):
            self.proprio_history_buf.append(np.zeros(config.num_proprio))
        
        # Adapter network plus its training-time normalization statistics.
        self.adapter = torch.jit.load("adapter_jit.pt")
        # NOTE(review): the stats file is re-loaded four times; loading it
        # once into a dict would be cheaper (doc-only note, code unchanged).
        self.input_mean = torch.load("adapter_norm_stats.pt")['input_mean']
        self.input_std = torch.load("adapter_norm_stats.pt")['input_std']
        self.output_mean = torch.load("adapter_norm_stats.pt")['output_mean'] 
        self.output_std = torch.load("adapter_norm_stats.pt")['output_std']

    
    def LowStateHgHandler(self, msg: LowStateHG):
        """DDS callback for "hg" state: cache state, machine mode, remote."""
        self.low_state = msg
        self.mode_machine_ = self.low_state.mode_machine
        self.remote_controller.set(self.low_state.wireless_remote)

    def LowStateGoHandler(self, msg: LowStateGo):
        """DDS callback for "go" state: cache state and remote-controller data."""
        self.low_state = msg
        self.remote_controller.set(self.low_state.wireless_remote)

    def send_cmd(self, cmd: Union[LowCmdGo, LowCmdHG]):
        """Stamp the command with its CRC and publish it over DDS."""
        cmd.crc = CRC().Crc(cmd)
        self.lowcmd_publisher_.Write(cmd)

    def wait_for_low_state(self):
        """Block (polling at control_dt) until the first state message arrives."""
        while self.low_state.tick == 0:
            time.sleep(self.config.control_dt)
        print("Successfully connected to the robot.")

    def zero_torque_state(self):
        """Stream zero-torque commands until Start is pressed on the remote."""
        print("Enter zero torque state.")
        print("Waiting for the start signal...")
        while self.remote_controller.button[KeyMap.start] != 1:
            create_zero_cmd(self.low_cmd)
            self.send_cmd(self.low_cmd)
            time.sleep(self.config.control_dt)

    def move_to_default_pos(self):
        """Linearly interpolate all joints to the default pose over 2 seconds."""
        print("Moving to default pos.")
        # move time 2s
        total_time = 2
        num_step = int(total_time / self.config.control_dt)
        
        # Legs first, then arm/waist, matching the per-group gain lists.
        dof_idx = self.config.leg_joint2motor_idx + self.config.arm_waist_joint2motor_idx
        kps = self.config.kps + self.config.arm_waist_kps
        kds = self.config.kds + self.config.arm_waist_kds
        default_pos = np.concatenate((self.config.default_angles, self.config.arm_waist_target), axis=0)
        dof_size = len(dof_idx)
        
        # record the current pos
        init_dof_pos = np.zeros(dof_size, dtype=np.float32)
        for i in range(dof_size):
            init_dof_pos[i] = self.low_state.motor_state[dof_idx[i]].q
        
        # Ramp each joint from its current position to the default target.
        for i in range(num_step):
            alpha = i / num_step
            for j in range(dof_size):
                motor_idx = dof_idx[j]
                target_pos = default_pos[j]
                self.low_cmd.motor_cmd[motor_idx].q = init_dof_pos[j] * (1 - alpha) + target_pos * alpha
                self.low_cmd.motor_cmd[motor_idx].qd = 0
                self.low_cmd.motor_cmd[motor_idx].kp = kps[j]
                self.low_cmd.motor_cmd[motor_idx].kd = kds[j]
                self.low_cmd.motor_cmd[motor_idx].tau = 0
            self.send_cmd(self.low_cmd)
            time.sleep(self.config.control_dt)

    def default_pos_state(self):
        """Hold the default pose until Button A is pressed on the remote."""
        print("Enter default pos state.")
        print("Waiting for the Button A signal...")
        while self.remote_controller.button[KeyMap.A] != 1:
            for i in range(len(self.config.leg_joint2motor_idx)):
                motor_idx = self.config.leg_joint2motor_idx[i]
                self.low_cmd.motor_cmd[motor_idx].q = self.config.default_angles[i]
                self.low_cmd.motor_cmd[motor_idx].qd = 0
                self.low_cmd.motor_cmd[motor_idx].kp = self.config.kps[i]
                self.low_cmd.motor_cmd[motor_idx].kd = self.config.kds[i]
                self.low_cmd.motor_cmd[motor_idx].tau = 0
            for i in range(len(self.config.arm_waist_joint2motor_idx)):
                motor_idx = self.config.arm_waist_joint2motor_idx[i]
                self.low_cmd.motor_cmd[motor_idx].q = self.config.arm_waist_target[i]
                self.low_cmd.motor_cmd[motor_idx].qd = 0
                self.low_cmd.motor_cmd[motor_idx].kp = self.config.arm_waist_kps[i]
                self.low_cmd.motor_cmd[motor_idx].kd = self.config.arm_waist_kds[i]
                self.low_cmd.motor_cmd[motor_idx].tau = 0
            self.send_cmd(self.low_cmd)
            time.sleep(self.config.control_dt)


    def run(self):
        """Execute one control tick: read sensors, build obs, run the policy,
        and publish PD position targets. Sleeps control_dt to pace the loop.
        """
        self.counter += 1
        # Get the current joint position and velocity
        # (leg motor indices follow the simulation joint ordering).
        for i in range(len(self.config.leg_joint2motor_idx)):
            self.qj[i] = self.low_state.motor_state[self.config.leg_joint2motor_idx[i]].q
            self.dqj[i] = self.low_state.motor_state[self.config.leg_joint2motor_idx[i]].dq

        # imu_state quaternion: w, x, y, z
        quat = self.low_state.imu_state.quaternion # quaternion (w, x, y, z)
        # NOTE(review): the extra list wrapper makes ang_vel shape (1, 3);
        # obs[:3] = ang_vel still works by broadcasting -- confirm
        # transform_imu_data also expects this shape.
        ang_vel = np.array([self.low_state.imu_state.gyroscope], dtype=np.float32)
        # For torso-mounted IMUs, rotate the IMU data into the pelvis frame.
        if self.config.imu_type == "torso":
            # h1 and h1_2 imu is on the torso
            # imu data needs to be transformed to the pelvis frame
            waist_yaw = self.low_state.motor_state[self.config.arm_waist_joint2motor_idx[0]].q
            waist_yaw_omega = self.low_state.motor_state[self.config.arm_waist_joint2motor_idx[0]].dq
            quat, ang_vel = transform_imu_data(waist_yaw=waist_yaw, waist_yaw_omega=waist_yaw_omega, imu_quat=quat, imu_omega=ang_vel)

        # create observation ------------------------------------------------
        # Gravity direction estimate (attitude feedback term).
        gravity_orientation = get_gravity_orientation(quat)
        # Joint data preprocessing: offset by defaults and scale.
        qj_obs = self.qj.copy()
        dqj_obs = self.dqj.copy()
        qj_obs = (qj_obs - self.config.default_angles) * self.config.dof_pos_scale
        dqj_obs = dqj_obs * self.config.dof_vel_scale
        # Angular velocity scaling per config.
        ang_vel = ang_vel * self.config.ang_vel_scale
        # Gait phase signal: 0.8 s period driven by the tick counter.
        period = 0.8
        count = self.counter * self.config.control_dt
        phase = count % period / period
        sin_phase = np.sin(2 * np.pi * phase)
        cos_phase = np.cos(2 * np.pi * phase)
        # Remote-controller sticks -> velocity command
        # (ly: forward, lx: lateral (inverted), rx: turn (inverted)).
        self.cmd[0] = self.remote_controller.ly         # forward velocity command
        self.cmd[1] = self.remote_controller.lx * -1    # lateral velocity command
        self.cmd[2] = self.remote_controller.rx * -1    # turning command
        
        # Observation layout: [ang_vel(3) | gravity(3) | cmd(3) | q | dq |
        # last action | sin phase | cos phase].
        num_actions = self.config.num_actions
        self.obs[:3] = ang_vel  # angular velocity
        self.obs[3:6] = gravity_orientation # gravity direction
        self.obs[6:9] = self.cmd * self.config.cmd_scale * self.config.max_cmd # scaled command
        self.obs[9 : 9 + num_actions] = qj_obs # joint position offsets
        self.obs[9 + num_actions : 9 + num_actions * 2] = dqj_obs # joint velocities
        self.obs[9 + num_actions * 2 : 9 + num_actions * 3] = self.action # previous action
        self.obs[9 + num_actions * 3] = sin_phase # gait sine phase
        self.obs[9 + num_actions * 3 + 1] = cos_phase # gait cosine phase

        # Get the action from the policy network
        obs_tensor = torch.from_numpy(self.obs).unsqueeze(0)
        self.action = self.policy(obs_tensor).detach().numpy().squeeze()
        
        # transform action to target_dof_pos
        # (default pose + scaled policy output).
        target_dof_pos = self.config.default_angles + self.action * self.config.action_scale

        # Build low cmd -----------------------------------------------------
        # Leg joints: PD position targets from the policy.
        for i in range(len(self.config.leg_joint2motor_idx)):
            motor_idx = self.config.leg_joint2motor_idx[i]
            self.low_cmd.motor_cmd[motor_idx].q = target_dof_pos[i] # target position
            self.low_cmd.motor_cmd[motor_idx].qd = 0    # target velocity
            self.low_cmd.motor_cmd[motor_idx].kp = self.config.kps[i] # stiffness gain
            self.low_cmd.motor_cmd[motor_idx].kd = self.config.kds[i] # damping gain
            self.low_cmd.motor_cmd[motor_idx].tau = 0 # feedforward torque
        # Arm/waist joints: hold the configured targets (no active control).
        for i in range(len(self.config.arm_waist_joint2motor_idx)):
            motor_idx = self.config.arm_waist_joint2motor_idx[i]
            self.low_cmd.motor_cmd[motor_idx].q = self.config.arm_waist_target[i]
            self.low_cmd.motor_cmd[motor_idx].qd = 0
            self.low_cmd.motor_cmd[motor_idx].kp = self.config.arm_waist_kps[i]
            self.low_cmd.motor_cmd[motor_idx].kd = self.config.arm_waist_kds[i]
            self.low_cmd.motor_cmd[motor_idx].tau = 0

        # send the command
        self.send_cmd(self.low_cmd)

        time.sleep(self.config.control_dt) # pace the control loop


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("net", type=str, help="network interface")
    # Fix: a positional argument only honors `default` with nargs="?" --
    # previously argparse treated `config` as mandatory and the declared
    # default "g1.yaml" was dead.
    parser.add_argument(
        "config", type=str, nargs="?", default="g1.yaml",
        help="config file name in the configs folder",
    )
    args = parser.parse_args()

    # Load the deployment config from the configs folder next to this repo.
    config_path = f"{LEGGED_GYM_ROOT_DIR}/deploy_real/configs/{args.config}"
    config = Config(config_path)

    # Initialize DDS communication on the given network interface.
    ChannelFactoryInitialize(0, args.net)

    controller = Controller(config)

    # Zero-torque state; press Start on the remote to continue.
    controller.zero_torque_state()

    # Ramp all joints to the default standing pose (2 s interpolation).
    controller.move_to_default_pos()

    # Hold the default pose; press Button A to start the policy loop.
    controller.default_pos_state()

    while True:
        try:
            controller.run()
            # Press the Select key to exit the control loop.
            if controller.remote_controller.button[KeyMap.select] == 1:
                break
        except KeyboardInterrupt:
            break
    # Switch to the damping state before exiting so the robot sags safely.
    create_damping_cmd(controller.low_cmd)
    controller.send_cmd(controller.low_cmd)
    print("Exit")
