from legged_gym.envs import PointFootRoughCfg, PointFootRoughCfgPPO


class PointFootFlatCfg(PointFootRoughCfg):
    # Flat-terrain variant of the point-foot robot environment config.
    # Overrides only the settings that differ from the rough-terrain base
    # (PointFootRoughCfg); everything else is inherited unchanged.

    class env(PointFootRoughCfg.env):
        # Environment parameters.
        # Size of the privileged observation vector (critic-side extras).
        # Presumably smaller here because terrain height sampling is
        # disabled below — TODO confirm against the base config.
        num_privileged_obs = 27

    class terrain(PointFootRoughCfg.terrain):
        # Terrain parameters: train on a flat ground plane instead of a
        # procedurally generated rough heightfield/trimesh.
        mesh_type = 'plane'
        # No height measurements for the critic — the ground is flat.
        measure_heights_critic = False

    class asset(PointFootRoughCfg.asset):
        # Robot asset (model) parameters.
        self_collisions = 0  # 1 to disable, 0 to enable... bitwise filter

    class rewards(PointFootRoughCfg.rewards):
        # Reward parameters.
        max_contact_force = 350.  # contact forces above this value are penalized

        class scales(PointFootRoughCfg.rewards.scales):
            # Per-term reward coefficients (negative values are penalties).
            orientation = -5.0
            torques = -0.000025
            feet_air_time = 5.
            unbalance_feet_air_time = 1.0
            no_fly = 1.
            # feet_contact_forces = -0.01

    class commands(PointFootRoughCfg.commands):
        # Velocity-command parameters.
        num_commands = 3  # number of commanded channels — see base cfg for their meaning
        heading_command = False  # sample yaw rate directly instead of tracking a heading
        resampling_time = 4.  # time between command re-samples (seconds, presumably)

        class ranges(PointFootRoughCfg.commands.ranges):
            # Sampling ranges for the command channels.
            ang_vel_yaw = [-1.5, 1.5]

    class domain_rand(PointFootRoughCfg.domain_rand):
        # Domain-randomization parameters.
        # On ground planes the friction combination mode is averaging,
        # i.e. total friction = (foot_friction + 1.) / 2.
        friction_range = [0.,
                          1.5]


class PointFootFlatCfgPPO(PointFootRoughCfgPPO):
    # PPO (Proximal Policy Optimization) training configuration for the
    # flat-terrain point-foot task; overrides of the rough-terrain PPO cfg.

    class policy(PointFootRoughCfgPPO.policy):
        # Policy-network architecture: hidden-layer sizes of the actor and
        # critic MLPs.
        actor_hidden_dims = [128, 64, 32]
        critic_hidden_dims = [128, 64, 32]

    class runner(PointFootRoughCfgPPO.runner):
        # Training-runner settings.
        experiment_name = 'pointfoot_flat'  # identifier used for this run's logs/checkpoints
        max_iterations = 30000  # total number of policy-update iterations
