| """Script to train RL agent with RL-Games.""" |
|
|
| """Launch Isaac Sim Simulator first.""" |
|
|
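# Example invocation (task name and script path are illustrative; any task
# registered by isaaclab_tasks works):
#   python train.py --task Isaac-Cartpole-v0 --num_envs 64 --headless
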
import argparse
import sys
from distutils.util import strtobool  # note: distutils is deprecated and removed in Python 3.12

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent", type=str, default="rl_games_cfg_entry_point", help="Name of the RL agent configuration entry point."
)
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment.")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument("--sigma", type=str, default=None, help="The policy's initial standard deviation.")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument("--wandb-project-name", type=str, default=None, help="Name of the wandb project.")
parser.add_argument("--wandb-entity", type=str, default=None, help="Entity (team) of the wandb project.")
parser.add_argument("--wandb-name", type=str, default=None, help="Name of the wandb run.")
parser.add_argument(
    "--track",
    type=lambda x: bool(strtobool(x)),
    default=False,
    nargs="?",
    const=True,
    help="If toggled, the experiment will be tracked with Weights and Biases.",
)
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
parser.add_argument(
    "--ray-proc-id", "-rid", type=int, default=None, help="Automatically configured by Ray integration, otherwise None."
)
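# note: because `--track` is declared with nargs="?" and const=True, it works both as a bare
# flag (`--track`) and with an explicit value (`--track True` / `--track False`).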
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

| """Rest everything follows.""" |
|
|
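# note: the imports below must stay after the AppLauncher call above; several of these modules
# depend on extensions that are only available once the simulator app is running.
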
import logging
import math
import os
import random
import time
from datetime import datetime

import gymnasium as gym
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import IsaacAlgoObserver
from rl_games.torch_runner import Runner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict
from isaaclab.utils.io import dump_yaml

from isaaclab_rl.rl_games import MultiObserver, PbtAlgoObserver, RlGamesGpuEnv, RlGamesVecEnvWrapper

import isaaclab_tasks  # noqa: F401  (imported for the side effect of registering tasks with gymnasium)
from isaaclab_tasks.utils.hydra import hydra_task_config

logger = logging.getLogger(__name__)


@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Train with RL-Games agent."""
    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # reject the invalid combination of distributed training on a CPU device
    if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device:
        raise ValueError(
            "Distributed training is not supported when using CPU device. "
            "Please use GPU device (e.g., --device cuda) for distributed training."
        )

    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    # override agent configuration with CLI arguments
    agent_cfg["params"]["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["params"]["seed"]
    agent_cfg["params"]["config"]["max_epochs"] = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg["params"]["config"]["max_epochs"]
    )
    if args_cli.checkpoint is not None:
        resume_path = retrieve_file_path(args_cli.checkpoint)
        agent_cfg["params"]["load_checkpoint"] = True
        agent_cfg["params"]["load_path"] = resume_path
        print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")
    train_sigma = float(args_cli.sigma) if args_cli.sigma is not None else None
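    # note: `train_sigma` is passed to `runner.run(...)` below; when None, the initial policy
    # standard deviation comes from the agent configuration (or the checkpoint, if resuming).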
    # multi-gpu training configuration
    if args_cli.distributed:
        agent_cfg["params"]["seed"] += app_launcher.global_rank
        agent_cfg["params"]["config"]["device"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["device_name"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["multi_gpu"] = True
        # update env config device to match the local rank
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
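    # note: offsetting the seed by `global_rank` above keeps the rollouts collected by different
    # distributed workers decorrelated.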
    # set the environment seed
    # note: certain randomizations occur in the environment initialization, so we set the seed here
    env_cfg.seed = agent_cfg["params"]["seed"]

    # specify directory for logging experiments
    config_name = agent_cfg["params"]["config"]["name"]
    log_root_path = os.path.join("logs", "rl_games", config_name)
    # for PBT runs, nest the logs under the configured PBT directory
    if "pbt" in agent_cfg and agent_cfg["pbt"]["directory"] != ".":
        log_root_path = os.path.join(agent_cfg["pbt"]["directory"], log_root_path)
    else:
        log_root_path = os.path.abspath(log_root_path)

| print(f"[INFO] Logging experiment in directory: {log_root_path}") |
| |
| log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) |
| |
| |
| agent_cfg["params"]["config"]["train_dir"] = log_root_path |
| agent_cfg["params"]["config"]["full_experiment_name"] = log_dir |
| wandb_project = config_name if args_cli.wandb_project_name is None else args_cli.wandb_project_name |
| experiment_name = log_dir if args_cli.wandb_name is None else args_cli.wandb_name |
|
|
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg)
    print(f"[INFO] Experiment directory: {os.path.join(log_root_path, log_dir)}")

    # read configurations about the agent-training
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)
    obs_groups = agent_cfg["params"]["env"].get("obs_groups")
    concate_obs_groups = agent_cfg["params"]["env"].get("concate_obs_groups", True)
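    # note: with the math.inf defaults above, observations and actions pass through unclipped
    # unless the agent configuration sets explicit bounds.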

    # configure export of IO descriptors (supported for manager-based RL environments only)
    if isinstance(env_cfg, ManagerBasedRLEnvCfg):
        env_cfg.export_io_descriptors = args_cli.export_io_descriptors
    else:
        logger.warning(
            "IO descriptors are only supported for manager-based RL environments. No IO descriptors will be exported."
        )

    # set the log directory for the environment
    env_cfg.log_dir = os.path.join(log_root_path, log_dir)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
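    # note: with a `step_trigger`, gymnasium's RecordVideo starts a new clip whenever the trigger
    # returns True and records `video_length` steps from that point.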

    # record the wall-clock time at the start of training
    start_time = time.time()

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions, obs_groups, concate_obs_groups)

    # register the environment to rl-games registry
    # note: in the agent configuration, the environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs

    # create runner from rl-games
    if "pbt" in agent_cfg and agent_cfg["pbt"]["enabled"]:
        # population-based training: attach the PBT observer alongside the default one
        observers = MultiObserver([IsaacAlgoObserver(), PbtAlgoObserver(agent_cfg, args_cli)])
        runner = Runner(observers)
    else:
        runner = Runner(IsaacAlgoObserver())

    # load the agent configuration into the runner
    runner.load(agent_cfg)

    # reset the agent and env
    runner.reset()

    # initialize wandb tracking (only on the global main process in distributed runs)
    global_rank = int(os.getenv("RANK", "0"))
    if args_cli.track and global_rank == 0:
        if args_cli.wandb_entity is None:
            raise ValueError("Weights and Biases entity must be specified for tracking.")
        import wandb

        wandb.init(
            project=wandb_project,
            entity=args_cli.wandb_entity,
            name=experiment_name,
            sync_tensorboard=True,
            monitor_gym=True,
            save_code=True,
        )
        if not wandb.run.resumed:
            wandb.config.update({"env_cfg": env_cfg.to_dict()})
            wandb.config.update({"agent_cfg": agent_cfg})

    # train the agent
    if args_cli.checkpoint is not None:
        runner.run({"train": True, "play": False, "sigma": train_sigma, "checkpoint": resume_path})
    else:
        runner.run({"train": True, "play": False, "sigma": train_sigma})

    print(f"[INFO] Training time: {round(time.time() - start_time, 2)} seconds")

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()