# docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/dqn/#dqn_ataripy import argparse import os import random import time from distutils.util import strtobool import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from stable_baselines3.common.atari_wrappers import ( ClipRewardEnv, EpisodicLifeEnv, FireResetEnv, MaxAndSkipEnv, NoopResetEnv, ) from stable_baselines3.common.buffers import ReplayBuffer from torch.utils.tensorboard import SummaryWriter def parse_args(): # fmt: off parser = argparse.ArgumentParser() parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"), help="the name of this experiment") parser.add_argument("--seed", type=int, default=1, help="seed of the experiment") parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, `torch.backends.cudnn.deterministic=False`") parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, cuda will be enabled by default") parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="if toggled, this experiment will be tracked with Weights and Biases") parser.add_argument("--wandb-project-name", type=str, default="cleanRL", help="the wandb's project name") parser.add_argument("--wandb-entity", type=str, default=None, help="the entity (team) of wandb's project") parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="whether to capture videos of the agent performances (check out `videos` folder)") parser.add_argument("--save-model", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="whether to save model into the `runs/{run_name}` folder") parser.add_argument("--upload-model", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="whether to upload the saved model to huggingface") parser.add_argument("--hf-entity", type=str, default="", help="the user or org name of the model repository from the Hugging Face Hub") # Algorithm specific arguments parser.add_argument("--env-id", type=str, default="PongNoFrameskip-v4", help="the id of the environment") parser.add_argument("--total-timesteps", type=int, default=5000000, help="total timesteps of the experiments") parser.add_argument("--learning-rate", type=float, default=0.0001, help="the learning rate of the optimizer") parser.add_argument("--buffer-size", type=int, default=1000000, help="the replay memory buffer size") parser.add_argument("--gamma", type=float, default=0.99, help="the discount factor gamma") parser.add_argument("--target-tau", type=float, default=1.0, help="the target network update rate") parser.add_argument("--target-network-frequency", type=int, default=1000, help="the timesteps it takes to update the target network") parser.add_argument("--batch-size", type=int, default=32, help="the batch size of sample from the reply memory") parser.add_argument("--start-e", type=float, default=1.0, help="the starting epsilon for exploration") parser.add_argument("--end-e", type=float, default=0.01, help="the ending epsilon for exploration") parser.add_argument("--exploration-fraction", type=float, default=0.2, help="the fraction of `total-timesteps` it takes from start-e to go end-e") parser.add_argument("--learning-starts", type=int, default=10000, help="timestep to start learning") parser.add_argument("--train-frequency", type=int, default=1, help="the frequency of training") args = parser.parse_args() # fmt: on return args def make_env(env_id, seed, idx, capture_video, run_name): def thunk(): env = gym.make(env_id) env = gym.wrappers.RecordEpisodeStatistics(env) if capture_video: if idx == 0: env = gym.wrappers.RecordVideo(env, f"videos/{run_name}") env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) env = EpisodicLifeEnv(env) if "FIRE" in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = ClipRewardEnv(env) env = gym.wrappers.ResizeObservation(env, (84, 84)) env = gym.wrappers.GrayScaleObservation(env) env = gym.wrappers.FrameStack(env, 4) env.seed(seed) env.action_space.seed(seed) env.observation_space.seed(seed) return env return thunk # ALGO LOGIC: initialize agent here: class QNetwork(nn.Module): def __init__(self, env): super().__init__() self.network = nn.Sequential( nn.Conv2d(4, 32, 8, stride=4), nn.ReLU(), nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(), nn.Conv2d(64, 64, 3, stride=1), nn.ReLU(), nn.Flatten(), nn.Linear(3136, 512), nn.ReLU(), nn.Linear(512, env.single_action_space.n), ) def forward(self, x): return self.network(x / 255.0) def linear_schedule(start_e: float, end_e: float, duration: int, t: int): slope = (end_e - start_e) / duration return max(slope * t + start_e, end_e) if __name__ == "__main__": args = parse_args() run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}" if args.track: import wandb args.alg_type = os.path.basename(__file__) wandb_sess = wandb.init( project=args.wandb_project_name, entity=args.wandb_entity, config=vars(args), save_code=True, # group='string', name=run_name, sync_tensorboard=False, monitor_gym=True, ) writer = SummaryWriter(f"runs/{run_name}") writer.add_text( "hyperparameters", "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])), ) def log_value(name: str, x: float, y: int): # writer.add_scalar(name, x, y) wandb.log({name: x, "global_step": y}) # TRY NOT TO MODIFY: seeding random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = args.torch_deterministic device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") # env setup envs = gym.vector.SyncVectorEnv([make_env(args.env_id, args.seed, 0, args.capture_video, run_name)]) assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported" envs.seed(args.seed) q_network = QNetwork(envs).to(device) optimizer = optim.RMSprop(q_network.parameters(), lr=args.learning_rate) target_network = QNetwork(envs).to(device) target_network.load_state_dict(q_network.state_dict()) rb = ReplayBuffer( args.buffer_size, envs.single_observation_space, envs.single_action_space, device, optimize_memory_usage=True, handle_timeout_termination=True, ) start_time = time.time() policy_update_counter = 0 episode_returns = [] # TRY NOT TO MODIFY: start the game obs = envs.reset() for global_step in range(args.total_timesteps): # ALGO LOGIC: put action logic here epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction * args.total_timesteps, global_step) if random.random() < epsilon: actions = np.array([envs.single_action_space.sample() for _ in range(envs.num_envs)]) else: q_values = q_network(torch.Tensor(obs).to(device)) actions = torch.argmax(q_values, dim=1).cpu().numpy() # TRY NOT TO MODIFY: execute the game and log data. next_obs, rewards, dones, infos = envs.step(actions) # TRY NOT TO MODIFY: record rewards for plotting purposes for info in infos: if "episode" in info.keys(): episode_returns.append(info['episode']['r']) episode_returns = episode_returns[-100:] print(f"step={global_step}, return={info['episode']['r']}, sps={int(global_step / (time.time() - start_time))}") log_value("perf/episodic_return", info["episode"]["r"], global_step) log_value("perf/episodic_return_mean_100", np.mean(episode_returns), global_step) log_value("perf/episodic_return_std_100", np.std(episode_returns), global_step) log_value("debug/episodic_length", info["episode"]["l"], global_step) log_value("ex2/epsilon", epsilon, global_step) break # TRY NOT TO MODIFY: save data to reply buffer; handle `terminal_observation` real_next_obs = next_obs.copy() for idx, d in enumerate(dones): if d: real_next_obs[idx] = infos[idx]["terminal_observation"] rb.add(obs, real_next_obs, actions, rewards, dones, infos) # TRY NOT TO MODIFY: CRUCIAL step easy to overlook obs = next_obs # ALGO LOGIC: training. if global_step > args.learning_starts: if global_step % args.train_frequency == 0: data = rb.sample(args.batch_size) with torch.no_grad(): target_max, _ = target_network(data.next_observations).max(dim=1) td_target = data.rewards.flatten() + args.gamma * target_max * (1 - data.dones.flatten()) old_val = q_network(data.observations).gather(1, data.actions).squeeze() loss = F.mse_loss(td_target, old_val) if global_step % 100 == 0: prev = old_val.detach().cpu().numpy() new = td_target.detach().cpu().numpy() diff, a_diff = new-prev, np.abs(new-prev) mean, a_mean = np.mean(diff), np.mean(a_diff) median, a_median = np.median(diff), np.median(a_diff) maximum, a_maximum = np.max(diff), np.max(a_diff) minimum, a_minimum = np.min(diff), np.min(a_diff) std, a_std = np.std(diff), np.std(a_diff) below, a_below = mean - std, a_mean - a_std above, a_above = mean + std, a_mean + a_std pu_scalar, a_pu_scalar = 2 * mean / maximum, 2 * a_mean / a_maximum policy_frequency_scalar_ratio = 1.0 * pu_scalar a_policy_frequency_scalar_ratio = 1.0 * a_pu_scalar log_value("losses/td_loss", loss, global_step) log_value("losses/q_values", old_val.mean().item(), global_step) log_value("td/mean", mean, global_step) log_value("td/a_mean", a_mean, global_step) log_value("td/median", median, global_step) log_value("td/a_median", a_median, global_step) log_value("td/max", maximum, global_step) log_value("td/a_max", a_maximum, global_step) log_value("td/min", minimum, global_step) log_value("td/a_min", a_minimum, global_step) log_value("td/std", std, global_step) log_value("td/a_std", a_std, global_step) log_value("td/below", below, global_step) log_value("td/a_below", a_below, global_step) log_value("td/above", above, global_step) log_value("td/a_above", a_above, global_step) log_value("pu/pu_scalar", pu_scalar, global_step) log_value("pu/a_pu_scalar", a_pu_scalar, global_step) log_value("pu/policy_frequency_scalar_ratio", policy_frequency_scalar_ratio, global_step) log_value("pu/a_policy_frequency_scalar_ratio", a_policy_frequency_scalar_ratio, global_step) log_value("debug/steps_per_second", int(global_step / (time.time() - start_time)), global_step) # optimize the model optimizer.zero_grad() loss.backward() optimizer.step() # update target network if global_step % args.target_network_frequency == 0: for target_network_param, q_network_param in zip(target_network.parameters(), q_network.parameters()): target_network_param.data.copy_( args.target_tau * q_network_param.data + (1.0 - args.target_tau) * target_network_param.data ) policy_update_counter += 1 if global_step % 100 == 0: log_value("pu/n_policy_update", policy_update_counter, global_step) if args.save_model: model_path = f"runs/{run_name}/{args.exp_name}.cleanrl_model" torch.save(q_network.state_dict(), model_path) print(f"model saved to {model_path}") from cleanrl_utils.evals.dqn_eval import evaluate episodic_returns = evaluate( model_path, make_env, args.env_id, eval_episodes=10, run_name=f"{run_name}-eval", Model=QNetwork, device=device, epsilon=0.05, ) for idx, episodic_return in enumerate(episodic_returns): log_value("eval/episodic_return", episodic_return, idx) if args.upload_model: from cleanrl_utils.huggingface import push_to_hub repo_name = f"{args.env_id}-{args.exp_name}-seed{args.seed}" repo_id = f"{args.hf_entity}/{repo_name}" if args.hf_entity else repo_name push_to_hub(args, np.mean(episode_returns), repo_id, "DQN", f"runs/{run_name}", f"videos/{run_name}-eval") wandb_sess.finish() envs.close() writer.close()