VinayHajare committed
Commit
c97d971
1 Parent(s): 1a6b133

Update README.md

Files changed (1)
  1. README.md +7 -9
README.md CHANGED
@@ -29,7 +29,7 @@ using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines
 
 
 ```python
-# Usage code
+# !pip gymnasium huggingface-sb3 stable_baselines3[extra]
 import gymnasium as gym
 from huggingface_sb3 import load_from_hub
 from stable_baselines3 import PPO
@@ -39,20 +39,18 @@ from stable_baselines3.common.monitor import Monitor
 
 repo_id = "VinayHajare/ppo-LunarLander-v2"
 filename = "ppo-LunarLander-v2.zip"
-eval_env = DummyVecEnv([lambda: Monitor(gym.make("LunarLander-v2", render_mode="rgb_array"))])
+eval_env = gym.make("LunarLander-v2", render_mode="human")
 
 checkpoint = load_from_hub(repo_id, filename)
-model = PPO.load(checkpoint,env=eval_env,print_system_info=True)
+model = PPO.load(checkpoint,print_system_info=True)
 
-#eval_env = DummyVecEnv([lambda: Monitor(gym.make("LunarLander-v2", render_mode="rgb_array"))])
 mean_reward, std_reward = evaluate_policy(model,eval_env, n_eval_episodes=10, deterministic=True)
 print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
 
 # Enjoy trained agent
-vec_env = model.get_env()
-obs, info = vec_env.reset()
+observation, info = eval_env.reset()
 for _ in range(1000):
-    action, _states = model.predict(obs, deterministic=True)
-    obs, rewards, terminated,truncated, info = vec_env.step(action)
-    vec_env.render("human")
+    action, _states = model.predict(observation, deterministic=True)
+    observation, rewards, terminated, truncated, info = eval_env.step(action)
+    eval_env.render()
 ```
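
As committed, the updated snippet still has a few rough edges: the pip comment is missing `install`, `evaluate_policy` is used but its import sits in lines the diff elides (in stable-baselines3 it lives in `stable_baselines3.common.evaluation`), and the enjoyment loop never resets after an episode ends. A minimal runnable sketch of the new version, assuming that import and adding the reset:

```python
# !pip install "gymnasium[box2d]" huggingface-sb3 "stable-baselines3[extra]"  # box2d is needed for LunarLander
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy  # elided in the diff; assumed import

repo_id = "VinayHajare/ppo-LunarLander-v2"
filename = "ppo-LunarLander-v2.zip"

# Download the checkpoint from the Hub and load the policy.
checkpoint = load_from_hub(repo_id, filename)
model = PPO.load(checkpoint, print_system_info=True)

# render_mode="human" opens a window and draws on every step(),
# so no explicit render() call is needed below.
eval_env = gym.make("LunarLander-v2", render_mode="human")

mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")

# Enjoy the trained agent; reset when an episode ends so stepping
# continues past the first landing or crash.
observation, info = eval_env.reset()
for _ in range(1000):
    action, _states = model.predict(observation, deterministic=True)
    observation, reward, terminated, truncated, info = eval_env.step(action)
    if terminated or truncated:
        observation, info = eval_env.reset()
eval_env.close()
```

Dropping the explicit `eval_env.render()` is deliberate: gymnasium renders automatically in human mode and warns if it is called anyway.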