Younes-hands-on-rl
committed on
Commit
•
d446d2e
1
Parent(s):
551409f
Upload a2c_sb3_cartpole.py
Browse files- a2c_sb3_cartpole.py +30 -0
a2c_sb3_cartpole.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gym

from stable_baselines3 import A2C

# Set up the CartPole environment
# Create the environment
env = gym.make("CartPole-v1")

# Reset the environment and get the initial observation
observation = env.reset()

print('observation space:', env.observation_space)
print('action space:', env.action_space)
print('threshold: ', env.spec.reward_threshold)

# Train an A2C agent with a multilayer-perceptron policy on CartPole.
model = A2C("MlpPolicy", env, verbose=1)
print(model)
model.learn(total_timesteps=25000)

# Roll out the trained policy for 500 episodes.
for _ in range(500):
    observation = env.reset()
    done = False

    while not done:
        # predict() returns (action, recurrent_states); the states are
        # unused for a feed-forward MlpPolicy.
        action, _states = model.predict(observation)
        observation, reward, done, info = env.step(action)
        # Render every step so the episode is actually visible.
        # (The original called render() once AFTER the episode had
        # already terminated, so nothing useful was ever drawn.)
        env.render()

# Release the environment and close any render window.
env.close()