zjowowen committed
Commit: c805ffb
Parent(s): 8555fdd

Upload README.md with huggingface_hub

Files changed (1)
  1. README.md +13 -10
README.md CHANGED
@@ -21,7 +21,7 @@ model-index:
       type: OpenAI/Gym/Atari-SpaceInvadersNoFrameskip-v4
     metrics:
     - type: mean_reward
-      value: 685.0 +/- 0.0
+      value: 700.0 +/- 0.0
       name: mean_reward
 ---
 
@@ -67,10 +67,10 @@ import torch
 
 # Pull model from files which are git cloned from huggingface
 policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
-cfg = EasyDict(Config.file_to_dict("policy_config.py"))
+cfg = EasyDict(Config.file_to_dict("policy_config.py").cfg_dict)
 # Instantiate the agent
 agent = PPOF(
-    env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-PPO", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
+    env_id="SpaceInvadersNoFrameskip-v4", exp_name="SpaceInvadersNoFrameskip-v4-PPO", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
 )
 # Continue training
 agent.train(step=5000)
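For context, this hunk sits inside the README's "deploy from git clone" example. A minimal sketch of the whole snippet after the change, with the imports assumed from the standard DI-engine model-card template (they are not part of this hunk):

```python
# Sketch only: the imports below are assumed, not shown in this diff.
from ding.bonus import PPOF
from ding.config import Config
from easydict import EasyDict
import torch

# Load the policy weights and config that were git-cloned from Hugging Face
policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
cfg = EasyDict(Config.file_to_dict("policy_config.py").cfg_dict)
# Instantiate the agent with the renamed env_id keyword and continue training
agent = PPOF(
    env_id="SpaceInvadersNoFrameskip-v4",
    exp_name="SpaceInvadersNoFrameskip-v4-PPO",
    cfg=cfg.exp_config,
    policy_state_dict=policy_state_dict
)
agent.train(step=5000)
```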
@@ -98,7 +98,7 @@ from huggingface_ding import pull_model_from_hub
 policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-PPO")
 # Instantiate the agent
 agent = PPOF(
-    env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-PPO", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
+    env_id="SpaceInvadersNoFrameskip-v4", exp_name="SpaceInvadersNoFrameskip-v4-PPO", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
 )
 # Continue training
 agent.train(step=5000)
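The hub-download variant follows the same pattern. Assembled with its imports (`pull_model_from_hub` is visible in the hunk header; the `PPOF` import is assumed from the same template), the updated snippet reads roughly:

```python
# Sketch only: the PPOF import is assumed, not shown in this diff.
from ding.bonus import PPOF
from huggingface_ding import pull_model_from_hub

# Download the trained policy and its config directly from the Hub
policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-PPO")
# Instantiate the agent with the renamed env_id keyword and continue training
agent = PPOF(
    env_id="SpaceInvadersNoFrameskip-v4",
    exp_name="SpaceInvadersNoFrameskip-v4-PPO",
    cfg=cfg.exp_config,
    policy_state_dict=policy_state_dict
)
agent.train(step=5000)
```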
@@ -125,7 +125,7 @@ from ding.bonus import PPOF
 from huggingface_ding import push_model_to_hub
 
 # Instantiate the agent
-agent = PPOF(env="SpaceInvadersNoFrameskip", exp_name="SpaceInvadersNoFrameskip-v4-PPO")
+agent = PPOF(env_id="SpaceInvadersNoFrameskip-v4", exp_name="SpaceInvadersNoFrameskip-v4-PPO")
 # Train the agent
 return_ = agent.train(step=int(10000000))
 # Push model to huggingface hub
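The training example gets the same keyword rename. A runnable sketch of just the instantiation and training lines from this hunk (the `push_model_to_hub` call that follows is shown in the next hunk):

```python
# Sketch of the training portion only; the subsequent push call is not reproduced here.
from ding.bonus import PPOF

# Instantiate a fresh agent with the renamed env_id keyword
agent = PPOF(env_id="SpaceInvadersNoFrameskip-v4", exp_name="SpaceInvadersNoFrameskip-v4-PPO")
# Train for ten million environment steps, as in the README example
return_ = agent.train(step=int(10000000))
```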
@@ -142,7 +142,8 @@ push_model_to_hub(
     usage_file_by_git_clone="./ppo/spaceinvaders_ppo_deploy.py",
     usage_file_by_huggingface_ding="./ppo/spaceinvaders_ppo_download.py",
     train_file="./ppo/spaceinvaders_ppo.py",
-    repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-PPO"
+    repo_id="OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-PPO",
+    create_repo=False
 )
 
 ```
@@ -181,7 +182,9 @@ exp_config = {
         'actor_head_hidden_size': 128,
         'critic_head_hidden_size': 128
     },
-    'cfg_type': 'PPOFPolicyDict'
+    'cfg_type': 'PPOFPolicyDict',
+    'env_id': 'SpaceInvadersNoFrameskip-v4',
+    'exp_name': 'SpaceInvadersNoFrameskip-v4-PPO'
 }
 
 ```
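Because `policy_config.py` in this repository stores the `exp_config` dict shown above, the two newly added keys can be read back the same way the deployment example loads the config. A small illustrative check, assuming `policy_config.py` sits in the working directory:

```python
# Illustrative check only; assumes policy_config.py from this repo is present locally.
from ding.config import Config
from easydict import EasyDict

cfg = EasyDict(Config.file_to_dict("policy_config.py").cfg_dict)
print(cfg.exp_config.env_id)    # SpaceInvadersNoFrameskip-v4
print(cfg.exp_config.exp_name)  # SpaceInvadersNoFrameskip-v4-PPO
print(cfg.exp_config.cfg_type)  # PPOFPolicyDict
```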
@@ -199,13 +202,13 @@ exp_config = {
 - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/SpaceInvadersNoFrameskip-v4-PPO/blob/main/replay.mp4)
 <!-- Provide the size information for the model. -->
 - **Parameters total size:** 11501.55 KB
-- **Last Update Date:** 2023-06-16
+- **Last Update Date:** 2023-09-21
 
 ## Environments
 <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->
 - **Benchmark:** OpenAI/Gym/Atari
 - **Task:** SpaceInvadersNoFrameskip-v4
 - **Gym version:** 0.25.1
-- **DI-engine version:** v0.4.8
-- **PyTorch version:** 1.7.1
+- **DI-engine version:** v0.4.9
+- **PyTorch version:** 2.0.1+cu117
 - **Doc**: [DI-engine-docs Environments link](https://di-engine-docs.readthedocs.io/en/latest/13_envs/atari.html)
 