MariaK and ThomasSimonini (HF staff) committed
Commit 3d7abcf • 0 Parent(s):

Duplicate from ThomasSimonini/Check-my-progress-Deep-RL-Course

Co-authored-by: Thomas Simonini <ThomasSimonini@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +285 -0
  4. utils.py +16 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Check My Progress Deep RL Course
+ emoji: 👀
+ colorFrom: yellow
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.16.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: ThomasSimonini/Check-my-progress-Deep-RL-Course
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,285 @@
+ import requests  # hf_hub_download raises requests.exceptions.HTTPError when README.md is missing
+ import gradio as gr
+ from huggingface_hub import HfApi, hf_hub_download
+ from huggingface_hub.repocard import metadata_load
+
+ import pandas as pd
+
+ from utils import *
+
+ api = HfApi()
+
+ def get_user_models(hf_username, env_tag, lib_tag):
+     """
+     List the Reinforcement Learning models
+     of a user for a given environment and library
+     :param hf_username: User HF username
+     :param env_tag: Environment tag
+     :param lib_tag: Library tag
+     """
+     api = HfApi()
+     models = api.list_models(author=hf_username, filter=["reinforcement-learning", env_tag, lib_tag])
+
+     user_model_ids = [x.modelId for x in models]
+     return user_model_ids
+
+
+ def get_user_sf_models(hf_username, env_tag, lib_tag):
+     """
+     List the Sample Factory models of a user for a given environment,
+     matching on the dataset name in the model card (these models have no env tag)
+     :param hf_username: User HF username
+     :param env_tag: Environment name
+     :param lib_tag: Library tag
+     """
+     api = HfApi()
+     models_sf = []
+     models = api.list_models(author=hf_username, filter=["reinforcement-learning", lib_tag])
+
+     user_model_ids = [x.modelId for x in models]
+
+     for model in user_model_ids:
+         meta = get_metadata(model)
+         if meta is None:
+             continue
+         result = meta["model-index"][0]["results"][0]["dataset"]["name"]
+         if result == env_tag:
+             models_sf.append(model)
+
+     return models_sf
+
+
+ def get_metadata(model_id):
+     """
+     Get model metadata (contains evaluation data)
+     :param model_id: model id on the Hub
+     """
+     try:
+         readme_path = hf_hub_download(model_id, filename="README.md")
+         return metadata_load(readme_path)
+     except requests.exceptions.HTTPError:
+         # 404: README.md not found
+         return None
+
+
+ def parse_metrics_accuracy(meta):
+     """
+     Get the model evaluation result and parse it
+     :param meta: model metadata
+     """
+     if "model-index" not in meta:
+         return None
+     result = meta["model-index"][0]["results"]
+     metrics = result[0]["metrics"]
+     accuracy = metrics[0]["value"]
+
+     return accuracy
+
+
+ def parse_rewards(accuracy):
+     """
+     Parse mean_reward and std_reward
+     :param accuracy: model result, e.g. "200.50 +/- 20.80"
+     """
+     default_std = -1000
+     default_reward = -1000
+     if accuracy is not None:
+         accuracy = str(accuracy)
+         parsed = accuracy.split(' +/- ')
+         if len(parsed) > 1:
+             mean_reward = float(parsed[0])
+             std_reward = float(parsed[1])
+         elif len(parsed) == 1:  # only mean reward
+             mean_reward = float(parsed[0])
+             std_reward = 0.0
+         else:
+             mean_reward = float(default_reward)
+             std_reward = float(default_std)
+     else:
+         mean_reward = float(default_reward)
+         std_reward = float(default_std)
+
+     return mean_reward, std_reward
+
+ def calculate_best_result(user_model_ids):
+     """
+     Calculate the best result of a unit
+     best_result = mean_reward - std_reward
+     :param user_model_ids: RL models of a user
+     """
+     best_result = -1000
+     best_model_id = ""
+     for model in user_model_ids:
+         meta = get_metadata(model)
+         if meta is None:
+             continue
+         accuracy = parse_metrics_accuracy(meta)
+         mean_reward, std_reward = parse_rewards(accuracy)
+         result = mean_reward - std_reward
+         if result > best_result:
+             best_result = result
+             best_model_id = model
+
+     return best_result, best_model_id
+
+ def check_if_passed(model):
+     """
+     Check if result >= baseline
+     to know if you pass
+     :param model: user model
+     """
+     if model["best_result"] >= model["min_result"]:
+         model["passed_"] = True
+
+ def certification(hf_username):
+     results_certification = [
+         {
+             "unit": "Unit 1",
+             "env": "LunarLander-v2",
+             "library": "stable-baselines3",
+             "min_result": 200,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 2",
+             "env": "Taxi-v3",
+             "library": "q-learning",
+             "min_result": 4,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 3",
+             "env": "SpaceInvadersNoFrameskip-v4",
+             "library": "stable-baselines3",
+             "min_result": 200,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 4",
+             "env": "CartPole-v1",
+             "library": "reinforce",
+             "min_result": 350,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 4",
+             "env": "Pixelcopter-PLE-v0",
+             "library": "reinforce",
+             "min_result": 5,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 5",
+             "env": "ML-Agents-SnowballTarget",
+             "library": "ml-agents",
+             "min_result": -100,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 5",
+             "env": "ML-Agents-Pyramids",
+             "library": "ml-agents",
+             "min_result": -100,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 6",
+             "env": "AntBulletEnv-v0",
+             "library": "stable-baselines3",
+             "min_result": 650,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 6",
+             "env": "PandaReachDense-v2",
+             "library": "stable-baselines3",
+             "min_result": -3.5,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 7",
+             "env": "ML-Agents-SoccerTwos",
+             "library": "ml-agents",
+             "min_result": -100,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 8 PI",
+             "env": "LunarLander-v2",
+             "library": "deep-rl-course",
+             "min_result": -500,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+         {
+             "unit": "Unit 8 PII",
+             "env": "doom_health_gathering_supreme",
+             "library": "sample-factory",
+             "min_result": 5,
+             "best_result": 0,
+             "best_model_id": "",
+             "passed_": False
+         },
+     ]
+     for unit in results_certification:
+         if unit["unit"] != "Unit 8 PII":
+             # Get user models
+             user_models = get_user_models(hf_username, unit['env'], unit['library'])
+         else:
+             # For sample-factory ViZDoom we don't have an env tag for now
+             user_models = get_user_sf_models(hf_username, unit['env'], unit['library'])
+
+         # Calculate the best result and get the best_model_id
+         best_result, best_model_id = calculate_best_result(user_models)
+
+         # Save best_result and best_model_id
+         unit["best_result"] = best_result
+         unit["best_model_id"] = make_clickable_model(best_model_id)
+
+         # Based on best_result, do we pass the unit?
+         check_if_passed(unit)
+         unit["passed"] = pass_emoji(unit["passed_"])
+
+     print(results_certification)
+
+     df = pd.DataFrame(results_certification)
+     df = df[['passed', 'unit', 'env', 'min_result', 'best_result', 'best_model_id']]
+     return df
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(f"""
+     # 🏆 Check your progress in the Deep Reinforcement Learning Course 🏆
+     You can check your progress here.
+
+     - To get a certificate of completion, you must **pass 80% of the assignments before June 1st 2023**.
+     - To get an honors certificate, you must **pass 100% of the assignments before June 1st 2023**.
+
+     To pass an assignment, your model result (mean_reward - std_reward) must be >= min_result.
+
+     **When min_result = -100, it means that you just need to push a model to pass this hands-on. No need to reach a certain result.**
+
+     Just type your Hugging Face username 🤗 (in my case ThomasSimonini).
+     """)
+
+     hf_username = gr.Textbox(placeholder="ThomasSimonini", label="Your Hugging Face Username")
+     #email = gr.Textbox(placeholder="thomas.simonini@huggingface.co", label="Your Email (to receive your certificate)")
+     check_progress_button = gr.Button(value="Check my progress")
+     output = gr.components.Dataframe(value=certification(hf_username), headers=["Pass?", "Unit", "Environment", "Baseline", "Your best result", "Your best model id"], datatype=["markdown", "markdown", "markdown", "number", "number", "markdown"])
+     check_progress_button.click(fn=certification, inputs=hf_username, outputs=output)
+
+ demo.launch()
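
For reference, the pass check implemented above reduces to parsing the `mean_reward +/- std_reward` string stored in the model card and comparing `mean_reward - std_reward` against the unit's `min_result`. Below is a minimal, self-contained sketch of that check, simplified to skip the None/default handling of the full `parse_rewards`; the reward string is a hypothetical model-card value, and 200 is the Unit 1 (LunarLander-v2) baseline from the table above.

```python
# Standalone sketch of the pass check used in app.py; the reward string is hypothetical.

def parse_rewards(accuracy):
    # "262.50 +/- 15.30" -> (262.5, 15.3); a bare number -> (value, 0.0)
    parsed = str(accuracy).split(" +/- ")
    mean_reward = float(parsed[0])
    std_reward = float(parsed[1]) if len(parsed) > 1 else 0.0
    return mean_reward, std_reward

mean_reward, std_reward = parse_rewards("262.50 +/- 15.30")
min_result = 200  # Unit 1 (LunarLander-v2) baseline
result = mean_reward - std_reward
print(f"result={result:.2f}, passed={result >= min_result}")
# result=247.20, passed=True
```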
utils.py ADDED
@@ -0,0 +1,16 @@
+ # Based on Omar Sanseviero's work
+ # Make model clickable link
+ def make_clickable_model(model_name):
+     # remove user from model name
+     model_name_show = ' '.join(model_name.split('/')[1:])
+
+     link = "https://huggingface.co/" + model_name
+     return f'<a target="_blank" href="{link}">{model_name_show}</a>'
+
+ def pass_emoji(passed):
+     print("PASSED", passed)
+     if passed is True:
+         passed = "✅"
+     else:
+         passed = "❌"
+     return passed
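
As a quick illustration of these helpers (`MyUser/ppo-LunarLander-v2` is a hypothetical model id), `make_clickable_model` drops the user prefix and wraps the model name in a link, and `pass_emoji` maps the boolean pass flag to the emoji shown in the results table:

```python
# Hypothetical usage of the utils.py helpers.
from utils import make_clickable_model, pass_emoji

print(make_clickable_model("MyUser/ppo-LunarLander-v2"))
# <a target="_blank" href="https://huggingface.co/MyUser/ppo-LunarLander-v2">ppo-LunarLander-v2</a>

# pass_emoji also prints a "PASSED <value>" debug line before returning the emoji
print(pass_emoji(True))   # ✅
print(pass_emoji(False))  # ❌
```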