Spaces:
Runtime error
Updates for Audio course
app.py CHANGED
@@ -1,44 +1,45 @@
 import gradio as gr
 from huggingface_hub import HfApi, hf_hub_download
 from huggingface_hub.repocard import metadata_load
-
 import pandas as pd

-

 api = HfApi()

-def get_user_models(hf_username, env_tag, lib_tag):
     """
-    List the models
-    from user given environment and lib
     :param hf_username: User HF username
-    :param env_tag: Environment tag
-    :param lib_tag: Library tag
     """
-    api = HfApi()
-    models = api.list_models(author=hf_username, filter=["reinforcement-learning", env_tag, lib_tag])
-
-    user_model_ids = [x.modelId for x in models]
-    return user_model_ids
-
-
-def get_user_sf_models(hf_username, env_tag, lib_tag):
-    api = HfApi()
-    models_sf = []
-    models = api.list_models(author=hf_username, filter=["reinforcement-learning", lib_tag])

     user_model_ids = [x.modelId for x in models]

     for model in user_model_ids:
         meta = get_metadata(model)
         if meta is None:
             continue
-
-
-
-
-    return models_sf


 def get_metadata(model_id):
@@ -54,232 +55,125 @@ def get_metadata(model_id):
     return None


-def parse_metrics_accuracy(meta):
-
-
-
-
-
-
-
-
-
-
-


-def parse_rewards(accuracy):
     """
-    Parse the mean_reward and std_reward
-    :param accuracy: model results
     """
-    default_std = -1000
-    default_reward = -1000
-    if accuracy != None:
-        accuracy = str(accuracy)
-        parsed = accuracy.split(' +/- ')
-        if len(parsed)>1:
-            mean_reward = float(parsed[0])
-            std_reward = float(parsed[1])
-        elif len(parsed)==1: #only mean reward
-            mean_reward = float(parsed[0])
-            std_reward = float(0)
-        else:
-            mean_reward = float(default_std)
-            std_reward = float(default_reward)
-    else:
-        mean_reward = float(default_std)
-        std_reward = float(default_reward)
-
-    return mean_reward, std_reward

-def calculate_best_result(user_model_ids):
"""
|
98 |
Calculate the best results of a unit
|
99 |
-
best_result = mean_reward - std_reward
|
100 |
:param user_model_ids: RL models of a user
|
101 |
"""
|
102 |
-
|
103 |
-
|
|
|
|
|
104 |
for model in user_model_ids:
|
105 |
meta = get_metadata(model)
|
106 |
if meta is None:
|
107 |
continue
|
108 |
-
accuracy = parse_metrics_accuracy(
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
best_result = result
|
113 |
-
best_model_id = model
|
114 |
|
115 |
-
return best_result,
|
116 |
|
117 |
-
def check_if_passed(model):
|
118 |
-
"""
|
119 |
-
Check if result >= baseline
|
120 |
-
to know if you pass
|
121 |
-
:param model: user model
|
122 |
-
"""
|
123 |
-
if model["best_result"] >= model["min_result"]:
|
124 |
-
model["passed_"] = True
|
125 |
|
126 |
def certification(hf_username):
     results_certification = [
         {
-            "unit": "Unit 1",
-            "env": "LunarLander-v2",
-            "library": "stable-baselines3",
-            "min_result": 200,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
-            "unit": "Unit 2",
-            "env": "Taxi-v3",
-            "library": "q-learning",
-            "min_result": 4,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
-            "unit": "Unit 3",
-            "env": "SpaceInvadersNoFrameskip-v4",
-            "library": "deep-q-learning",
-            "min_result": 200,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
-            "unit": "Unit 4",
-            "env": "CartPole-v1",
-            "library": "reinforce",
-            "min_result": 350,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
-        {
-            "unit": "Unit 4",
-            "env": "Pixelcopter-PLE-v0",
-            "library": "reinforce",
-            "min_result": 5,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 5",
-            "env": "ML-Agents-SnowballTarget",
-            "library": "ml-agents",
-            "min_result": -100,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 5",
-            "env": "ML-Agents-Pyramids",
-            "library": "ml-agents",
-            "min_result": -100,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 6",
-            "env": "AntBulletEnv-v0",
-            "library": "stable-baselines3",
-            "min_result": 650,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 6",
-            "env": "PandaReachDense-v2",
-            "library": "stable-baselines3",
-            "min_result": -3.5,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 7",
-            "env": "ML-Agents-SoccerTwos",
-            "library": "ml-agents",
-            "min_result": -100,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 8 PI",
-            "env": "LunarLander-v2",
-            "library": "deep-rl-course",
-            "min_result": -500,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
-        {
-            "unit": "Unit 8 PII",
-            "env": "doom_health_gathering_supreme",
-            "library": "sample-factory",
-            "min_result": 5,
-            "best_result": 0,
-            "best_model_id": "",
-            "passed_": False
-        },
     ]
     for unit in results_certification:
-        if unit["library"] == "sample-factory":
-            user_models = get_user_sf_models(hf_username, unit["env"], unit["library"])
-
-
         else:
-            user_models = get_user_models(hf_username, unit["env"], unit["library"])
-
-
-        # Calculate the best result and get the best_model_id
-        best_result, best_model_id = calculate_best_result(user_models)
-
-        # Save best_result and best_model_id
-        unit["best_result"] = best_result
-        unit["best_model_id"] = make_clickable_model(best_model_id)
-
-        # Based on best_result do we pass the unit?
-        check_if_passed(unit)
-        unit["passed"] = pass_emoji(unit["passed_"])

     print(results_certification)

     df = pd.DataFrame(results_certification)
-    df = df[['passed', 'unit', 'env', 'min_result', 'best_result', 'best_model_id']]
     return df


 with gr.Blocks() as demo:
     gr.Markdown(f"""
-    # 🏆 Check your progress in the Deep Reinforcement Learning Course 🏆
     You can check your progress here.

-    - To get a certificate of completion, you must **pass 80% of the assignments**.
-    - To get an honors certificate, you must **pass 100% of the assignments**.

-    To pass an assignment your model result (mean_reward - std_reward) must be >= min_result

-    **When min_result = -100 it means that you just need to push a model to pass this hands-on**

-    Just type your Hugging Face Username 🤗 (in my case ThomasSimonini)
     """)

-    hf_username = gr.Textbox(placeholder="ThomasSimonini", label="Your Hugging Face Username")
-    #email = gr.Textbox(placeholder="thomas.simonini@huggingface.co", label="Your Email (to receive your certificate)")
     check_progress_button = gr.Button(value="Check my progress")
-    output = gr.components.Dataframe(value=
     check_progress_button.click(fn=certification, inputs=hf_username, outputs=output)

 demo.launch()
 import gradio as gr
 from huggingface_hub import HfApi, hf_hub_download
 from huggingface_hub.repocard import metadata_load
+import requests
+import re
 import pandas as pd
+from huggingface_hub import ModelCard
+
+def make_clickable_model(model_name):
+    # remove user from model name
+    model_name_show = ' '.join(model_name.split('/')[1:])

+    link = "https://huggingface.co/" + model_name
+    return f'<a target="_blank" href="{link}">{model_name_show}</a>'
+
+def pass_emoji(passed):
+    if passed is True:
+        passed = "✅"
+    else:
+        passed = "❌"
+    return passed
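These two added helpers only format leaderboard cells. For instance (a quick sketch outside the diff, with a hypothetical repo id):

    make_clickable_model("MariaK/my-model")
    # -> '<a target="_blank" href="https://huggingface.co/MariaK/my-model">my-model</a>'
    pass_emoji(True)   # -> "✅"
    pass_emoji(False)  # -> "❌"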

 api = HfApi()

+def get_user_audio_classification_models(hf_username):
     """
+    List the user's Audio Classification models
     :param hf_username: User HF username
     """

+    models = api.list_models(author=hf_username, filter=["audio-classification"])
     user_model_ids = [x.modelId for x in models]
+    models_gtzan = []

     for model in user_model_ids:
         meta = get_metadata(model)
         if meta is None:
             continue
+        if meta["datasets"] == ['marsyas/gtzan']:
+            models_gtzan.append(model)
+
+    return models_gtzan
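Note that meta["datasets"] raises a KeyError for any model whose card metadata has no datasets field at all. A defensive variant (a sketch, not what the commit does) would be:

    if meta.get("datasets") == ['marsyas/gtzan']:
        models_gtzan.append(model)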


 def get_metadata(model_id):

     return None


+def extract_accuracy(model_card_content):
+    """
+    Extract the accuracy value from the models' model card
+    :param model_card_content: model card content
+    """
+    accuracy_pattern = r"Accuracy: (\d+\.\d+)"
+    match = re.search(accuracy_pattern, model_card_content)
+    if match:
+        accuracy = match.group(1)
+        return float(accuracy)
+    else:
+        return None
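The regex only matches a dotted decimal after the literal "Accuracy: " prefix, so a quick check (sketch):

    extract_accuracy("eval results: Accuracy: 0.91")  # -> 0.91
    extract_accuracy("Accuracy: 91%")                 # -> None (no decimal point)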


+def parse_metrics_accuracy(model_id):
     """
+    Get model card and parse it
+    :param model_id: model id
     """
+    card = ModelCard.load(model_id)
+    return extract_accuracy(card.content)
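ModelCard.load fetches the card from the Hub, so this is one network call per candidate model; e.g. (hypothetical repo id):

    acc = parse_metrics_accuracy("MariaK/distilhubert-finetuned-gtzan")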

+
+def calculate_best_acc_result(user_model_ids):
     """
     Calculate the best results of a unit
     :param user_model_ids: RL models of a user
     """
+
+    best_result = -100
+    best_model = ""
+
     for model in user_model_ids:
         meta = get_metadata(model)
         if meta is None:
             continue
+        accuracy = parse_metrics_accuracy(model)
+        if accuracy > best_result:
+            best_result = accuracy
+            best_model = meta['model-index'][0]["name"]

+    return best_result, best_model
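One caveat: parse_metrics_accuracy returns None when no accuracy line is found, and on Python 3 the comparison None > best_result raises a TypeError. A guarded comparison (a sketch, not part of the commit) would be:

    accuracy = parse_metrics_accuracy(model)
    if accuracy is not None and accuracy > best_result:
        best_result = accuracy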


 def certification(hf_username):
     results_certification = [
         {
+            "unit": "Unit 4: Audio Classification",
+            "task": "audio-classification",
+            "baseline_metric": 0.87,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
+            "unit": "Unit 5: TBD",
+            "task": "TBD",
+            "baseline_metric": 0.99,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
+            "unit": "Unit 6: TBD",
+            "task": "TBD",
+            "baseline_metric": 0.99,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
         {
+            "unit": "Unit 7: TBD",
+            "task": "TBD",
+            "baseline_metric": 0.99,
             "best_result": 0,
             "best_model_id": "",
             "passed_": False
         },
     ]
+
     for unit in results_certification:
+        if unit["task"] == "audio-classification":
+            user_models = get_user_audio_classification_models(hf_username)
+            best_result, best_model_id = calculate_best_acc_result(user_models)
+            unit["best_result"] = best_result
+            unit["best_model_id"] = make_clickable_model(best_model_id)
+            if unit["best_result"] >= unit["baseline_metric"]:
+                unit["passed_"] = True
+            unit["passed"] = pass_emoji(unit["passed_"])
         else:
+            # TBD for other units
+            unit["passed"] = pass_emoji(unit["passed_"])
+            continue

     print(results_certification)

     df = pd.DataFrame(results_certification)
+    df = df[['passed', 'unit', 'task', 'baseline_metric', 'best_result', 'best_model_id']]
     return df
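Outside Gradio, the function can be exercised directly (hypothetical username):

    df = certification("MariaK")
    print(df)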


 with gr.Blocks() as demo:
     gr.Markdown(f"""
+    # 🏆 Check your progress in the Audio Course 🏆
     You can check your progress here.

+    - To get a certificate of completion, you must **pass 3 out of 4 assignments before July 31st 2023**.
+    - To get an honors certificate, you must **pass 4 out of 4 assignments before July 31st 2023**.

+    To pass an assignment, your model's metric should be equal or higher than the baseline metric

+    **When min_result = -100 it means that you just need to push a model to pass this hands-on.**

+    Just type your Hugging Face Username 🤗 (in my case MariaK)
     """)

+    hf_username = gr.Textbox(placeholder="MariaK", label="Your Hugging Face Username")
     check_progress_button = gr.Button(value="Check my progress")
+    output = gr.components.Dataframe(value=certification(hf_username))
     check_progress_button.click(fn=certification, inputs=hf_username, outputs=output)

 demo.launch()
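One detail worth flagging: value=certification(hf_username) runs certification once at build time and passes it the Textbox component itself rather than a username string, which is a plausible cause of the Space's "Runtime error" status. The usual pattern (a sketch) is to start the table empty and let the click handler fill it:

    output = gr.components.Dataframe()
    check_progress_button.click(fn=certification, inputs=hf_username, outputs=output)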