osanseviero HF staff commited on
Commit
5d82e47
β€’
1 Parent(s): 52328f6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +235 -0
app.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import pandas as pd
3
+ from tqdm.auto import tqdm
4
+ import gradio as gr
5
+ from huggingface_hub import HfApi, hf_hub_download
6
+ from huggingface_hub.repocard import metadata_load
7
+
8
def make_clickable_model(model_name):
    """Render a model repo id as an HTML link whose text omits the user prefix."""
    # Drop the namespace (everything before the first "/") for display purposes.
    display_name = ' '.join(model_name.split('/')[1:])
    url = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{url}">{display_name}</a>'
14
+
15
# Make user clickable link
def make_clickable_user(user_id):
    """Return an HTML anchor pointing at the user's Hugging Face profile."""
    profile_url = f"https://huggingface.co/{user_id}"
    return f'<a target="_blank" href="{profile_url}">{user_id}</a>'
19
+
20
+
21
def get_model_ids(assignment):
    """List model ids in the Classroom-workshop org tagged with *assignment*."""
    hub = HfApi()
    matching = hub.list_models(author="Classroom-workshop", filter=assignment)
    return [entry.modelId for entry in matching]
26
+
27
def get_metadata(model_id):
    """Load the metadata of a model's README.md card, or None if it has none."""
    try:
        card_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(card_path)
    except requests.exceptions.HTTPError:
        # 404: the repo has no README.md, so there is no metadata to parse.
        return None
34
+
35
def parse_metrics_accuracy(meta):
    """Extract the first reported metric value from a card's model-index.

    Returns None when the card carries no model-index section.
    """
    if "model-index" not in meta:
        return None
    first_result = meta["model-index"][0]["results"][0]
    return first_result["metrics"][0]["value"]
42
+
43
# We keep the worst case episode
def parse_rewards(accuracy):
    """Parse a "mean +/- std" metric string into (mean_reward, std_reward).

    Returns (-1000.0, -1000.0) when *accuracy* is None or is not in the
    expected "mean +/- std" format, so unparsable entries sort to the bottom.

    Fixes in this revision: use `is not None` instead of `!= None`, collapse
    the duplicated fallback branches, and remove the swapped use of the
    `default_std`/`default_reward` constants (the mean default was taken from
    `default_std` and vice versa — harmless only because both were -1000).
    """
    default_reward = -1000.0  # sentinel for "no usable result"
    if accuracy is not None:
        parsed = accuracy.split(' +/- ')
        if len(parsed) > 1:
            return float(parsed[0]), float(parsed[1])
    return default_reward, default_reward
60
+
61
+
62
+
63
class Leaderboard:
    """Registry mapping an assignment id to its title and rendered data."""

    def __init__(self) -> None:
        self.leaderboard = {}

    def add_leaderboard(self, id=None, title=None):
        """Register an assignment; both id and title are required."""
        if id is None or title is None:
            return
        key = id.strip()
        self.leaderboard[key] = {
            'title': title.strip(),
            'data': get_data_per_env(key),
        }

    def get_data(self):
        """Return the whole id -> {'title', 'data'} mapping."""
        return self.leaderboard

    def get_ids(self):
        """Return the registered assignment ids."""
        return list(self.leaderboard.keys())
77
+
78
+
79
+
80
# CSS file for the app's UI, read once at import time.
with open('app.css','r') as f:
    BLOCK_CSS = f.read()


# Model ids already fetched, keyed by assignment id; update_data() diffs
# against this so it only processes newly-submitted models.
LOADED_MODEL_IDS = {}
86
+
87
def get_data(rl_env):
    """Build a DataFrame of (User, Model, Result) rows for one assignment."""
    global LOADED_MODEL_IDS
    model_ids = get_model_ids(rl_env)
    # Remember what was fetched so update_data() can diff against it later.
    LOADED_MODEL_IDS[rl_env] = model_ids

    rows = []
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            # No model card: nothing to report for this submission.
            continue
        rows.append({
            "User": model_id.split('/')[0],
            "Model": model_id,
            "Result": parse_metrics_accuracy(meta),
        })
    return pd.DataFrame.from_records(rows)
105
+
106
def get_data_per_env(assignment):
    """Fetch an assignment's leaderboard and render it as an HTML table.

    Returns a (html, dataframe, is_empty) tuple: the HTML table (or a
    placeholder message when no submissions exist yet), the underlying
    DataFrame, and whether it is empty.

    Bug fix: the sort previously used column name 'Results', but get_data()
    creates the column as "Result", so any non-empty leaderboard raised a
    KeyError in sort_values.
    """
    dataframe = get_data(assignment)
    dataframe = dataframe.fillna("")

    if not dataframe.empty:
        # Turn the user/model ids into clickable links.
        dataframe["User"] = dataframe["User"].apply(make_clickable_user)
        dataframe["Model"] = dataframe["Model"].apply(make_clickable_model)
        # FIX: column is named "Result" (see get_data), not "Results".
        dataframe = dataframe.sort_values(by=['Result'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False, justify='left')
        return table_html, dataframe, dataframe.empty
    else:
        html = """<div style="color: green">
<p> ⌛ Please wait. Results will be out soon... </p>
</div>
"""
        return html, dataframe, dataframe.empty
127
+
128
+
129
+
130
# Register the assignments shown in the app; each call fetches that
# assignment's data up front (network I/O at import time).
leaderboard = Leaderboard()
leaderboard.add_leaderboard('assignment1'," Automatic Speech Recognition")
leaderboard.add_leaderboard('assignment2',"RL Agent for Moon landing")

ASSIGNMENTS = leaderboard.get_ids()   # assignment ids, in registration order
DETAILS = leaderboard.get_data()      # id -> {'title': ..., 'data': (html, df, empty)}
136
+
137
+
138
+
139
+
140
def update_data(rl_env):
    """Fetch rows only for models submitted since the last fetch.

    Uses LOADED_MODEL_IDS to skip already-processed models, and extends it
    with the newly seen ids. Returns a DataFrame with the same columns as
    get_data() — "User", "Model", "Result" — so the two frames concatenate
    cleanly in update_data_per_env().

    Bug fix: the metric column was named "Accuracy" here while get_data()
    names it "Result"; the mismatch made pd.concat produce misaligned
    columns and broke the downstream sort.
    """
    global LOADED_MODEL_IDS
    data = []
    model_ids = [x for x in get_model_ids(rl_env) if x not in LOADED_MODEL_IDS[rl_env]]
    LOADED_MODEL_IDS[rl_env] += model_ids

    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            # No model card: skip this submission.
            continue
        row = {}
        row["User"] = model_id.split('/')[0]
        row["Model"] = model_id
        # FIX: use the same column name as get_data() ("Result").
        row["Result"] = parse_metrics_accuracy(meta)
        data.append(row)
    return pd.DataFrame.from_records(data)
158
+
159
+
160
def update_data_per_env(rl_env):
    """Merge newly-submitted rows into an assignment's cached leaderboard.

    Returns the same (html, dataframe, is_empty) tuple shape as
    get_data_per_env(), so the result can be stored back into DETAILS.

    Bug fix: the sort previously used column name 'Results', but the data
    column is "Result", so any non-empty leaderboard raised a KeyError.
    """
    global DETAILS

    _, old_dataframe, _ = DETAILS[rl_env]['data']
    new_dataframe = update_data(rl_env)

    new_dataframe = new_dataframe.fillna("")
    if not new_dataframe.empty:
        # Link-ify only the new rows; old rows already carry links from the
        # previous rendering pass.
        new_dataframe["User"] = new_dataframe["User"].apply(make_clickable_user)
        new_dataframe["Model"] = new_dataframe["Model"].apply(make_clickable_model)

    dataframe = pd.concat([old_dataframe, new_dataframe])

    if not dataframe.empty:
        # FIX: column is named "Result", not "Results".
        dataframe = dataframe.sort_values(by=['Result'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False, justify='left')
        return table_html, dataframe, dataframe.empty
    else:
        html = """<div style="color: green">
<p> ⌛ Please wait. Results will be out soon... </p>
</div>
"""
        return html, dataframe, dataframe.empty
188
+
189
+
190
+
191
+
192
+
193
+
194
def get_info_display(len_dataframe, env_name, name_leaderboard, is_empty):
    """Build the HTML header shown above a leaderboard table."""
    if is_empty:
        # No submissions yet: show only the leaderboard title.
        return """
<div class='infoPoint'>
<h1> {name_leaderboard} </h1>
<br>
</div>
""".format(name_leaderboard=name_leaderboard)

    return """
<div class='infoPoint'>
<h1> {name_leaderboard} </h1>
<br>
<p> This is a leaderboard of <b>{len_dataframe}</b> assignments for assignment {env_name} 👩‍🚀. </p>
<br>
</div>
""".format(len_dataframe=len_dataframe, env_name=env_name, name_leaderboard=name_leaderboard)
213
+
214
def reload_all_data():
    """Refresh every assignment's cached data and return a status banner."""
    global DETAILS, ASSIGNMENTS

    for assignment in ASSIGNMENTS:
        DETAILS[assignment]['data'] = update_data_per_env(assignment)

    banner = """<div style="color: green">
<p> ✅ Leaderboard updated! Click `Reload Leaderboard` to see the current leaderboard.</p>
</div>
"""
    return banner
226
+
227
+
228
def reload_leaderboard(rl_env):
    """Return (info markdown, table html) for one assignment from the cache."""
    global DETAILS

    data_html, data_dataframe, is_empty = DETAILS[rl_env]['data']
    title = DETAILS[rl_env]['title']
    markdown = get_info_display(len(data_dataframe), rl_env, title, is_empty)
    return markdown, data_html