import requests
import pandas as pd
from tqdm.auto import tqdm
import gradio as gr
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load


def make_clickable_model(model_name):
    """Return an HTML anchor linking to the model's Hub page.

    The displayed text is the model id with the "user/" prefix removed.
    """
    model_name_show = ' '.join(model_name.split('/')[1:])
    link = "https://huggingface.co/" + model_name
    # NOTE(review): the original computed `link` but never used it — the
    # anchor markup appears to have been stripped during extraction;
    # restored as a standard new-tab link.
    return f'<a target="_blank" href="{link}">{model_name_show}</a>'


def make_clickable_user(user_id):
    """Return an HTML anchor linking to the user's Hub profile."""
    link = "https://huggingface.co/" + user_id
    return f'<a target="_blank" href="{link}">{user_id}</a>'


def get_model_ids(assignment):
    """List model ids in the Classroom-workshop org tagged with `assignment`."""
    api = HfApi()
    models = api.list_models(author="Classroom-workshop", filter=assignment)
    return [x.modelId for x in models]


def get_metadata(model_id):
    """Download and parse a model card's metadata; None if no README exists."""
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # 404: README.md not found
        return None


def parse_metrics_accuracy(meta):
    """Extract the first reported metric value from model-index metadata.

    Returns None when the card has no "model-index" section.
    """
    if "model-index" not in meta:
        return None
    result = meta["model-index"][0]["results"]
    metrics = result[0]["metrics"]
    return metrics[0]["value"]


# We keep the worst case episode
def parse_rewards(accuracy):
    """Split a "mean +/- std" string into (mean_reward, std_reward) floats.

    Both values fall back to -1000 when the string is missing or not in the
    expected format (worst-case placeholder so bad entries sort last).
    """
    default_reward = -1000.0
    default_std = -1000.0
    if accuracy is not None:
        parsed = accuracy.split(' +/- ')
        if len(parsed) > 1:
            return float(parsed[0]), float(parsed[1])
    # BUG FIX: the original assigned the std default to the mean and vice
    # versa; both defaults are -1000 so behavior is unchanged, but the
    # names now match what they describe.
    return default_reward, default_std


class Leaderboard:
    """Registry mapping assignment ids to their title and rendered data."""

    def __init__(self) -> None:
        # id -> {'title': str, 'data': (html, dataframe, is_empty)}
        self.leaderboard = {}

    def add_leaderboard(self, id=None, title=None):
        """Register an assignment and eagerly fetch its leaderboard data."""
        if id is not None and title is not None:
            id = id.strip()
            title = title.strip()
            self.leaderboard.update(
                {id: {'title': title, 'data': get_data_per_env(id)}})

    def get_data(self):
        return self.leaderboard

    def get_ids(self):
        return list(self.leaderboard.keys())


# CSS file for the Gradio blocks layout.
with open('app.css', 'r') as f:
    BLOCK_CSS = f.read()

# Model ids already fetched per assignment; update_data() uses this to pull
# only models added since the last refresh.
LOADED_MODEL_IDS = {}


def get_data(rl_env):
    """Fetch every model for `rl_env` and return a results DataFrame."""
    global LOADED_MODEL_IDS
    data = []
    model_ids = get_model_ids(rl_env)
    LOADED_MODEL_IDS[rl_env] = model_ids
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        data.append({
            "User": user_id,
            "Model": model_id,
            # BUG FIX: the column was named "Result" here but every
            # sort_values call uses "Results"; standardized on "Results".
            "Results": parse_metrics_accuracy(meta),
        })
    return pd.DataFrame.from_records(data)


def get_data_per_env(assignment):
    """Build the (table_html, dataframe, is_empty) triple for one assignment."""
    dataframe = get_data(assignment)
    dataframe = dataframe.fillna("")
    if not dataframe.empty:
        # Turn the model/user ids into clickable links.
        dataframe["User"] = dataframe["User"].apply(make_clickable_user)
        dataframe["Model"] = dataframe["Model"].apply(make_clickable_model)
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False,
                                       justify='left')
        return table_html, dataframe, dataframe.empty
    else:
        # NOTE(review): the surrounding markup appears to have been stripped
        # during extraction; only the visible text is preserved here.
        html = """
⌛ Please wait. Results will be out soon...
"""
        return html, dataframe, dataframe.empty


leaderboard = Leaderboard()
leaderboard.add_leaderboard('assignment1', " Automatic Speech Recognition")
leaderboard.add_leaderboard('assignment2', "RL Agent for Moon landing")
ASSIGNMENTS = leaderboard.get_ids()
DETAILS = leaderboard.get_data()


def update_data(rl_env):
    """Fetch results only for models not yet in LOADED_MODEL_IDS[rl_env]."""
    global LOADED_MODEL_IDS
    data = []
    model_ids = [x for x in get_model_ids(rl_env)
                 if x not in LOADED_MODEL_IDS[rl_env]]
    LOADED_MODEL_IDS[rl_env] += model_ids
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        data.append({
            "User": user_id,
            "Model": model_id,
            # BUG FIX: this column was "Accuracy" while get_data used
            # "Result" and the sorts used "Results"; unified on "Results"
            # so the concat below aligns columns and sorting works.
            "Results": parse_metrics_accuracy(meta),
        })
    return pd.DataFrame.from_records(data)


def update_data_per_env(rl_env):
    """Merge newly-found models into the stored leaderboard for `rl_env`."""
    global DETAILS
    _, old_dataframe, _ = DETAILS[rl_env]['data']
    new_dataframe = update_data(rl_env)
    new_dataframe = new_dataframe.fillna("")
    if not new_dataframe.empty:
        new_dataframe["User"] = new_dataframe["User"].apply(
            make_clickable_user)
        new_dataframe["Model"] = new_dataframe["Model"].apply(
            make_clickable_model)
    # Concat at function level so `dataframe` is bound even when no new
    # models were found (avoids a NameError on the empty branch below).
    dataframe = pd.concat([old_dataframe, new_dataframe])
    if not dataframe.empty:
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False,
                                       justify='left')
        return table_html, dataframe, dataframe.empty
    else:
        # NOTE(review): markup stripped during extraction; text preserved.
        html = """
⌛ Please wait. Results will be out soon...
"""
        return html, dataframe, dataframe.empty


def get_info_display(len_dataframe, env_name, name_leaderboard, is_empty):
    """Render the markdown header shown above a leaderboard table.

    When the leaderboard is empty only the title is shown.
    """
    # NOTE(review): the original markdown/HTML wrappers appear to have been
    # stripped during extraction; the visible text is preserved as-is.
    if not is_empty:
        markdown = """
{name_leaderboard}

This is a leaderboard of {len_dataframe} assignments for assignment {env_name} 👩‍🚀.
""".format(len_dataframe=len_dataframe, env_name=env_name,
           name_leaderboard=name_leaderboard)
    else:
        markdown = """
{name_leaderboard}
""".format(name_leaderboard=name_leaderboard)
    return markdown


def reload_all_data():
    """Refresh every assignment's leaderboard; return a status message."""
    global DETAILS, ASSIGNMENTS
    for assignment in ASSIGNMENTS:
        DETAILS[assignment]['data'] = update_data_per_env(assignment)
    html = """
✅ Leaderboard updated! Click `Reload Leaderboard` to see the current leaderboard.
"""
    return html


def reload_leaderboard(rl_env):
    """Return (markdown header, table html) for the given assignment id."""
    global DETAILS
    data_html, data_dataframe, is_empty = DETAILS[rl_env]['data']
    markdown = get_info_display(len(data_dataframe), rl_env,
                                DETAILS[rl_env]['title'], is_empty)
    return markdown, data_html