Upload 17 files
- Makefile +13 -0
 - app.py +140 -0
 - leaderboard_data.json +56 -0
 - pyproject.toml +13 -0
 - requirements.txt +16 -0
 - src/.DS_Store +0 -0
 - src/about.py +96 -0
 - src/display/css_html_js.py +282 -0
 - src/display/formatting.py +27 -0
 - src/display/utils.py +110 -0
 - src/envs.py +25 -0
 - src/json_leaderboard.py +121 -0
 - src/leaderboard/.DS_Store +0 -0
 - src/leaderboard/read_evals.py +196 -0
 - src/populate.py +117 -0
 - src/submission/check_validity.py +99 -0
 - src/submission/submit.py +119 -0
 
    	
Makefile
ADDED
@@ -0,0 +1,13 @@
+.PHONY: style format
+
+
+style:
+	python -m black --line-length 119 .
+	python -m isort .
+	ruff check --fix .
+
+
+quality:
+	python -m black --check --line-length 119 .
+	python -m isort --check-only .
+	ruff check .
    	
app.py
ADDED
@@ -0,0 +1,140 @@
+import gradio as gr
+import pandas as pd
+from pathlib import Path
+
+from src.json_leaderboard import create_leaderboard_df
+from src.about import (
+    CITATION_BUTTON_TEXT,
+    INTRODUCTION_TEXT,
+    LINKS_AND_INFO,
+    TITLE,
+)
+from src.display.css_html_js import custom_css
+
+# Fixed columns, always shown first
+FIXED_COLUMNS = ["Model", "Release Date", "HF Model", "Open Source"]
+
+def get_json_df():
+    """Load the leaderboard DataFrame"""
+    json_path = Path(__file__).parent / "leaderboard_data.json"
+    df = create_leaderboard_df(str(json_path))
+    return df
+
+# Extract top-level categories and their sub-categories
+def extract_categories_and_subs(df):
+    """
+    Return {category: {"overall": category overall column, "subs": [sub-category columns]}}.
+    A category's overall column ends with '-Overall'; the columns immediately after it are its sub-categories.
+    """
+    category_dict = {}
+    all_cols = list(df.columns)
+    skip_cols = set(FIXED_COLUMNS + ["Overall"])
+
+    i = 0
+    while i < len(all_cols):
+        col = all_cols[i]
+        if col.endswith("-Overall") and col not in skip_cols:
+            cat_name = col.replace("-Overall", "")
+            subs = []
+            j = i + 1
+            while j < len(all_cols):
+                next_col = all_cols[j]
+                if next_col.endswith("-Overall") or next_col in skip_cols:
+                    break
+                subs.append(next_col)
+                j += 1
+            category_dict[cat_name] = {"overall": col, "subs": subs}
+        i += 1
+    return category_dict
+
+# Column filter: keep the fixed columns plus the user-selected columns, preserving order
+def filtered_leaderboard(df, selected_columns):
+    selected_columns = selected_columns or []
+    final_cols = FIXED_COLUMNS + [col for col in df.columns if col in selected_columns and col not in FIXED_COLUMNS]
+    return df[final_cols]
+
+# Update functions
+def update_leaderboard_overall(selected_cols, df_overall):
+    return filtered_leaderboard(df_overall, selected_cols)
+
+def update_leaderboard_cat(selected_cols, df_cat):
+    return filtered_leaderboard(df_cat, selected_cols)
+
+# Initialization
+df = get_json_df()
+ALL_COLUMNS_ORDERED = list(df.columns)
+categories = extract_categories_and_subs(df)
+
+# Optional columns = all columns minus the fixed columns
+optional_columns = [col for col in df.columns if col not in FIXED_COLUMNS]
+
+# Gradio interface
+demo = gr.Blocks(css=custom_css, title="UniGenBench Leaderboard")
+
+with demo:
+    gr.HTML(TITLE)
+    gr.HTML(LINKS_AND_INFO)
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        # Overall leaderboard
+        with gr.TabItem("🏅 Overall Leaderboard", elem_id="tab-overall"):
+            selected_columns_overall = gr.CheckboxGroup(
+                choices=optional_columns,
+                label="Select additional columns to display",
+                value=optional_columns
+            )
+            leaderboard_table = gr.Dataframe(
+                value=df[ALL_COLUMNS_ORDERED],
+                headers=list(df.columns),
+                datatype=["html" if col in ["Model","HF Model"] else "str" for col in df.columns],
+                interactive=False,
+                wrap=False
+            )
+            selected_columns_overall.change(
+                fn=update_leaderboard_overall,
+                inputs=[selected_columns_overall, gr.State(value=df)],
+                outputs=leaderboard_table
+            )
+
+        # One leaderboard per top-level category
+        for cat_name, info in categories.items():
+            with gr.TabItem(f"🏆 {cat_name}", elem_id=f"tab-{cat_name}"):
+                cat_cols = [info["overall"]] + info["subs"]
+                cat_df = df[FIXED_COLUMNS + cat_cols]
+
+                optional_columns_cat = [col for col in cat_cols if col not in FIXED_COLUMNS]
+                selected_columns_cat = gr.CheckboxGroup(
+                    choices=optional_columns_cat,
+                    label=f"Select additional columns for {cat_name}",
+                    value=optional_columns_cat
+                )
+                leaderboard_table_cat = gr.Dataframe(
+                    value=cat_df,
+                    headers=list(cat_df.columns),
+                    datatype=["html" if col in ["Model","HF Model"] else "str" for col in cat_df.columns],
+                    interactive=False,
+                    wrap=False
+                )
+                selected_columns_cat.change(
+                    fn=update_leaderboard_cat,
+                    inputs=[selected_columns_cat, gr.State(value=cat_df)],
+                    outputs=leaderboard_table_cat
+                )
+
+    # Citation
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("## 📙 Citation")
+            gr.Markdown("If you use [UniGenBench]() in your research, please cite our work:")
+            citation_textbox = gr.Textbox(
+                value=CITATION_BUTTON_TEXT,
+                elem_id="citation-textbox",
+                show_label=False,
+                interactive=False,
+                lines=8,
+                show_copy_button=True
+            )
+
+if __name__ == "__main__":
+    demo.launch()
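The column handling in app.py is easiest to see on a toy frame. Below is a minimal sketch, not part of the uploaded files, assuming the Space's dependencies are installed and that it runs from the repo root so `app` is importable (importing `app` builds the Blocks UI but does not launch it); the toy column names are hypothetical.

```python
# Illustrative sketch of the column-grouping and column-filtering helpers.
import pandas as pd

from app import FIXED_COLUMNS, extract_categories_and_subs, filtered_leaderboard

# Hypothetical layout: fixed columns, the global "Overall", then one category
# whose "-Overall" column is followed by its sub-scores.
toy = pd.DataFrame(columns=FIXED_COLUMNS + ["Overall", "Attribute-Overall", "Quantity", "Color"])

# A '*-Overall' column opens a category; the columns after it (until the next
# '*-Overall' or a skipped column) become its sub-categories.
print(extract_categories_and_subs(toy))
# {'Attribute': {'overall': 'Attribute-Overall', 'subs': ['Quantity', 'Color']}}

# filtered_leaderboard always keeps FIXED_COLUMNS and preserves column order.
print(list(filtered_leaderboard(toy, ["Color"]).columns))
# ['Model', 'Release Date', 'HF Model', 'Open Source', 'Color']
```

Keeping `FIXED_COLUMNS` out of the checkbox choices is what guarantees the identifying columns can never be hidden from any tab.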
    	
leaderboard_data.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "leaderboard": [
+    {
+      "model": "Qwen-Image",
+      "link": "",
+      "hf": "",
+      "open_source": true,
+      "release_date": "",
+
+      "Overall": 78.81,
+
+      "Style": 95.10,
+
+      "World Knowledge": 94.30,
+
+      "Attribute-Overall": 87.61,
+      "Quantity": 81.94,
+      "Expression": 84.62,
+      "Material": 91.98,
+      "Size": 84.03,
+      "Shape": 84.38,
+      "Color": 99.17,
+
+      "Action-Overall": 84.13,
+      "Hand": 82.05,
+      "Full body": 88.59,
+      "Animal": 88.24,
+      "Non Contact": 80.61,
+      "Contact": 77.38,
+      "State": 87.74,
+
+      "Relationship-Overall": 79.70,
+      "Composition": 81.76,
+      "Similarity": 67.78,
+      "Inclusion": 86.96,
+      "Comparison": 81.25,
+
+      "Compound-Overall": 73.32,
+      "Imagination": 73.21,
+      "Feature matching": 73.44,
+
+      "Grammar-Overall": 60.29,
+      "Pronoun Reference": 83.82,
+      "Consistency": 70.37,
+      "Negation": 27.31,
+
+      "Layout-Overall": 85.52,
+      "2D": 86.40,
+      "3D": 85.23,
+
+      "Logical Reasoning": 53.64,
+
+      "Text": 76.14
+    }
+  ]
+}
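For a quick look at the scores without going through the Gradio app, the JSON above can be flattened directly. This is a minimal sketch, not part of the uploaded files; the real app instead builds its DataFrame via `create_leaderboard_df` in src/json_leaderboard.py.

```python
# Sanity-check sketch: flatten leaderboard_data.json into a DataFrame.
import json

import pandas as pd

with open("leaderboard_data.json") as f:
    data = json.load(f)

# One row per model; columns mix metadata (model, hf, ...) and scores.
df = pd.DataFrame(data["leaderboard"])

print(df[["model", "Overall", "Grammar-Overall", "Negation"]])
# -> Qwen-Image  78.81  60.29  27.31 (values as in the file above)
```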
    	
pyproject.toml
ADDED
@@ -0,0 +1,13 @@
+[tool.ruff]
+# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+select = ["E", "F"]
+ignore = ["E501"] # line too long (black is taking care of this)
+line-length = 119
+fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+[tool.isort]
+profile = "black"
+line_length = 119
+
+[tool.black]
+line-length = 119
    	
requirements.txt
ADDED
@@ -0,0 +1,16 @@
+APScheduler
+black
+datasets
+gradio>=3.41
+gradio[oauth]
+gradio_leaderboard==0.0.13
+gradio_client
+huggingface-hub>=0.18.0
+matplotlib
+numpy
+pandas
+python-dateutil
+tqdm
+transformers
+tokenizers>=0.15.0
+sentencepiece
    	
src/.DS_Store
ADDED
Binary file (8.2 kB)
    	
src/about.py
ADDED
@@ -0,0 +1,96 @@
+from dataclasses import dataclass
+from enum import Enum
+
+@dataclass
+class Task:
+    benchmark: str
+    metric: str
+    col_name: str
+
+
+# Select your tasks here
+# ---------------------------------------------------
+class Tasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    # For MMLongBench-Doc (https://arxiv.org/abs/2407.01523), we use ACC as the main metric
+    task0 = Task("mmlongbench_doc", "acc", "ACC")
+
+NUM_FEWSHOT = 0  # Change with your few-shot setting
+# ---------------------------------------------------
+
+
+
+# Your leaderboard name
+TITLE = """<h1 align="center" id="space-title">🥇 <a href="" target="_blank">UniGenBench</a> Leaderboard</h1>"""
+
+# Links and conference info
+LINKS_AND_INFO = """
+<div align="center">
+<p><a href="https://github.com/CodeGoat24/UnifiedReward" target="_blank">UnifiedReward Team</a></p>
+<p>
+<a href="" target="_blank">🏠 Homepage</a> | 
+<a href="" target="_blank">📄 arXiv Paper</a>
+</p>
+</div>
+"""
+
+# What does your leaderboard evaluate?
+INTRODUCTION_TEXT = """
+📚 [UniGenBench]() is a unified and versatile benchmark for T2I generation that integrates diverse prompt themes with a comprehensive suite of fine-grained evaluation criteria.
+
+🔧 You can use the official [GitHub repo](https://github.com/CodeGoat24/UniGenBench) to evaluate your model on [UniGenBench]().
+
+📝 To add your own model to the leaderboard, please send an email to yibinwang1121@163.com; we will then help with the evaluation and update the leaderboard.
+"""
+
+# Which evaluations are you running? How can people reproduce what you have?
+LLM_BENCHMARKS_TEXT = f"""
+## How it works
+
+[MMLongBench-Doc](https://arxiv.org/abs/2407.01523) evaluates multimodal models on their ability to understand long documents containing both text and visual elements. The benchmark includes various document understanding tasks that require models to process and reason over extended contexts.
+
+## Evaluation Metrics
+
+- **ACC (Accuracy)**: The primary metric measuring the overall accuracy of model predictions on document understanding tasks.
+- **Parameters**: Model size in billions of parameters
+- **Open Source**: Whether the model weights are publicly available
+
+## Reproducibility
+
+To reproduce our results, please refer to the official [MMLongBench-Doc](https://arxiv.org/abs/2407.01523) repository for evaluation scripts and detailed instructions.
+"""
+
+EVALUATION_QUEUE_TEXT = """
+## Some good practices before submitting a model
+
+### 1) Make sure you can load your model and tokenizer using AutoClasses:
+```python
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+config = AutoConfig.from_pretrained("your model name", revision=revision)
+model = AutoModel.from_pretrained("your model name", revision=revision)
+tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+```
+If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+Note: make sure your model is public!
+Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it; stay posted!
+
+### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+### 3) Make sure your model has an open license!
+This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+
+### 4) Fill up your model card
+When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+## In case of model failure
+If your model is displayed in the `FAILED` category, its execution stopped.
+Make sure you have followed the above steps first.
+If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""
+
+}"""
    	
src/display/css_html_js.py
ADDED
@@ -0,0 +1,282 @@
+custom_css = """
+
+.markdown-text {
+    font-size: 16px !important;
+    line-height: 1.6 !important;
+}
+
+.gradio-dataframe table {
+    table-layout: auto;   /* size column widths automatically by content */
+    width: 100%;          /* fill the container width */
+}
+
+.gradio-dataframe th,
+.gradio-dataframe td {
+    white-space: nowrap;          /* no line wrapping */
+    padding: 6px 10px;            /* padding for a cleaner look */
+}
+
+/* Enhanced Leaderboard table styling */
+.dataframe {
+    background: white !important;
+    border-radius: 12px !important;
+    box-shadow: 0 4px 16px rgba(0, 0, 0, 0.08) !important;
+    overflow: hidden !important;
+    border: 1px solid #e8ecef !important;
+    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif !important;
+}
+
+.dataframe th {
+    background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%) !important;
+    color: #2c3e50 !important;
+    font-weight: 600 !important;
+    font-size: 11px !important;
+    padding: 16px 12px !important;
+    text-align: center !important;
+    border-bottom: 2px solid #dee2e6 !important;
+    letter-spacing: 0.025em !important;
+    text-transform: uppercase !important;
+}
+
+/* Override any conflicting styles */
+.dataframe thead th {
+    font-size: 11px !important;
+}
+
+.dataframe th span {
+    font-size: 11px !important;
+}
+
+.dataframe td {
+    padding: 14px 12px !important;
+    border-bottom: 1px solid #f0f2f5 !important;
+    text-align: center !important;
+    vertical-align: middle !important;
+    font-size: 15px !important;
+    color: #2c3e50 !important;
+    line-height: 1.4 !important;
+}
+
+.dataframe tr:hover td {
+    background-color: #f8f9fa !important;
+    transition: background-color 0.2s ease !important;
+}
+
+.dataframe tr:nth-child(even) td {
+    background-color: #fdfdfd !important;
+}
+
+/* Enhanced hyperlinks in table */
+.dataframe a {
+    color: #0066cc !important;
+    text-decoration: underline !important;
+    font-weight: 500 !important;
+    transition: all 0.2s ease !important;
+    border-radius: 4px !important;
+    padding: 2px 6px !important;
+    display: inline-block !important;
+}
+
+.dataframe a:hover {
+    color: #004499 !important;
+    background-color: rgba(0, 102, 204, 0.1) !important;
+    text-decoration: underline !important;
+    transform: translateY(-1px) !important;
+}
+
+.dataframe a:visited {
+    color: #5a6c7d !important;
+}
+
+/* Model name styling (assuming the first column contains model names) */
+.dataframe td:first-child {
+    font-weight: 600 !important;
+    color: #1a202c !important;
+    text-align: left !important;
+    padding-left: 16px !important;
+}
+
+/* Score highlighting */
+.dataframe td:last-child {
+    font-weight: 600 !important;
+    font-size: 16px !important;
+}
+
+#models-to-add-text {
+    font-size: 18px !important;
+}
+
+#citation-button span {
+    font-size: 16px !important;
+}
+
+#citation-button textarea {
+    font-size: 16px !important;
+}
+
+#citation-button > label > button {
+    margin: 6px;
+    transform: scale(1.3);
+}
+
+/* Citation section styling */
+#citation-textbox textarea {
+    background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%) !important;
+    border: 2px solid #dee2e6 !important;
+    border-radius: 12px !important;
+    font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
+    font-size: 12px !important;
+    padding: 20px !important;
+    line-height: 1.6 !important;
+    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1) !important;
+}
+
+#citation-textbox label > button {
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+    border: none !important;
+    border-radius: 8px !important;
+    color: white !important;
+    padding: 8px 16px !important;
+    margin: 8px !important;
+    transform: scale(1.1);
+    transition: transform 0.2s ease;
+}
+
+#citation-textbox label > button:hover {
+    transform: scale(1.15) !important;
+}
+
+#leaderboard-table {
+    margin-top: 25px !important;
+}
+
+#leaderboard-table-lite {
+    margin-top: 25px !important;
+}
+
+#search-bar-table-box > div:first-child {
+    background: none;
+    border: none;
+}
+
+#search-bar {
+    padding: 0px;
+}
+
+/* Enhanced table column widths and responsiveness */
+#leaderboard-table td:nth-child(2),
+#leaderboard-table th:nth-child(2) {
+    max-width: 400px;
+    overflow: hidden;
+    text-overflow: ellipsis;
+    white-space: nowrap;
+}
+
+/* Responsive table improvements */
+.dataframe {
+    width: 100% !important;
+    margin: 16px 0 !important;
+}
+
+/* Better mobile responsiveness */
+@media (max-width: 768px) {
+    .dataframe th,
+    .dataframe td {
+        padding: 8px 6px !important;
+        font-size: 13px !important;
+    }
+
+    .dataframe th {
+        font-size: 12px !important;
+    }
+
+    #leaderboard-table td:nth-child(2),
+    #leaderboard-table th:nth-child(2) {
+        max-width: 200px;
+    }
+}
+
+/* Rank column special styling */
+.dataframe td:nth-child(1) {
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+    color: white !important;
                font-weight: 700 !important;
         
     | 
| 204 | 
         
            +
                font-size: 16px !important;
         
     | 
| 205 | 
         
            +
                border-radius: 8px !important;
         
     | 
| 206 | 
         
            +
                margin: 4px !important;
         
     | 
| 207 | 
         
            +
                min-width: 40px !important;
         
     | 
| 208 | 
         
            +
            }
         
     | 
| 209 | 
         
            +
             
     | 
| 210 | 
         
            +
            .dataframe tr:nth-child(1) td:nth-child(1) {
         
     | 
| 211 | 
         
            +
                background: linear-gradient(135deg, #ffd700 0%, #ffed4e 100%) !important;
         
     | 
| 212 | 
         
            +
                color: #8b4513 !important;
         
     | 
| 213 | 
         
            +
            }
         
     | 
| 214 | 
         
            +
             
     | 
| 215 | 
         
            +
            .dataframe tr:nth-child(2) td:nth-child(1) {
         
     | 
| 216 | 
         
            +
                background: linear-gradient(135deg, #c0c0c0 0%, #e8e8e8 100%) !important;
         
     | 
| 217 | 
         
            +
                color: #4a4a4a !important;
         
     | 
| 218 | 
         
            +
            }
         
     | 
| 219 | 
         
            +
             
     | 
| 220 | 
         
            +
            .dataframe tr:nth-child(3) td:nth-child(1) {
         
     | 
| 221 | 
         
            +
                background: linear-gradient(135deg, #cd7f32 0%, #daa520 100%) !important;
         
     | 
| 222 | 
         
            +
                color: white !important;
         
     | 
| 223 | 
         
            +
            }
         
     | 
| 224 | 
         
            +
             
     | 
| 225 | 
         
            +
            .tab-buttons button {
         
     | 
| 226 | 
         
            +
                font-size: 20px;
         
     | 
| 227 | 
         
            +
            }
         
     | 
| 228 | 
         
            +
             
     | 
| 229 | 
         
            +
            #scale-logo {
         
     | 
| 230 | 
         
            +
                border-style: none !important;
         
     | 
| 231 | 
         
            +
                box-shadow: none;
         
     | 
| 232 | 
         
            +
                display: block;
         
     | 
| 233 | 
         
            +
                margin-left: auto;
         
     | 
| 234 | 
         
            +
                margin-right: auto;
         
     | 
| 235 | 
         
            +
                max-width: 600px;
         
     | 
| 236 | 
         
            +
            }
         
     | 
| 237 | 
         
            +
             
     | 
| 238 | 
         
            +
            #scale-logo .download {
         
     | 
| 239 | 
         
            +
                display: none;
         
     | 
| 240 | 
         
            +
            }
         
     | 
| 241 | 
         
            +
            #filter_type{
         
     | 
| 242 | 
         
            +
                border: 0;
         
     | 
| 243 | 
         
            +
                padding-left: 0;
         
     | 
| 244 | 
         
            +
                padding-top: 0;
         
     | 
| 245 | 
         
            +
            }
         
     | 
| 246 | 
         
            +
            #filter_type label {
         
     | 
| 247 | 
         
            +
                display: flex;
         
     | 
| 248 | 
         
            +
            }
         
     | 
| 249 | 
         
            +
            #filter_type label > span{
         
     | 
| 250 | 
         
            +
                margin-top: var(--spacing-lg);
         
     | 
| 251 | 
         
            +
                margin-right: 0.5em;
         
     | 
| 252 | 
         
            +
            }
         
     | 
| 253 | 
         
            +
            #filter_type label > .wrap{
         
     | 
| 254 | 
         
            +
                width: 103px;
         
     | 
| 255 | 
         
            +
            }
         
     | 
| 256 | 
         
            +
            #filter_type label > .wrap .wrap-inner{  
         
     | 
| 257 | 
         
            +
                padding: 2px;
         
     | 
| 258 | 
         
            +
            }
         
     | 
| 259 | 
         
            +
            #filter_type label > .wrap .wrap-inner input{
         
     | 
| 260 | 
         
            +
                width: 1px
         
     | 
| 261 | 
         
            +
            }
         
     | 
| 262 | 
         
            +
            #filter-columns-type{
         
     | 
| 263 | 
         
            +
                border:0;
         
     | 
| 264 | 
         
            +
                padding:0.5;
         
     | 
| 265 | 
         
            +
            }
         
     | 
| 266 | 
         
            +
            #filter-columns-size{
         
     | 
| 267 | 
         
            +
                border:0;
         
     | 
| 268 | 
         
            +
                padding:0.5;
         
     | 
| 269 | 
         
            +
            }
         
     | 
| 270 | 
         
            +
            #box-filter > .form{
         
     | 
| 271 | 
         
            +
                border: 0
         
     | 
| 272 | 
         
            +
            }
         
     | 
| 273 | 
         
            +
            """
         
     | 
| 274 | 
         
            +
             
     | 
| 275 | 
         
            +
             
     | 
| 276 | 
         
            +
            get_window_url_params = """
         
     | 
| 277 | 
         
            +
                function(url_params) {
         
     | 
| 278 | 
         
            +
                    const params = new URLSearchParams(window.location.search);
         
     | 
| 279 | 
         
            +
                    url_params = Object.fromEntries(params);
         
     | 
| 280 | 
         
            +
                    return url_params;
         
     | 
| 281 | 
         
            +
                }
         
     | 
| 282 | 
         
            +
                """
         
     | 
    	
        src/display/formatting.py
    ADDED
    
    | 
         @@ -0,0 +1,27 @@ 
     | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
| 
         | 
|
def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


def make_clickable_model(model_name):
    link = f"https://huggingface.co/{model_name}"
    return model_hyperlink(link, model_name)


def styled_error(error):
    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"


def styled_warning(warn):
    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"


def styled_message(message):
    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"


def has_no_nan_values(df, columns):
    return df[columns].notna().all(axis=1)


def has_nan_values(df, columns):
    return df[columns].isna().any(axis=1)
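A minimal usage sketch of these helpers (the model id and message below are illustrative placeholders, not values from this commit). The functions return HTML strings that the Space renders in markdown/HTML components:

from src.display.formatting import make_clickable_model, styled_message, has_no_nan_values
import pandas as pd

# "org/my-model" is a placeholder id, not a real submission
print(make_clickable_model("org/my-model"))   # dotted-underline link to https://huggingface.co/org/my-model
print(styled_message("Submission queued"))    # green, centered <p> element

df = pd.DataFrame({"Average ⬆️": [71.3, None]})
print(has_no_nan_values(df, ["Average ⬆️"]).tolist())  # [True, False]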
    	
src/display/utils.py ADDED
@@ -0,0 +1,110 @@
from dataclasses import dataclass, make_dataclass
from enum import Enum

import pandas as pd

from src.about import Tasks


def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes are for user-facing column names,
# to avoid having to change them all around the code
# when a modification is needed
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
for task in Tasks:
    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# Model information
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

# We use make_dataclass to dynamically fill the scores from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", "Original")
    status = ColumnContent("status", "str", True)


## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "RL-tuned" in type or "🟦" in type:
            return ModelType.RL
        if "instruction-tuned" in type or "⭕" in type:
            return ModelType.IFT
        return ModelType.Unknown


class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")


class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    Unknown = ModelDetails("?")

    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        return Precision.Unknown


# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

BENCHMARK_COLS = [t.value.col_name for t in Tasks]
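A short sketch of how the dynamically built AutoEvalColumn is meant to be consumed; the printed values follow directly from the definitions above:

from src.display.utils import AutoEvalColumn, COLS, EVAL_COLS, fields

# Defaults of the generated dataclass are ColumnContent instances, so display
# code can refer to user-facing column names symbolically.
print(AutoEvalColumn.model.name)     # "Model"
print(AutoEvalColumn.average.name)   # "Average ⬆️"
print([c.name for c in fields(AutoEvalColumn) if c.never_hidden])  # ["T", "Model"]
print(COLS)       # all non-hidden column names, including one per task in Tasks
print(EVAL_COLS)  # ["model", "revision", "private", "precision", "weight_type", "status"]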
    	
src/envs.py ADDED
@@ -0,0 +1,25 @@
import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

API = HfApi(token=TOKEN)
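As a hypothetical illustration (not code from this commit), these constants are the kind of values the rest of the Space would pass to huggingface_hub when mirroring the requests dataset into the local cache path:

from huggingface_hub import snapshot_download

from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH, TOKEN

# Sketch only: sync the requests dataset into the local eval-queue cache.
snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", token=TOKEN)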
    	
src/json_leaderboard.py ADDED
@@ -0,0 +1,121 @@
import json
import pandas as pd
from pathlib import Path


def load_leaderboard_from_json(json_path="leaderboard_data.json"):
    """Load leaderboard data from JSON file"""
    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        return data['leaderboard']
    except FileNotFoundError:
        print(f"JSON file {json_path} not found")
        return []
    except json.JSONDecodeError:
        print(f"Error decoding JSON file {json_path}")
        return []


def create_leaderboard_df(json_path="leaderboard_data.json"):
    """Create a pandas DataFrame from JSON leaderboard data"""
    leaderboard_data = load_leaderboard_from_json(json_path)

    if not leaderboard_data:
        return pd.DataFrame()

    # Convert to DataFrame
    df = pd.DataFrame(leaderboard_data)

    # Sort by the Overall score (descending)
    df = df.sort_values('Overall', ascending=False).reset_index(drop=True)

    # Add ranking icons and make model names clickable links to papers
    def add_ranking_icon_and_link(index, model_name, paper_link):
        if index == 0:
            return f'🥇 <a href="{paper_link}" target="_blank">{model_name}</a>'
        elif index == 1:
            return f'🥈 <a href="{paper_link}" target="_blank">{model_name}</a>'
        elif index == 2:
            return f'🥉 <a href="{paper_link}" target="_blank">{model_name}</a>'
        else:
            return f'<a href="{paper_link}" target="_blank">{model_name}</a>'

    # Format the DataFrame for display
    display_df = pd.DataFrame({
        'Model': [add_ranking_icon_and_link(i, model, link) for i, (model, link) in enumerate(zip(df['model'], df['link']))],
        'Release Date': df['release_date'],
        'HF Model': df['hf'].apply(lambda x: f'<a href="{x}" target="_blank">🤗</a>' if x != "-" else "-"),
        'Open Source': df['open_source'].apply(lambda x: '✓' if x else '✗'),

        'Overall': df['Overall'].apply(lambda x: f"{x:.2f}"),

        'Style': df['Style'].apply(lambda x: f"{x:.2f}"),
        'World Knowledge': df['World Knowledge'].apply(lambda x: f"{x:.2f}"),
        'Logical Reasoning': df['Logical Reasoning'].apply(lambda x: f"{x:.2f}"),
        'Text': df['Text'].apply(lambda x: f"{x:.2f}"),

        'Attribute-Overall': df['Attribute-Overall'].apply(lambda x: f"{x:.2f}"),
        'Quantity': df['Quantity'].apply(lambda x: f"{x:.2f}"),
        'Expression': df['Expression'].apply(lambda x: f"{x:.2f}"),
        'Material': df['Material'].apply(lambda x: f"{x:.2f}"),
        'Size': df['Size'].apply(lambda x: f"{x:.2f}"),
        'Shape': df['Shape'].apply(lambda x: f"{x:.2f}"),
        'Color': df['Color'].apply(lambda x: f"{x:.2f}"),

        'Action-Overall': df['Action-Overall'].apply(lambda x: f"{x:.2f}"),
        'Hand': df['Hand'].apply(lambda x: f"{x:.2f}"),
        'Full body': df['Full body'].apply(lambda x: f"{x:.2f}"),
        'Animal': df['Animal'].apply(lambda x: f"{x:.2f}"),
        'Non Contact': df['Non Contact'].apply(lambda x: f"{x:.2f}"),
        'Contact': df['Contact'].apply(lambda x: f"{x:.2f}"),
        'State': df['State'].apply(lambda x: f"{x:.2f}"),

        'Relationship-Overall': df['Relationship-Overall'].apply(lambda x: f"{x:.2f}"),
        'Composition': df['Composition'].apply(lambda x: f"{x:.2f}"),
        'Similarity': df['Similarity'].apply(lambda x: f"{x:.2f}"),
        'Inclusion': df['Inclusion'].apply(lambda x: f"{x:.2f}"),
        'Comparison': df['Comparison'].apply(lambda x: f"{x:.2f}"),

        'Compound-Overall': df['Compound-Overall'].apply(lambda x: f"{x:.2f}"),
        'Imagination': df['Imagination'].apply(lambda x: f"{x:.2f}"),
        'Feature matching': df['Feature matching'].apply(lambda x: f"{x:.2f}"),

        'Grammar-Overall': df['Grammar-Overall'].apply(lambda x: f"{x:.2f}"),
        'Pronoun Reference': df['Pronoun Reference'].apply(lambda x: f"{x:.2f}"),
        'Consistency': df['Consistency'].apply(lambda x: f"{x:.2f}"),
        'Negation': df['Negation'].apply(lambda x: f"{x:.2f}"),

        'Layout-Overall': df['Layout-Overall'].apply(lambda x: f"{x:.2f}"),
        '2D': df['2D'].apply(lambda x: f"{x:.2f}"),
        '3D': df['3D'].apply(lambda x: f"{x:.2f}"),
    })

    return display_df


def get_leaderboard_stats(json_path="leaderboard_data.json"):
    """Get statistics about the leaderboard"""
    leaderboard_data = load_leaderboard_from_json(json_path)

    if not leaderboard_data:
        return {}

    df = pd.DataFrame(leaderboard_data)

    stats = {
        'total_models': len(df),
        'open_source_models': df['open_source'].sum(),
    }

    return stats
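The loader above expects leaderboard_data.json to hold a top-level "leaderboard" list whose entries carry 'model', 'link', 'release_date', 'hf', 'open_source' plus the numeric score keys referenced in create_leaderboard_df ('Overall', 'Style', ..., '2D', '3D'). A minimal usage sketch, assuming that file sits at the Space root:

from src.json_leaderboard import create_leaderboard_df, get_leaderboard_stats

df = create_leaderboard_df("leaderboard_data.json")
print(df.columns.tolist()[:4])   # ['Model', 'Release Date', 'HF Model', 'Open Source']
print(get_leaderboard_stats())   # e.g. {'total_models': ..., 'open_source_models': ...}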
    	
src/leaderboard/.DS_Store ADDED
Binary file (6.15 kB)
    	
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,196 @@
| 1 | 
         
            +
            import glob
         
     | 
| 2 | 
         
            +
            import json
         
     | 
| 3 | 
         
            +
            import math
         
     | 
| 4 | 
         
            +
            import os
         
     | 
| 5 | 
         
            +
            from dataclasses import dataclass
         
     | 
| 6 | 
         
            +
             
     | 
| 7 | 
         
            +
            import dateutil
         
     | 
| 8 | 
         
            +
            import numpy as np
         
     | 
| 9 | 
         
            +
             
     | 
| 10 | 
         
            +
            from src.display.formatting import make_clickable_model
         
     | 
| 11 | 
         
            +
            from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
         
     | 
| 12 | 
         
            +
            from src.submission.check_validity import is_model_on_hub
         
     | 
| 13 | 
         
            +
             
     | 
| 14 | 
         
            +
             
     | 
| 15 | 
         
            +
            @dataclass
         
     | 
| 16 | 
         
            +
            class EvalResult:
         
     | 
| 17 | 
         
            +
                """Represents one full evaluation. Built from a combination of the result and request file for a given run.
         
     | 
| 18 | 
         
            +
                """
         
     | 
| 19 | 
         
            +
                eval_name: str # org_model_precision (uid)
         
     | 
| 20 | 
         
            +
                full_model: str # org/model (path on hub)
         
     | 
| 21 | 
         
            +
                org: str 
         
     | 
| 22 | 
         
            +
                model: str
         
     | 
| 23 | 
         
            +
                revision: str # commit hash, "" if main
         
     | 
| 24 | 
         
            +
                results: dict
         
     | 
| 25 | 
         
            +
                precision: Precision = Precision.Unknown
         
     | 
| 26 | 
         
            +
                model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
         
     | 
| 27 | 
         
            +
                weight_type: WeightType = WeightType.Original # Original or Adapter
         
     | 
| 28 | 
         
            +
                architecture: str = "Unknown" 
         
     | 
| 29 | 
         
            +
                license: str = "?"
         
     | 
| 30 | 
         
            +
                likes: int = 0
         
     | 
| 31 | 
         
            +
                num_params: int = 0
         
     | 
| 32 | 
         
            +
                date: str = "" # submission date of request file
         
     | 
| 33 | 
         
            +
                still_on_hub: bool = False
         
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
                @classmethod
         
     | 
| 36 | 
         
            +
                def init_from_json_file(self, json_filepath):
         
     | 
| 37 | 
         
            +
                    """Inits the result from the specific model result file"""
         
     | 
| 38 | 
         
            +
                    with open(json_filepath) as fp:
         
     | 
| 39 | 
         
            +
                        data = json.load(fp)
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
                    config = data.get("config")
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
                    # Precision
         
     | 
| 44 | 
         
            +
                    precision = Precision.from_str(config.get("model_dtype"))
         
     | 
| 45 | 
         
            +
             
     | 
| 46 | 
         
            +
                    # Get model and org
         
     | 
| 47 | 
         
            +
                    org_and_model = config.get("model_name", config.get("model_args", None))
         
     | 
| 48 | 
         
            +
                    org_and_model = org_and_model.split("/", 1)
         
     | 
| 49 | 
         
            +
             
     | 
| 50 | 
         
            +
                    if len(org_and_model) == 1:
         
     | 
| 51 | 
         
            +
                        org = None
         
     | 
| 52 | 
         
            +
                        model = org_and_model[0]
         
     | 
| 53 | 
         
            +
                        result_key = f"{model}_{precision.value.name}"
         
     | 
| 54 | 
         
            +
                    else:
         
     | 
| 55 | 
         
            +
                        org = org_and_model[0]
         
     | 
| 56 | 
         
            +
                        model = org_and_model[1]
         
     | 
| 57 | 
         
            +
                        result_key = f"{org}_{model}_{precision.value.name}"
         
     | 
| 58 | 
         
            +
                    full_model = "/".join(org_and_model)
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
                    still_on_hub, _, model_config = is_model_on_hub(
         
     | 
| 61 | 
         
            +
                        full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
         
     | 
| 62 | 
         
            +
                    )
         
     | 
| 63 | 
         
            +
                    architecture = "?"
         
     | 
| 64 | 
         
            +
                    if model_config is not None:
         
     | 
| 65 | 
         
            +
                        architectures = getattr(model_config, "architectures", None)
         
     | 
| 66 | 
         
            +
                        if architectures:
         
     | 
| 67 | 
         
            +
                            architecture = ";".join(architectures)
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
                    # Extract results available in this file (some results are split in several files)
         
     | 
| 70 | 
         
            +
                    results = {}
         
     | 
| 71 | 
         
            +
                    for task in Tasks:
         
     | 
| 72 | 
         
            +
                        task = task.value
         
     | 
| 73 | 
         
            +
             
     | 
| 74 | 
         
            # We average all scores of a given metric (not all metrics are present in all files)
            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
            if accs.size == 0 or any([acc is None for acc in accs]):
                continue

            mean_acc = np.mean(accs) * 100.0
            results[task.benchmark] = mean_acc

        return self(
            eval_name=result_key,
            full_model=full_model,
            org=org,
            model=model,
            results=results,
            precision=precision,
            revision=config.get("model_sha", ""),
            still_on_hub=still_on_hub,
            architecture=architecture,
        )

    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it"""
        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.model_type = ModelType.from_str(request.get("model_type", ""))
            self.weight_type = WeightType[request.get("weight_type", "Original")]
            self.license = request.get("license", "?")
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
        except Exception:
            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")

    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name
            AutoEvalColumn.precision.name: self.precision.value.name,
            AutoEvalColumn.model_type.name: self.model_type.value.name,
            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
            AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
            AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average.name: average,
            AutoEvalColumn.license.name: self.license,
            AutoEvalColumn.likes.name: self.likes,
            AutoEvalColumn.params.name: self.num_params,
            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
        }

        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict


def get_request_file_for_model(requests_path, model_name, precision):
    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
        f"{model_name}_eval_request_*.json",
    )
    request_files = glob.glob(request_files)

    # Select correct request file (precision)
    request_file = ""
    request_files = sorted(request_files, reverse=True)
    for tmp_request_file in request_files:
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
            if (
                req_content["status"] in ["FINISHED"]
                and req_content["precision"] == precision.split(".")[-1]
            ):
                request_file = tmp_request_file
    return request_file


def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

    for root, _, files in os.walk(results_path):
        # We should only have json files in model results
        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
            continue

        # Sort the files by date
        try:
            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path)

        # Store results of the same eval together
        eval_name = eval_result.eval_name
        if eval_name in eval_results.keys():
            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict()  # we test if the dict version is complete
            results.append(v)
        except KeyError:  # not all eval values present
            continue

    return results
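The reader above can be exercised on its own before wiring it into the app. A minimal sketch, assuming local "./results" and "./requests" folders (placeholder paths, not defined by this repo):

# Hypothetical smoke test for get_raw_eval_results; adjust the paths to your local copies.
from src.display.utils import AutoEvalColumn
from src.leaderboard.read_evals import get_raw_eval_results

raw = get_raw_eval_results("./results", "./requests")  # list[EvalResult]
for res in raw:
    row = res.to_dict()  # the same dict the leaderboard dataframe is built from
    print(res.eval_name, row[AutoEvalColumn.average.name])

Only results whose to_dict() succeeds (i.e. all benchmark columns present) survive the KeyError filter, so the printed rows match what the leaderboard will actually display.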
    	
src/populate.py ADDED
@@ -0,0 +1,117 @@
import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn, ModelType, Precision, WeightType
from src.leaderboard.read_evals import get_raw_eval_results
from src.about import Tasks


def load_csv_results():
    """Load results from the main-results.csv file"""
    csv_path = "main-results.csv"
    if not os.path.exists(csv_path):
        return []

    df = pd.read_csv(csv_path)
    results = []

    for _, row in df.iterrows():
        # Parse parameters - handle different formats
        param_str = str(row['Param'])
        if 'activated' in param_str:
            # Extract the activated parameter count (e.g., "2.8B activated (16B total)")
            param_value = float(param_str.split('B')[0])
        elif 'B' in param_str:
            # Simple format (e.g., "9B")
            param_value = float(param_str.replace('B', ''))
        else:
            param_value = 0

        # Convert CSV data to the format expected by the leaderboard
        data_dict = {
            AutoEvalColumn.model.name: make_clickable_model(row['Model']),
            AutoEvalColumn.average.name: row['ACC'],  # Using ACC as the average score
            AutoEvalColumn.params.name: param_value,
            AutoEvalColumn.license.name: "Open Source" if row['Open Source?'] == 'Yes' else "Proprietary",
            AutoEvalColumn.model_type.name: ModelType.FT.value.name,  # Default to fine-tuned
            AutoEvalColumn.precision.name: Precision.float16.value.name,  # Default precision
            AutoEvalColumn.weight_type.name: WeightType.Original.value.name,
            AutoEvalColumn.architecture.name: "Unknown",
            AutoEvalColumn.still_on_hub.name: True,
            AutoEvalColumn.revision.name: "",
            AutoEvalColumn.likes.name: 0,
            AutoEvalColumn.model_type_symbol.name: ModelType.FT.value.symbol,
        }

        # Add task-specific scores (required by the leaderboard); use the same
        # column names as EvalResult.to_dict so the NaN filter below keeps these rows
        for task in Tasks:
            data_dict[task.value.col_name] = row['ACC']  # Use the same ACC score for all tasks

        results.append(data_dict)

    return results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    # If no JSON data was found, fall back to the CSV results
    if not all_data_json:
        all_data_json = load_csv_results()

    if not all_data_json:
        # Return an empty dataframe if no data was found
        return pd.DataFrame(columns=cols)

    df = pd.DataFrame.from_records(all_data_json)

    # Only include columns that exist in the dataframe
    existing_cols = [col for col in cols if col in df.columns]

    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[existing_cols].round(decimals=2)

    # Filter out rows for which any of the benchmarks has not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df


def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # This is a folder of request files
            sub_entries = [e for e in os.listdir(os.path.join(save_path, entry)) if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
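load_csv_results only needs a main-results.csv with the Model, Param, ACC and Open Source? columns it reads above. A minimal sketch of the CSV fallback path (the file contents are illustrative only, not real benchmark data):

# Hypothetical fixture exercising the CSV fallback in src/populate.py.
import pandas as pd
from src.populate import load_csv_results

pd.DataFrame(
    {
        "Model": ["org/demo-model"],   # placeholder repo id
        "Param": ["9B"],
        "ACC": [61.3],                 # placeholder score
        "Open Source?": ["Yes"],
    }
).to_csv("main-results.csv", index=False)

rows = load_csv_results()
print(rows[0])  # one leaderboard row, with the same ACC copied into every benchmark column

Because the fallback fills every benchmark column, these rows pass the has_no_nan_values filter in get_leaderboard_df just like JSON-derived results.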
    	
src/submission/check_validity.py ADDED
@@ -0,0 +1,99 @@
import json
import os
import re
from collections import defaultdict
from datetime import datetime, timedelta, timezone

import huggingface_hub
from huggingface_hub import ModelCard
from huggingface_hub.hf_api import ModelInfo
from transformers import AutoConfig
from transformers.models.auto.tokenization_auto import AutoTokenizer


def check_model_card(repo_id: str) -> tuple[bool, str]:
    """Checks if the model card and license exist and have been filled"""
    try:
        card = ModelCard.load(repo_id)
    except huggingface_hub.utils.EntryNotFoundError:
        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."

    # Enforce license metadata
    if card.data.license is None:
        if not ("license_name" in card.data and "license_link" in card.data):
            return False, (
                "License not found. Please add a license to your model card using the `license` metadata or a"
                " `license_name`/`license_link` pair."
            )

    # Enforce card content
    if len(card.text) < 200:
        return False, "Please add a description to your model card, it is too short."

    return True, ""


def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, object]:
    """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
    try:
        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
        if test_tokenizer:
            try:
                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
            except ValueError as e:
                return (
                    False,
                    f"uses a tokenizer which is not in a transformers release: {e}",
                    None,
                )
            except Exception:
                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
        return True, None, config

    except ValueError:
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
            None,
        )

    except Exception:
        return False, "was not found on hub!", None


def get_model_size(model_info: ModelInfo, precision: str):
    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
    try:
        model_size = round(model_info.safetensors["total"] / 1e9, 3)
    except (AttributeError, TypeError):
        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py

    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
    model_size = size_factor * model_size
    return model_size


def get_model_arch(model_info: ModelInfo):
    """Gets the model architecture from the configuration"""
    return model_info.config.get("architectures", "Unknown")


def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
    """Gather a list of already submitted models to avoid duplicates"""
    depth = 1
    file_names = []
    users_to_submission_dates = defaultdict(list)

    for root, _, files in os.walk(requested_models_dir):
        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
        if current_depth == depth:
            for file in files:
                if not file.endswith(".json"):
                    continue
                with open(os.path.join(root, file), "r") as f:
                    info = json.load(f)
                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")

                    # Select organisation
                    if info["model"].count("/") == 0 or "submitted_time" not in info:
                        continue
                    organisation, _ = info["model"].split("/")
                    users_to_submission_dates[organisation].append(info["submitted_time"])

    return set(file_names), users_to_submission_dates
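These checks can also be run outside the submission flow. A short sketch, assuming the repo id is just an example and TOKEN comes from src.envs as it does in submit.py below:

# Hypothetical standalone use of the validity checks.
from src.envs import TOKEN
from src.submission.check_validity import check_model_card, is_model_on_hub

card_ok, card_err = check_model_card("org/demo-model")  # placeholder repo id
on_hub, hub_msg, config = is_model_on_hub(
    model_name="org/demo-model", revision="main", token=TOKEN, test_tokenizer=True
)
if not (card_ok and on_hub):
    print(card_err or hub_msg)

Note that is_model_on_hub returns a three-element tuple (status, message, loaded config), which is why callers in submit.py unpack it with a trailing underscore.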
    	
src/submission/submit.py ADDED
@@ -0,0 +1,119 @@
import json
import os
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None


def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    weight_type: str,
    model_type: str,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Does the model actually exist?
    if revision == "":
        revision = "main"

    # Is the model on the hub?
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    if not weight_type == "Adapter":
        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    # Is the model info correctly filled?
    try:
        model_info = API.model_info(repo_id=model, revision=revision)
    except Exception:
        return styled_error("Could not get your model information. Please fill it up properly.")

    model_size = get_model_size(model_info=model_info, precision=precision)

    # Were the model card and license filled?
    try:
        license = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model")

    modelcard_OK, error_msg = check_model_card(model)
    if not modelcard_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": license,
        "private": False,
    }

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
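add_new_eval is presumably the handler behind the submission form in app.py; called directly it looks like the sketch below. All field values are placeholders, and a real call will hit the Hub API and push a request file to QUEUE_REPO, so only run it against repos and tokens you control.

# Hypothetical direct call to the submission handler.
from src.submission.submit import add_new_eval

html = add_new_eval(
    model="org/demo-model",     # placeholder repo id
    base_model="",
    revision="main",
    precision="float16",        # precision.split(" ")[0] keeps only the first word
    weight_type="Original",
    model_type="fine-tuned",    # must be non-empty or a styled error is returned
)
print(html)  # styled success/warning/error HTML shown in the UI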