BenchmarkBot committed on
Commit c8763bd • 1 Parent(s): a77818e

removed boilerplate

.gitignore ADDED
@@ -0,0 +1,5 @@
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *ipynb
+ .vscode/
README.md CHANGED
@@ -1,12 +1,12 @@
  ---
- title: Open Llm Perf Leaderboard
- emoji: 🌍
  colorFrom: green
- colorTo: red
  sdk: gradio
- sdk_version: 3.35.2
  app_file: app.py
- pinned: false
  license: apache-2.0
  ---

  ---
+ title: Open LLM Perf Leaderboard
+ emoji: 🏆
  colorFrom: green
+ colorTo: indigo
  sdk: gradio
+ sdk_version: 3.27.0
  app_file: app.py
+ pinned: true
  license: apache-2.0
  ---
 
app.py ADDED
@@ -0,0 +1,108 @@
+ import os
+ import gradio as gr
+ import pandas as pd
+ from huggingface_hub import HfApi
+ from huggingface_hub import Repository
+ from apscheduler.schedulers.background import BackgroundScheduler
+
+ from src.assets.text_content import *
+ from src.assets.css_html_js import custom_css, get_window_url_params
+
+ OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
+
+ LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
+ LLM_PERF_DATASET_REPO = "optimum/llm-perf"
+
+
+ api = HfApi()
+
+
+ def restart_space():
+     api.restart_space(
+         repo_id=LLM_PERF_LEADERBOARD_REPO, token=OPTIMUM_TOKEN
+     )
+
+
+ def load_all_info_from_hub():
+     llm_perf_repo = None
+     if OPTIMUM_TOKEN:
+         llm_perf_repo = Repository(
+             local_dir="./llm-perf/",
+             clone_from=LLM_PERF_DATASET_REPO,
+             token=OPTIMUM_TOKEN,
+             repo_type="dataset",
+         )
+         llm_perf_repo.git_pull()
+
+     return llm_perf_repo
+
+
+ llm_perf_repo = load_all_info_from_hub()
+
+
+ def has_no_nan_values(df, columns):
+     return df[columns].notna().all(axis=1)
+
+
+ def has_nan_values(df, columns):
+     return df[columns].isna().any(axis=1)
+
+
+ def get_leaderboard_df():
+     if llm_perf_repo:
+         llm_perf_repo.git_pull()
+
+     df = pd.read_csv("./llm-perf/reports/cuda_1_100/inference_report.csv")
+
+     return df
+
+
+ original_df = get_leaderboard_df()
+ leaderboard_df = original_df.copy()
+
+
+ def refresh():
+     leaderboard_df = get_leaderboard_df()
+
+     return leaderboard_df
+
+
+ demo = gr.Blocks(css=custom_css)
+ with demo:
+     gr.HTML(TITLE)
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 LLM-Perf Benchmark", elem_id="llm-perf-benchmark-tab-table", id=0):
+             leaderboard_table_lite = gr.components.Dataframe(
+                 value=leaderboard_df,
+                 headers=leaderboard_df.columns.tolist(),
+                 # datatype=TYPES_LITE,
+                 max_rows=None,
+                 elem_id="leaderboard-table-lite",
+             )
+
+     with gr.Row():
+         with gr.Column():
+             with gr.Accordion("📙 Citation", open=False):
+                 citation_button = gr.Textbox(
+                     value=CITATION_BUTTON_TEXT,
+                     label=CITATION_BUTTON_LABEL,
+                     elem_id="citation-button",
+                 ).style(show_copy_button=True)
+         with gr.Column():
+             with gr.Accordion("✨ CHANGELOG", open=False):
+                 changelog = gr.Markdown(
+                     CHANGELOG_TEXT, elem_id="changelog-text")
+
+     dummy = gr.Textbox(visible=False)
+     demo.load(
+         dummy,
+         tabs,
+         _js=get_window_url_params,
+     )
+
+ scheduler = BackgroundScheduler()
+ scheduler.add_job(restart_space, "interval", seconds=3600)
+ scheduler.start()
+ demo.queue(concurrency_count=40).launch()
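
The auto-refresh behaviour in `app.py` comes from pairing APScheduler with the Hub API: the Space restarts itself every hour, which re-runs the module from the top and re-pulls the benchmark dataset. A minimal self-contained sketch of that pattern, assuming a placeholder Space id and an `HF_TOKEN` environment variable rather than this repo's real values:

```python
import os

from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi

api = HfApi()


def restart_space():
    # Restarting the Space re-executes the app module from the top,
    # which re-clones/pulls the dataset and rebuilds the DataFrame.
    api.restart_space(repo_id="user/my-space", token=os.environ.get("HF_TOKEN"))


scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)  # once an hour
scheduler.start()  # runs in a daemon thread; the Gradio app keeps serving
```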
requirements.txt ADDED
@@ -0,0 +1,70 @@
+ aiofiles==23.1.0
+ aiohttp==3.8.4
+ aiosignal==1.3.1
+ altair==4.2.2
+ anyio==3.6.2
+ APScheduler==3.10.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ certifi==2022.12.7
+ charset-normalizer==3.1.0
+ click==8.1.3
+ contourpy==1.0.7
+ cycler==0.11.0
+ datasets==2.12.0
+ entrypoints==0.4
+ fastapi==0.95.1
+ ffmpy==0.3.0
+ filelock==3.11.0
+ fonttools==4.39.3
+ frozenlist==1.3.3
+ fsspec==2023.4.0
+ gradio==3.27.0
+ gradio_client==0.1.3
+ h11==0.14.0
+ httpcore==0.17.0
+ httpx==0.24.0
+ huggingface-hub==0.13.4
+ idna==3.4
+ Jinja2==3.1.2
+ jsonschema==4.17.3
+ kiwisolver==1.4.4
+ linkify-it-py==2.0.0
+ markdown-it-py==2.2.0
+ MarkupSafe==2.1.2
+ matplotlib==3.7.1
+ mdit-py-plugins==0.3.3
+ mdurl==0.1.2
+ multidict==6.0.4
+ numpy==1.24.2
+ orjson==3.8.10
+ packaging==23.1
+ pandas==2.0.0
+ Pillow==9.5.0
+ plotly==5.14.1
+ pyarrow==11.0.0
+ pydantic==1.10.7
+ pydub==0.25.1
+ pyparsing==3.0.9
+ pyrsistent==0.19.3
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytz==2023.3
+ pytz-deprecation-shim==0.1.0.post0
+ PyYAML==6.0
+ requests==2.28.2
+ semantic-version==2.10.0
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.26.1
+ toolz==0.12.0
+ tqdm==4.65.0
+ transformers==4.28.1
+ typing_extensions==4.5.0
+ tzdata==2023.3
+ tzlocal==4.3
+ uc-micro-py==1.0.1
+ urllib3==1.26.15
+ uvicorn==0.21.1
+ websockets==11.0.1
+ yarl==1.8.2
src/assets/css_html_js.py ADDED
@@ -0,0 +1,87 @@
+ custom_css = """
+ #changelog-text {
+     font-size: 16px !important;
+ }
+
+ #changelog-text h2 {
+     font-size: 18px !important;
+ }
+
+ .markdown-text {
+     font-size: 16px !important;
+ }
+
+ #models-to-add-text {
+     font-size: 18px !important;
+ }
+
+ #citation-button span {
+     font-size: 16px !important;
+ }
+
+ #citation-button textarea {
+     font-size: 16px !important;
+ }
+
+ #citation-button > label > button {
+     margin: 6px;
+     transform: scale(1.3);
+ }
+
+ #leaderboard-table {
+     margin-top: 15px
+ }
+
+ #leaderboard-table-lite {
+     margin-top: 15px
+ }
+
+ #search-bar-table-box > div:first-child {
+     background: none;
+     border: none;
+ }
+
+ #search-bar {
+     padding: 0px;
+     width: 30%;
+ }
+
+ /* Hides the final AutoEvalColumn */
+ #llm-benchmark-tab-table table td:last-child,
+ #llm-benchmark-tab-table table th:last-child {
+     display: none;
+ }
+
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+ table td:first-child,
+ table th:first-child {
+     max-width: 400px;
+     overflow: auto;
+     white-space: nowrap;
+ }
+
+ .tab-buttons button {
+     font-size: 20px;
+ }
+
+ #scale-logo {
+     border-style: none !important;
+     box-shadow: none;
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     max-width: 600px;
+ }
+
+ #scale-logo .download {
+     display: none;
+ }
+ """
+
+ get_window_url_params = """
+     function(url_params) {
+         const params = new URLSearchParams(window.location.search);
+         url_params = Object.fromEntries(params);
+         return url_params;
+     }
+     """
src/assets/hardcoded_evals.py ADDED
@@ -0,0 +1,38 @@
+ from src.utils_display import AutoEvalColumn, model_hyperlink
+
+ gpt4_values = {
+     AutoEvalColumn.model.name: model_hyperlink("https://arxiv.org/abs/2303.08774", "gpt4"),
+     AutoEvalColumn.revision.name: "tech report",
+     AutoEvalColumn.is_8bit.name: None,
+     AutoEvalColumn.average.name: 84.3,
+     AutoEvalColumn.arc.name: 96.3,
+     AutoEvalColumn.hellaswag.name: 95.3,
+     AutoEvalColumn.mmlu.name: 86.4,
+     AutoEvalColumn.truthfulqa.name: 59.0,
+     AutoEvalColumn.dummy.name: "GPT-4",
+ }
+
+ gpt35_values = {
+     AutoEvalColumn.model.name: model_hyperlink("https://arxiv.org/abs/2303.08774", "gpt3.5"),
+     AutoEvalColumn.revision.name: "tech report",
+     AutoEvalColumn.is_8bit.name: None,
+     AutoEvalColumn.average.name: 71.9,
+     AutoEvalColumn.arc.name: 85.2,
+     AutoEvalColumn.hellaswag.name: 85.5,
+     AutoEvalColumn.mmlu.name: 70.0,
+     AutoEvalColumn.truthfulqa.name: 47.0,
+     AutoEvalColumn.dummy.name: "GPT-3.5",
+ }
+
+ baseline = {
+     AutoEvalColumn.model.name: "<p>Baseline</p>",
+     AutoEvalColumn.revision.name: "N/A",
+     AutoEvalColumn.is_8bit.name: None,
+     AutoEvalColumn.average.name: 25.0,
+     AutoEvalColumn.arc.name: 25.0,
+     AutoEvalColumn.hellaswag.name: 25.0,
+     AutoEvalColumn.mmlu.name: 25.0,
+     AutoEvalColumn.truthfulqa.name: 25.0,
+     AutoEvalColumn.dummy.name: "baseline",
+ }
+
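
`src/assets/hardcoded_evals.py` defines reference rows keyed by the leaderboard's column names, but nothing imports it in this commit. A hedged sketch of how such dicts would typically be merged into the table with pandas (the empty `leaderboard_df` here is a stand-in, not code from this repo):

```python
import pandas as pd

from src.assets.hardcoded_evals import baseline, gpt4_values, gpt35_values

# Stand-in frame with the same columns as the dicts above.
leaderboard_df = pd.DataFrame(columns=list(gpt4_values.keys()))

# Each dict becomes one row; ignore_index renumbers the combined frame.
reference_rows = pd.DataFrame([gpt4_values, gpt35_values, baseline])
leaderboard_df = pd.concat([leaderboard_df, reference_rows], ignore_index=True)
```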
src/assets/text_content.py ADDED
@@ -0,0 +1,149 @@
+ CHANGELOG_TEXT = f"""
+ ## [2023-06-19]
+ - Added model type column
+ - Hid revision and 8bit columns since all models are the same atm
+
+ ## [2023-06-16]
+ - Refactored code base
+ - Added new columns: number of parameters, hub likes, license
+
+ ## [2023-06-13]
+ - Adjust description for TruthfulQA
+
+ ## [2023-06-12]
+ - Add Human & GPT-4 Evaluations
+
+ ## [2023-06-05]
+ - Increase concurrent thread count to 40
+ - Search models on ENTER
+
+ ## [2023-06-02]
+ - Add a typeahead search bar
+ - Use webhooks to automatically spawn a new Space when someone opens a PR
+ - Start recording `submitted_time` for eval requests
+ - Limit AutoEvalColumn max-width
+
+ ## [2023-05-30]
+ - Add a citation button
+ - Simplify Gradio layout
+
+ ## [2023-05-29]
+ - Auto-restart every hour for the latest results
+ - Sync with the internal version (minor style changes)
+
+ ## [2023-05-24]
+ - Add a baseline that has 25.0 for all values
+ - Add CHANGELOG
+
+ ## [2023-05-23]
+ - Fix a CSS issue that made the leaderboard hard to read in dark mode
+
+ ## [2023-05-22]
+ - Display a success/error message after submitting evaluation requests
+ - Reject duplicate submissions
+ - Do not display entries with incomplete results
+ - Display separate queues for jobs in RUNNING, PENDING, and FINISHED status
+
+ ## [2023-05-15]
+ - Fix a typo: from "TruthQA" to "TruthfulQA"
+
+ ## [2023-05-10]
+ - Fix a bug that prevented auto-refresh
+
+ ## [2023-05-10]
+ - Release the leaderboard to the public
+ """
+
+ TITLE = """<h1 align="center" id="space-title">🤗 Open LLM Leaderboard</h1>"""
+
+ INTRODUCTION_TEXT = f"""
+ 📐 The 🤗 Open LLM Leaderboard aims to track, rank and evaluate LLMs and chatbots as they are released.
+
+ 🤗 Anyone from the community can submit a model for automated evaluation on the 🤗 GPU cluster, as long as it is a 🤗 Transformers model with weights on the Hub. We also support evaluation of models with delta-weights for non-commercial licensed models, such as LLaMa.
+ """
+
+ LLM_BENCHMARKS_TEXT = f"""
+ With the plethora of large language models (LLMs) and chatbots being released week upon week, often with grandiose claims of their performance, it can be hard to identify the genuine progress being made by the open-source community and which model is the current state of the art.
+
+ 📈 We evaluate models on 4 key benchmarks from the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank">Eleuther AI Language Model Evaluation Harness</a>, a unified framework to test generative language models on a large number of different evaluation tasks.
+
+ - <a href="https://arxiv.org/abs/1803.05457" target="_blank">AI2 Reasoning Challenge</a> (25-shot) - a set of grade-school science questions.
+ - <a href="https://arxiv.org/abs/1905.07830" target="_blank">HellaSwag</a> (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
+ - <a href="https://arxiv.org/abs/2009.03300" target="_blank">MMLU</a> (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
+ - <a href="https://arxiv.org/abs/2109.07958" target="_blank">TruthfulQA</a> (0-shot) - a test to measure a model's propensity to reproduce falsehoods commonly found online.
+
+ We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.
+ """
+
+ EVALUATION_QUEUE_TEXT = f"""
+ # Evaluation Queue for the 🤗 Open LLM Leaderboard
+ These models will be automatically evaluated on the 🤗 cluster.
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""@misc{open-llm-leaderboard,
+   author = {Edward Beeching and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf},
+   title = {Open LLM Leaderboard},
+   year = {2023},
+   publisher = {Hugging Face},
+   howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}"
+ }
+ @software{eval-harness,
+   author       = {Gao, Leo and
+                   Tow, Jonathan and
+                   Biderman, Stella and
+                   Black, Sid and
+                   DiPofi, Anthony and
+                   Foster, Charles and
+                   Golding, Laurence and
+                   Hsu, Jeffrey and
+                   McDonell, Kyle and
+                   Muennighoff, Niklas and
+                   Phang, Jason and
+                   Reynolds, Laria and
+                   Tang, Eric and
+                   Thite, Anish and
+                   Wang, Ben and
+                   Wang, Kevin and
+                   Zou, Andy},
+   title        = {A framework for few-shot language model evaluation},
+   month        = sep,
+   year         = 2021,
+   publisher    = {Zenodo},
+   version      = {v0.0.1},
+   doi          = {10.5281/zenodo.5371628},
+   url          = {https://doi.org/10.5281/zenodo.5371628}
+ }
+ @misc{clark2018think,
+   title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
+   author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
+   year={2018},
+   eprint={1803.05457},
+   archivePrefix={arXiv},
+   primaryClass={cs.AI}
+ }
+ @misc{zellers2019hellaswag,
+   title={HellaSwag: Can a Machine Really Finish Your Sentence?},
+   author={Rowan Zellers and Ari Holtzman and Yonatan Bisk and Ali Farhadi and Yejin Choi},
+   year={2019},
+   eprint={1905.07830},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ @misc{hendrycks2021measuring,
+   title={Measuring Massive Multitask Language Understanding},
+   author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
+   year={2021},
+   eprint={2009.03300},
+   archivePrefix={arXiv},
+   primaryClass={cs.CY}
+ }
+ @misc{lin2022truthfulqa,
+   title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
+   author={Stephanie Lin and Jacob Hilton and Owain Evans},
+   year={2022},
+   eprint={2109.07958},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }"""
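
The benchmark descriptions embedded in `LLM_BENCHMARKS_TEXT` map onto concrete runs of the Eleuther AI evaluation harness. As an illustration only, a sketch of scoring the 25-shot ARC setting, assuming the harness's v0.3-era Python API (`lm_eval.evaluator.simple_evaluate`) and a placeholder model name:

```python
from lm_eval import evaluator

# 25-shot AI2 Reasoning Challenge, matching the setting described above.
results = evaluator.simple_evaluate(
    model="hf-causal",
    model_args="pretrained=EleutherAI/gpt-neo-125m",  # placeholder model
    tasks=["arc_challenge"],
    num_fewshot=25,
)
print(results["results"]["arc_challenge"])  # accuracy and related metrics
```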