j-tobias committed on
Commit 946726e · 1 Parent(s): add0ba6

initial commit

Files changed (9)
  1. .DS_Store +0 -0
  2. README.md +7 -5
  3. app.py +127 -0
  4. constants.py +115 -0
  5. data.csv +35 -0
  6. init.py +93 -0
  7. old_app.py +159 -0
  8. requirements.txt +61 -0
  9. utils_display.py +48 -0
.DS_Store ADDED
Binary file (8.2 kB).
 
README.md CHANGED
@@ -1,12 +1,14 @@
  ---
  title: Open ASR Leaderboard
- emoji: 🐢
- colorFrom: green
- colorTo: indigo
+ emoji: 🏆
+ colorFrom: red
+ colorTo: blue
  sdk: gradio
- sdk_version: 4.44.0
+ sdk_version: 4.41.0
  app_file: app.py
- pinned: false
+ pinned: true
+ tags:
+ - leaderboard
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,127 @@
+ import gradio as gr
+ import pandas as pd
+ import json
+ from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
+ from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
+ from utils_display import AutoEvalColumn, fields, make_clickable_model, make_best_bold, styled_error, styled_message
+ from datetime import datetime, timezone
+
+ LAST_UPDATED = "Sep 11th 2024"
+
+ # Maps raw column names to user-facing display names (trailing spaces match the data.csv headers).
+ column_names = {
+     "MODEL": "Model",
+     "Avg. WER": "Average WER ⬇️ ",
+     "Avg. RTFx": "RTFx ⬆️ ",
+     "AMI WER": "AMI",
+     "Earnings22 WER": "Earnings22",
+     "Gigaspeech WER": "Gigaspeech",
+     "LS Clean WER": "LS Clean",
+     "LS Other WER": "LS Other",
+     "SPGISpeech WER": "SPGISpeech",
+ }
23
+
+ original_df = pd.read_csv("data.csv")
+ requested_models = []
+
+ # Formats a single cell: numbers are rounded to 2 decimal places, strings pass through.
+ def formatter(x):
+     if isinstance(x, str):
+         return x
+     return round(x, 2)
+
+ def format_df(df: pd.DataFrame):
+     # Link the model column to the Hub; bold the best score in every metric column.
+     for col in df.columns:
+         if col == "model":
+             df[col] = df[col].apply(make_clickable_model)
+         else:
+             df[col] = make_best_bold(df[col], col)
+     return df
+
+ # Sort on the raw numeric values *before* formatting turns them into markdown strings.
+ original_df.sort_values(by='Average WER ⬇️ ', inplace=True)
+ original_df = format_df(original_df)
+ original_df.rename(columns=column_names, inplace=True)
46
+
+ COLS = [c.name for c in fields(AutoEvalColumn)]
+ TYPES = [c.type for c in fields(AutoEvalColumn)]
+
+ def request_model(model_text, chbcoco2017):
+     # ... (keep the existing request_model function as is)
+     pass
53
+
+ def update_table(column_selection):
+     original_df = pd.read_csv("data.csv")
+
+     if column_selection == "All Columns":
+         new_df = original_df
+     elif column_selection == "Main Metrics":
+         new_df = original_df[["model", "Average WER ⬇️ ", "RTFx ⬆️ "]].copy()
+     elif column_selection == "Narrated":
+         new_df = original_df[["model", "Average WER ⬇️ ", "RTFx ⬆️ ", "LS Clean", "LS Other", "Gigaspeech"]].copy()
+         new_df["Average WER ⬇️ "] = new_df[["LS Clean", "LS Other", "Gigaspeech"]].mean(axis=1).round(2)
+     elif column_selection == "Oratory":
+         new_df = original_df[["model", "Average WER ⬇️ ", "RTFx ⬆️ ", "Tedlium", "SPGISpeech", "Earnings22"]].copy()
+         new_df["Average WER ⬇️ "] = new_df[["Tedlium", "SPGISpeech", "Earnings22"]].mean(axis=1).round(2)
+     elif column_selection == "Spontaneous":
+         new_df = original_df[["model", "Average WER ⬇️ ", "RTFx ⬆️ ", "Gigaspeech", "SPGISpeech", "Earnings22", "AMI"]].copy()
+         new_df["Average WER ⬇️ "] = new_df[["Gigaspeech", "SPGISpeech", "Earnings22", "AMI"]].mean(axis=1).round(2)
+     else:
+         # Fall back to the full table on an unrecognised selection.
+         new_df = original_df
+
+     new_df = new_df.sort_values(by='Average WER ⬇️ ', ascending=True)
+     new_df = format_df(new_df)
+
+     return new_df
76
+
+ with gr.Blocks() as demo:
+     gr.HTML(BANNER, elem_id="banner")
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
+             leaderboard_table = gr.components.Dataframe(
+                 value=original_df,
+                 datatype=TYPES,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+                 height=500,
+             )
+             column_radio = gr.Radio(
+                 ["All Columns", "Main Metrics", "Narrated", "Oratory", "Spontaneous"],
+                 label="Select columns to display",
+                 value="All Columns"
+             )
+             column_radio.change(update_table, inputs=[column_radio], outputs=[leaderboard_table])
+
+         with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
+             gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")
+
+         with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
+             with gr.Column():
+                 gr.Markdown("# ✉️✨ Request results for a new model here!", elem_classes="markdown-text")
+             with gr.Column():
+                 gr.Markdown("Select a dataset:", elem_classes="markdown-text")
+             with gr.Column():
+                 model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
+                 chb_coco2017 = gr.Checkbox(label="COCO validation 2017 dataset", visible=False, value=True, interactive=False)
+             with gr.Column():
+                 mdw_submission_result = gr.Markdown()
+                 btn_submit = gr.Button(value="🚀 Request")
+                 btn_submit.click(request_model,
+                                  [model_name_textbox, chb_coco2017],
+                                  mdw_submission_result)
+
+     gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
+
+     with gr.Row():
+         with gr.Accordion("📙 Citation", open=False):
+             gr.Textbox(
+                 value=CITATION_TEXT, lines=7,
+                 label="Copy the BibTeX snippet to cite this source",
+                 elem_id="citation-button",
+                 show_copy_button=True,
+             )
+
+ demo.launch()
constants.py ADDED
@@ -0,0 +1,115 @@
+ from pathlib import Path
+
+ # Directory where model requests are stored
+ DIR_OUTPUT_REQUESTS = Path("requested_models")
+ EVAL_REQUESTS_PATH = Path("eval_requests")
+
+ ##########################
+ # Text definitions       #
+ ##########################
+
+ banner_url = "https://huggingface.co/datasets/reach-vb/random-images/resolve/main/asr_leaderboard.png"
+ BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'
+
+ TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body> <h1> 🤗 Open Automatic Speech Recognition Leaderboard </h1> </body> </html>"
+
+ INTRODUCTION_TEXT = "📐 The 🤗 Open ASR Leaderboard ranks and evaluates speech recognition models \
+ on the Hugging Face Hub. \
+ \nWe report the Average [WER](https://huggingface.co/spaces/evaluate-metric/wer) (⬇️ the lower, the better) and [RTFx](https://github.com/NVIDIA/DeepLearningExamples/blob/master/Kaldi/SpeechRecognition/README.md#metrics) (⬆️ the higher, the better). Models are ranked by their Average WER, from lowest to highest. Check the 📈 Metrics tab to understand how the models are evaluated. \
+ \nIf you want results for a model that is not listed here, you can submit a request for it to be included ✉️✨. \
+ \nThe leaderboard currently focuses on English speech recognition; it will be expanded to multilingual evaluation in later versions."
+
+ CITATION_TEXT = """@misc{open-asr-leaderboard,
+     title = {Open Automatic Speech Recognition Leaderboard},
+     author = {Srivastav, Vaibhav and Majumdar, Somshubra and Koluguri, Nithin and Moumen, Adel and Gandhi, Sanchit and others},
+     year = 2023,
+     publisher = {Hugging Face},
+     howpublished = "\\url{https://huggingface.co/spaces/hf-audio/open_asr_leaderboard}"
+ }
+ """
+
+ METRICS_TAB_TEXT = """
+ Here you will find details about the speech recognition metrics and datasets reported in our leaderboard.
+
+ ## Metrics
+
+ Models are evaluated jointly using the Word Error Rate (WER) and Inverse Real Time Factor (RTFx) metrics. WER is
+ used to assess a system's accuracy, and RTFx its inference speed. Models are ranked in the leaderboard based
+ on their WER, lowest to highest.
+
+ Crucially, the WER and RTFx values are computed for the same inference run using a single script. The implication of this is twofold:
+ 1. The WER and RTFx values are coupled: for a given WER, one can expect to achieve the corresponding RTFx. This allows the proposer to trade off lower WER for higher RTFx should they wish.
+ 2. The WER and RTFx values are averaged over all audios in the benchmark (on the order of thousands of audios).
+
+ For details on reproducing the benchmark numbers, refer to the [Open ASR GitHub repository](https://github.com/huggingface/open_asr_leaderboard#evaluate-a-model).
+
+ ### Word Error Rate (WER)
+
+ Word Error Rate is used to measure the **accuracy** of automatic speech recognition systems. It calculates the percentage
+ of words in the system's output that differ from the reference (correct) transcript. **A lower WER value indicates higher accuracy**.
+
+ Take the following example:
+
+ | Reference:  | the | cat | sat     | on  | the | mat |
+ |-------------|-----|-----|---------|-----|-----|-----|
+ | Prediction: | the | cat | **sit** | on  | the |     |
+ | Label:      | ✅  | ✅  | S       | ✅  | ✅  | D   |
+
+ Here, we have:
+ * 1 substitution ("sit" instead of "sat")
+ * 0 insertions
+ * 1 deletion ("mat" is missing)
+
+ This gives 2 errors in total. To get our word error rate, we divide the total number of errors (substitutions + insertions + deletions) by the total number of words in our
+ reference (N), which for this example is 6:
+
+ ```
+ WER = (S + I + D) / N = (1 + 0 + 1) / 6 = 0.333
+ ```
+
+ This gives a WER of 0.33, or 33%. For a fair comparison, we calculate **zero-shot** (i.e. pre-trained models only) *normalised WER* for all the model checkpoints, meaning punctuation and casing are removed from the references and predictions. You can find the evaluation code on our [GitHub repository](https://github.com/huggingface/open_asr_leaderboard). To read more about how the WER is computed, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/evaluation).
+
+ ### Inverse Real Time Factor (RTFx)
+
+ Inverse Real Time Factor is a measure of the **latency** of automatic speech recognition systems, i.e. how long it takes a
+ model to process a given amount of speech. It is defined as:
+ ```
+ RTFx = (number of seconds of audio inferred) / (compute time in seconds)
+ ```
+
+ Therefore, an RTFx of 1 means a system processes speech as fast as it's spoken, while an RTFx of 2 means it takes half the time.
+ Thus, **a higher RTFx value indicates lower latency**.
+
+ ## How to reproduce our results
+
+ The ASR Leaderboard is a continued effort to benchmark open source/access speech recognition models where possible.
+ Along with the Leaderboard, we're open-sourcing the codebase used for running these evaluations.
+ For more details head over to our repo at: https://github.com/huggingface/open_asr_leaderboard
+
+ P.S. We'd love to know which other models you'd like us to benchmark next. Contributions are more than welcome! ♥️
+
+ ## Benchmark datasets
+
+ Evaluating Speech Recognition systems is a hard problem. We use the multi-dataset benchmarking strategy proposed in the
+ [ESB paper](https://arxiv.org/abs/2210.13352) to obtain robust evaluation scores for each model.
+
+ ESB is a benchmark for evaluating the performance of a single automatic speech recognition (ASR) system across a broad
+ set of speech datasets. It comprises eight English speech recognition datasets, capturing a broad range of domains,
+ acoustic conditions, speaker styles, and transcription requirements. As such, it gives a better indication of how
+ a model is likely to perform on downstream ASR compared to evaluating it on one dataset alone.
+
+ The ESB score is calculated as a macro-average of the WER scores across the ESB datasets. The models in the leaderboard
+ are ranked based on their average WER scores, from lowest to highest.
+
+ | Dataset | Domain | Speaking Style | Train (h) | Dev (h) | Test (h) | Transcriptions | License |
+ |---------|--------|----------------|-----------|---------|----------|----------------|---------|
+ | [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | Audiobook | Narrated | 960 | 11 | 11 | Normalised | CC-BY-4.0 |
+ | [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | European Parliament | Oratory | 523 | 5 | 5 | Punctuated | CC0 |
+ | [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | TED talks | Oratory | 454 | 2 | 3 | Normalised | CC-BY-NC-ND 3.0 |
+ | [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | Audiobook, podcast, YouTube | Narrated, spontaneous | 2500 | 12 | 40 | Punctuated | apache-2.0 |
+ | [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech) | Financial meetings | Oratory, spontaneous | 4900 | 100 | 100 | Punctuated & Cased | User Agreement |
+ | [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22) | Financial meetings | Oratory, spontaneous | 105 | 5 | 5 | Punctuated & Cased | CC-BY-SA-4.0 |
+ | [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | Meetings | Spontaneous | 78 | 9 | 9 | Punctuated & Cased | CC-BY-4.0 |
+
+ For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352).
+ """
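As an editorial aside: the WER and RTFx definitions in `METRICS_TAB_TEXT` above can be sanity-checked in a few lines of Python. This is a minimal sketch, not part of the commit, assuming the `evaluate` package (the same metric the intro text links to) and its `jiwer` backend are installed:

```python
import time

import evaluate  # pip install evaluate jiwer

# WER on the worked example from the Metrics tab:
# 1 substitution ("sit") + 1 deletion ("mat") over a 6-word reference.
wer_metric = evaluate.load("wer")
wer = wer_metric.compute(
    references=["the cat sat on the mat"],
    predictions=["the cat sit on the"],
)
print(f"WER = {wer:.3f}")  # WER = 0.333, i.e. (S + I + D) / N = 2 / 6

# RTFx = seconds of audio transcribed / seconds of compute,
# timed around the inference call itself.
audio_seconds = 60.0  # one minute of speech in this toy example
start = time.perf_counter()
time.sleep(0.5)       # stand-in for a real ASR model's inference over that audio
compute_seconds = time.perf_counter() - start
print(f"RTFx = {audio_seconds / compute_seconds:.0f}")  # ~120x faster than real time
```

On the leaderboard itself, both numbers come from one and the same inference run, as the text above stresses.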
data.csv ADDED
@@ -0,0 +1,35 @@
+ model,Average WER ⬇️ ,RTFx ⬆️ ,AMI,Earnings22,Gigaspeech,LS Clean,LS Other,SPGISpeech,Tedlium
+ nvidia/canary-1b,6.5,235.34,13.9,12.19,10.12,1.48,2.93,2.06,3.56
+ nvidia/parakeet-tdt-1.1b,7.01,2390.61,15.87,14.49,9.52,1.4,2.6,3.16,3.59
+ nvidia/parakeet-rnnt-1.1b,7.12,2053.15,17.01,13.94,9.89,1.45,2.5,2.93,3.83
+ nvidia/parakeet-ctc-1.1b,7.4,2728.52,15.67,13.75,10.28,1.83,3.51,4.02,3.57
+ openai/whisper-large-v3,7.44,145.51,15.95,11.29,10.02,2.01,3.91,2.94,3.86
+ nvidia/parakeet-rnnt-0.6b,7.5,2815.72,17.4,14.66,10.01,1.62,3.02,3.32,3.85
+ distil-whisper/distil-large-v3,7.52,214.42,15.16,11.79,10.08,2.54,5.19,3.27,3.86
+ nvidia/parakeet-ctc-0.6b,7.69,4281.53,16.46,14.26,10.39,1.88,3.8,3.89,3.77
+ openai/whisper-large-v2,7.83,144.45,16.74,12.05,10.67,2.83,5.14,3.87,3.9
+ distil-whisper/distil-large-v2,7.92,202.95,14.67,12.19,10.32,2.94,6.84,3.3,4.87
+ openai/whisper-large,7.94,143.76,16.73,12.91,10.76,2.73,5.54,3.2,3.91
+ openai/whisper-medium.en,8.09,182.13,16.68,12.63,11.03,3.02,5.85,3.33,4.12
+ nvidia/stt_en_conformer_ctc_large,8.32,4295.01,15.95,15.99,11.6,2.05,4.15,5.57,4.41
+ distil-whisper/distil-small.en,8.57,331.89,16.16,13.15,10.87,3.48,7.73,3.82,4.54
+ openai/whisper-small.en,8.59,268.91,17.93,12.97,11.35,3.05,7.25,3.6,4.07
+ distil-whisper/distil-medium.en,8.77,279.73,16.12,12.99,11.3,3.69,8.35,3.83,4.84
+ nvidia/stt_en_fastconformer_ctc_large,8.96,6399.25,18.61,18.81,12.17,1.93,4.04,5.06,4.76
+ nvidia/stt_en_fastconformer_transducer_large,9.06,4097.43,19.09,19.41,12.31,1.8,3.97,4.97,4.46
+ stt_en_conformer_transducer_small,10.26,3714.36,20.28,18.13,13.7,2.77,6.47,6.63,6.21
+ openai/whisper-base.en,10.32,320.67,21.13,15.09,12.83,4.25,10.35,4.26,4.87
+ nvidia/stt_en_conformer_ctc_small,11.16,5686.9,20.43,18.84,14.46,3.59,7.92,7.8,7.16
+ openai/whisper-tiny.en,12.81,348.12,24.24,19.12,14.08,5.66,15.45,5.93,5.97
+ speechbrain/asr-wav2vec2-librispeech,14.35,451.18,32.05,28.52,16.92,1.77,3.83,10.39,7.58
+ facebook/wav2vec2-large-960h-lv60-self,21.27,509.32,36.77,31.68,23.94,11.13,12.42,17.94,14.88
+ facebook/mms-1b-all,22.54,230.79,42.02,31.17,26.44,12.63,15.99,16.95,17.48
+ facebook/hubert-xlarge-ls960-ft,22.55,361.32,39.11,36.13,24.74,11.3,12.22,18.58,15.83
+ facebook/hubert-large-ls960-ft,22.69,495.86,39.72,35.24,25.01,11.35,12.75,18.86,15.92
+ facebook/wav2vec2-large-robust-ft-libri-960h,22.93,503.81,37.75,36.22,25.12,11.84,13.76,19.03,16.46
+ facebook/data2vec-audio-large-960h,23.21,470.15,40.51,37.82,24.8,11.4,12.94,18.49,15.86
+ facebook/wav2vec2-conformer-rope-large-960h-ft,23.28,607.87,42.47,37.52,25.0,11.34,12.54,18.87,15.92
+ facebook/wav2vec2-conformer-rel-pos-large-960h-ft,23.29,522.46,42.39,38.33,24.96,11.2,12.44,18.85,15.77
+ facebook/wav2vec2-large-960h,26.77,516.58,42.66,43.75,27.74,12.81,15.46,22.82,18.85
+ facebook/data2vec-audio-base-960h,28.3,648.14,47.27,49.56,29.78,12.13,15.48,25.46,19.49
+ facebook/wav2vec2-base-960h,29.4,686.0,45.56,48.47,30.85,12.53,16.72,27.56,21.05
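A quick editorial check of how this table feeds the ranking (not part of the commit): note that the `Average WER ⬇️ ` and `RTFx ⬆️ ` headers carry a trailing space, which any code reading the file must reproduce exactly.

```python
import pandas as pd

df = pd.read_csv("data.csv")
# The leaderboard ranks by average WER, lowest (best) first;
# mind the trailing space baked into the CSV headers.
top = df.sort_values("Average WER ⬇️ ").head(3)
print(top[["model", "Average WER ⬇️ ", "RTFx ⬆️ "]].to_string(index=False))
```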
init.py ADDED
@@ -0,0 +1,93 @@
+ import os
+ from pathlib import Path
+
+ from huggingface_hub import HfApi, Repository
+
+ from constants import EVAL_REQUESTS_PATH
+
+ TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
+ QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
+ QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
+
+ hf_api = HfApi(
+     endpoint="https://huggingface.co",
+     token=TOKEN_HUB,
+ )
+
+ def load_all_info_from_dataset_hub():
+     eval_queue_repo = None
+     requested_models = None
+     csv_results = None
+
+     passed = True
+     if TOKEN_HUB is None:
+         passed = False
+     else:
+         print("Pulling evaluation requests and results.")
+
+         eval_queue_repo = Repository(
+             local_dir=QUEUE_PATH,
+             clone_from=QUEUE_REPO,
+             use_auth_token=TOKEN_HUB,
+             repo_type="dataset",
+         )
+         eval_queue_repo.git_pull()
+
+         # Local directory where the dataset repo is cloned + folder with eval requests
+         directory = Path(QUEUE_PATH) / EVAL_REQUESTS_PATH
+         requested_models = get_all_requested_models(directory)
+         requested_models = [p.stem for p in requested_models]
+         # Local directory where the dataset repo is cloned
+         csv_results = get_csv_with_results(QUEUE_PATH)
+         if csv_results is None:
+             passed = False
+     if not passed:
+         raise ValueError("Hugging Face token missing or results CSV not found; cannot load evaluation requests and results.")
+
+     return eval_queue_repo, requested_models, csv_results
+
+ def upload_file(requested_model_name, path_or_fileobj):
+     dest_repo_file = str(Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name)
+     hf_api.upload_file(
+         path_or_fileobj=path_or_fileobj,
+         path_in_repo=dest_repo_file,
+         repo_id=QUEUE_REPO,
+         token=TOKEN_HUB,
+         repo_type="dataset",
+         commit_message=f"Add {requested_model_name} to eval queue")
+
+ def get_all_requested_models(directory):
+     directory = Path(directory)
+     all_requested_models = list(directory.glob("*.txt"))
+     return all_requested_models
+
+ def get_csv_with_results(directory):
+     directory = Path(directory)
+     all_csv_files = list(directory.glob("*.csv"))
+     latest = [f for f in all_csv_files if f.stem.endswith("latest")]
+     if len(latest) != 1:
+         return None
+     return latest[0]
+
+ def is_model_on_hub(model_name, revision="main"):
+     # Returns (exists, error_message); error_message is None when the model is found.
+     try:
+         model_name = model_name.replace(" ", "")
+         author = model_name.split("/")[0]
+         model_id = model_name.split("/")[1]
+         if len(author) == 0 or len(model_id) == 0:
+             return False, "is not a valid model name. Please use the format `author/model_name`."
+     except Exception:
+         return False, "is not a valid model name. Please use the format `author/model_name`."
+
+     try:
+         models = list(hf_api.list_models(author=author, search=model_id))
+         matched = [m for m in models if m.modelId == model_name]
+         if len(matched) != 1:
+             return False, "was not found on the hub!"
+         else:
+             return True, None
+     except Exception as e:
+         print(f"Could not get the model from the hub: {e}")
+         return False, "was not found on the hub!"
old_app.py ADDED
@@ -0,0 +1,159 @@
+ import gradio as gr
+ import pandas as pd
+ import json
+ from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
+ from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
+ from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
+ from datetime import datetime, timezone
+
+ # imports for updated version
+ from utils_display import make_best_bold
+
+ LAST_UPDATED = "Sep 11th 2024"
+
+ column_names = {
+     "MODEL": "Model",
+     "Avg. WER": "Average WER ⬇️ ",
+     "Avg. RTFx": "RTFx ⬆️ ",
+     "AMI WER": "AMI",
+     "Earnings22 WER": "Earnings22",
+     "Gigaspeech WER": "Gigaspeech",
+     "LS Clean WER": "LS Clean",
+     "LS Other WER": "LS Other",
+     "SPGISpeech WER": "SPGISpeech",
+     # "Tedlium WER": "Tedlium" - just changed for locally running version
+ }
+
+ # eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
+
+ # if not csv_results.exists():
+ #     raise Exception(f"CSV file {csv_results} does not exist locally")
+
+ # Get csv with data and parse columns
+ original_df = pd.read_csv("data.csv")
+
+ requested_models = []
+
+ # Formats the columns: numbers are rounded to 2 decimal places, strings pass through.
+ def formatter(x):
+     if isinstance(x, str):
+         return x
+     return round(x, 2)
+
+ # Sort on the raw numeric values before they are turned into markdown strings.
+ original_df.sort_values(by='Average WER ⬇️ ', inplace=True)
+
+ for col in original_df.columns:
+     if col == "model":
+         original_df[col] = original_df[col].apply(make_clickable_model)
+     else:
+         original_df[col] = make_best_bold(original_df[col], col)  # For numerical values
+
+ original_df.rename(columns=column_names, inplace=True)
+
+ COLS = [c.name for c in fields(AutoEvalColumn)]
+ TYPES = [c.type for c in fields(AutoEvalColumn)]
+
+ def request_model(model_text, chbcoco2017):
+
+     # Determine the selected checkboxes
+     dataset_selection = []
+     if chbcoco2017:
+         dataset_selection.append("ESB Datasets tests only")
+
+     if len(dataset_selection) == 0:
+         return styled_error("You need to select at least one dataset")
+
+     base_model_on_hub, error_msg = is_model_on_hub(model_text)
+
+     if not base_model_on_hub:
+         return styled_error(f"Base model '{model_text}' {error_msg}")
+
+     # Construct the output dictionary
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+     required_datasets = ', '.join(dataset_selection)
+     eval_entry = {
+         "date": current_time,
+         "model": model_text,
+         "datasets_selected": required_datasets
+     }
+
+     # Prepare file path
+     DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
+
+     fn_datasets = '@ '.join(dataset_selection)
+     filename = model_text.replace("/", "@") + "@@" + fn_datasets
+     if filename in requested_models:
+         return styled_error(f"A request for this model '{model_text}' and dataset(s) was already made.")
+     try:
+         filename_ext = filename + ".txt"
+         out_filepath = DIR_OUTPUT_REQUESTS / filename_ext
+
+         # Write the results to a text file
+         with open(out_filepath, "w") as f:
+             f.write(json.dumps(eval_entry))
+
+         upload_file(filename, out_filepath)
+
+         # Include file in the list of uploaded files
+         requested_models.append(filename)
+
+         # Remove the local file
+         out_filepath.unlink()
+
+         return styled_message("🤗 Your request has been submitted and will be evaluated soon!")
+     except Exception as e:
+         return styled_error(f"Error submitting request: {e}")
+
+ with gr.Blocks() as demo:
+     gr.HTML(BANNER, elem_id="banner")
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
+             leaderboard_table = gr.components.Dataframe(
+                 value=original_df,
+                 datatype=TYPES,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+             )
+
+         with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
+             gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")
+
+         with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
+             with gr.Column():
+                 gr.Markdown("# ✉️✨ Request results for a new model here!", elem_classes="markdown-text")
+             with gr.Column():
+                 gr.Markdown("Select a dataset:", elem_classes="markdown-text")
+             with gr.Column():
+                 model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
+                 chb_coco2017 = gr.Checkbox(label="COCO validation 2017 dataset", visible=False, value=True, interactive=False)
+             with gr.Column():
+                 mdw_submission_result = gr.Markdown()
+                 btn_submit = gr.Button(value="🚀 Request")
+                 btn_submit.click(request_model,
+                                  [model_name_textbox, chb_coco2017],
+                                  mdw_submission_result)
+
+     gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
+
+     with gr.Row():
+         with gr.Accordion("📙 Citation", open=False):
+             gr.Textbox(
+                 value=CITATION_TEXT, lines=7,
+                 label="Copy the BibTeX snippet to cite this source",
+                 elem_id="citation-button",
+                 show_copy_button=True,
+             )
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,61 @@
+ aiohttp==3.8.4
+ aiosignal==1.3.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ certifi==2023.7.22
+ charset-normalizer==3.2.0
+ cmake==3.26.4
+ contourpy==1.1.0
+ Cython==3.0.0
+ datasets==2.13.1
+ dill==0.3.6
+ filelock==3.12.2
+ fonttools==4.40.0
+ frozenlist==1.4.0
+ fsspec==2023.6.0
+ huggingface-hub==0.16.4
+ idna==3.4
+ Jinja2==3.1.2
+ kiwisolver==1.4.4
+ lit==16.0.6
+ MarkupSafe==2.1.3
+ matplotlib==3.7.2
+ mpmath==1.3.0
+ multidict==6.0.4
+ multiprocess==0.70.14
+ networkx==3.1
+ numpy==1.25.2
+ nvidia-cublas-cu11==11.10.3.66
+ nvidia-cuda-cupti-cu11==11.7.101
+ nvidia-cuda-nvrtc-cu11==11.7.99
+ nvidia-cuda-runtime-cu11==11.7.99
+ nvidia-cudnn-cu11==8.5.0.96
+ nvidia-cufft-cu11==10.9.0.58
+ nvidia-curand-cu11==10.2.10.91
+ nvidia-cusolver-cu11==11.4.0.1
+ nvidia-cusparse-cu11==11.7.4.91
+ nvidia-nccl-cu11==2.14.3
+ nvidia-nvtx-cu11==11.7.91
+ packaging==23.1
+ pandas==2.0.3
+ Pillow==10.0.0
+ pyarrow==12.0.1
+ python-dateutil==2.8.2
+ pytz==2023.3
+ PyYAML==6.0.1
+ regex==2023.6.3
+ requests==2.31.0
+ responses==0.18.0
+ safetensors==0.3.1
+ six==1.16.0
+ sympy==1.12
+ tokenizers==0.13.3
+ torch==2.0.1
+ torchvision==0.15.2
+ tqdm==4.65.0
+ triton==2.0.0
+ typing_extensions==4.7.1
+ tzdata==2023.3
+ urllib3==2.0.4
+ xxhash==3.2.0
+ yarl==1.9.2
utils_display.py ADDED
@@ -0,0 +1,48 @@
+ from dataclasses import dataclass
+
+ # These classes hold the user-facing column names, to avoid having to change them
+ # all around the code when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+
+ def fields(raw_class):
+     # Collect the ColumnContent attributes of a class, skipping dunder entries.
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+ @dataclass(frozen=True)
+ class AutoEvalColumn:  # Auto evals column
+     model = ColumnContent("Model", "markdown")
+     avg_wer = ColumnContent("Average WER ⬇️ ", "markdown")
+     rtf = ColumnContent("RTFx ⬆️ ", "markdown")
+     ami_wer = ColumnContent("AMI", "markdown")
+     e22_wer = ColumnContent("Earnings22", "markdown")
+     gs_wer = ColumnContent("Gigaspeech", "markdown")
+     lsc_wer = ColumnContent("LS Clean", "markdown")
+     lso_wer = ColumnContent("LS Other", "markdown")
+     ss_wer = ColumnContent("SPGISpeech", "markdown")
+     tl_wer = ColumnContent("Tedlium", "markdown")
+     # vp_wer = ColumnContent("Voxpopuli", "markdown")
+
+
27
+
28
+ def make_clickable_model(model_name):
29
+ link = f"https://huggingface.co/{model_name}"
30
+ return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
31
+
32
+ def styled_error(error):
33
+ return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
34
+
35
+ def styled_warning(warn):
36
+ return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
37
+
38
+ def styled_message(message):
39
+ return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
40
+
41
+ def make_best_bold(column, column_name):
42
+ if column_name == "RTFx ⬆️ ":
43
+ best_value = column.max()
44
+ else:
45
+ best_value = column.min()
46
+ return column.apply(lambda x: f'#### <span style="color: red;">{float(x):.2f}</span>' if x == best_value else f"{x:.2f}")
47
+
48
+
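To make the display helpers above concrete, here is a small editorial usage sketch (not part of the commit); the sample numbers are invented:

```python
import pandas as pd

from utils_display import AutoEvalColumn, fields, make_best_bold, make_clickable_model

# The AutoEvalColumn registry yields display names and gradio datatypes in one place.
print([c.name for c in fields(AutoEvalColumn)])
# ['Model', 'Average WER ⬇️ ', 'RTFx ⬆️ ', 'AMI', ...]

# make_best_bold highlights the best score: max for RTFx, min for every WER column.
wers = pd.Series([6.5, 7.44, 29.4])
print(make_best_bold(wers, "AMI").tolist())
# ['#### <span style="color: red;">6.50</span>', '7.44', '29.40']

print(make_clickable_model("openai/whisper-large-v3"))  # markdown link to the Hub
```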