wasertech and reach-vb (HF staff) committed
Commit 5b56cf9
0 Parent(s):

Duplicate from hf-audio/open_asr_leaderboard

Co-authored-by: Vaibhav Srivastav <reach-vb@users.noreply.huggingface.co>

Files changed (9)
  1. .DS_Store +0 -0
  2. .gitattributes +35 -0
  3. README.md +12 -0
  4. app.py +145 -0
  5. assets/.DS_Store +0 -0
  6. constants.py +98 -0
  7. init.py +93 -0
  8. requirements.txt +61 -0
  9. utils_display.py +40 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Open ASR Leaderboard
+ emoji: 🏆
+ colorFrom: red
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.42.0
+ app_file: app.py
+ duplicated_from: hf-audio/open_asr_leaderboard
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,145 @@
+ import gradio as gr
+ import pandas as pd
+ import json
+ from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
+ from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
+ from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
+ from datetime import datetime, timezone
+
+ LAST_UPDATED = "Sep 7th 2023"
+
+ column_names = {
+     "MODEL": "Model",
+     "Avg. WER": "Average WER ⬇️",
+     "RTF": "RTF (1e-3) ⬇️",
+     "AMI WER": "AMI",
+     "Earnings22 WER": "Earnings22",
+     "Gigaspeech WER": "Gigaspeech",
+     "LS Clean WER": "LS Clean",
+     "LS Other WER": "LS Other",
+     "SPGISpeech WER": "SPGISpeech",
+     "Tedlium WER": "Tedlium",
+     "Voxpopuli WER": "Voxpopuli",
+     "Common Voice WER": "Common Voice"}
+
+ eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
+
+ if not csv_results.exists():
+     raise Exception(f"CSV file {csv_results} does not exist locally")
+
+ # Get the CSV with the results and parse its columns
+ original_df = pd.read_csv(csv_results)
+
+ # Format the numerical columns to two decimal places
+ def formatter(x):
+     return round(x, 2)
+
+ for col in original_df.columns:
+     if col == "model":
+         original_df[col] = original_df[col].apply(make_clickable_model)
+     else:
+         original_df[col] = original_df[col].apply(formatter)  # For numerical values
+
+ original_df.rename(columns=column_names, inplace=True)
+ original_df.sort_values(by='Average WER ⬇️', inplace=True)
+
+ COLS = [c.name for c in fields(AutoEvalColumn)]
+ TYPES = [c.type for c in fields(AutoEvalColumn)]
+
+
+ def request_model(model_text, chbcoco2017):
+     # Determine the selected checkboxes
+     dataset_selection = []
+     if chbcoco2017:
+         dataset_selection.append("ESB Datasets tests only")
+
+     if len(dataset_selection) == 0:
+         return styled_error("You need to select at least one dataset")
+
+     base_model_on_hub, error_msg = is_model_on_hub(model_text)
+
+     if not base_model_on_hub:
+         return styled_error(f"Base model '{model_text}' {error_msg}")
+
+     # Construct the output dictionary
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+     required_datasets = ', '.join(dataset_selection)
+     eval_entry = {
+         "date": current_time,
+         "model": model_text,
+         "datasets_selected": required_datasets
+     }
+
+     # Prepare the output directory
+     DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
+
+     fn_datasets = '@ '.join(dataset_selection)
+     filename = model_text.replace("/", "@") + "@@" + fn_datasets
+     if filename in requested_models:
+         return styled_error(f"A request for this model '{model_text}' and dataset(s) was already made.")
+     try:
+         filename_ext = filename + ".txt"
+         out_filepath = DIR_OUTPUT_REQUESTS / filename_ext
+
+         # Write the request to a text file
+         with open(out_filepath, "w") as f:
+             f.write(json.dumps(eval_entry))
+
+         upload_file(filename, out_filepath)
+
+         # Include the file in the list of uploaded files
+         requested_models.append(filename)
+
+         # Remove the local file
+         out_filepath.unlink()
+
+         return styled_message("🤗 Your request has been submitted and will be evaluated soon!")
+     except Exception as e:
+         return styled_error(f"Error submitting request: {e}")
+
+ with gr.Blocks() as demo:
+     gr.HTML(BANNER, elem_id="banner")
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
+             leaderboard_table = gr.components.Dataframe(
+                 value=original_df,
+                 datatype=TYPES,
+                 max_rows=None,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+             )
+
+         with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
+             gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")
+
+         with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
+             with gr.Column():
+                 gr.Markdown("# ✉️✨ Request results for a new model here!", elem_classes="markdown-text")
+             with gr.Column():
+                 gr.Markdown("Select a dataset:", elem_classes="markdown-text")
+             with gr.Column():
+                 model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
+                 chb_coco2017 = gr.Checkbox(label="COCO validation 2017 dataset", visible=False, value=True, interactive=False)
+             with gr.Column():
+                 mdw_submission_result = gr.Markdown()
+                 btn_submitt = gr.Button(value="🚀 Request")
+                 btn_submitt.click(request_model,
+                                   [model_name_textbox, chb_coco2017],
+                                   mdw_submission_result)
+
+     gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
+
+     with gr.Row():
+         with gr.Accordion("📙 Citation", open=False):
+             gr.Textbox(
+                 value=CITATION_TEXT, lines=7,
+                 label="Copy the BibTeX snippet to cite this source",
+                 elem_id="citation-button",
+             ).style(show_copy_button=True)
+
+ demo.launch()
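To make the request flow above concrete: `request_model` encodes the model name and the selected datasets into the request filename, and serialises the entry as JSON before uploading it to the queue repo. A minimal, hypothetical sketch (not part of this commit) of what one run would produce, using an assumed example model name:

```python
# Hypothetical illustration of the filename and payload produced by
# request_model() above; the model name and timestamp are assumed examples.
import json

model_text = "openai/whisper-large-v2"
dataset_selection = ["ESB Datasets tests only"]

# Slashes become "@", and "@@" separates the model from the dataset list.
filename = model_text.replace("/", "@") + "@@" + "@ ".join(dataset_selection)
print(filename + ".txt")
# -> openai@whisper-large-v2@@ESB Datasets tests only.txt

eval_entry = {
    "date": "2023-09-07T12:00:00Z",  # illustrative UTC timestamp
    "model": model_text,
    "datasets_selected": ", ".join(dataset_selection),
}
print(json.dumps(eval_entry))
```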
assets/.DS_Store ADDED
Binary file (6.15 kB).
 
constants.py ADDED
@@ -0,0 +1,98 @@
+ from pathlib import Path
+
+ # Directory where model requests are stored
+ DIR_OUTPUT_REQUESTS = Path("requested_models")
+ EVAL_REQUESTS_PATH = Path("eval_requests")
+
+ ##########################
+ # Text definitions       #
+ ##########################
+
+ banner_url = "https://huggingface.co/datasets/reach-vb/random-images/resolve/main/asr_leaderboard.png"
+ BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'
+
+ TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body> <h1> 🤗 Open Automatic Speech Recognition Leaderboard </h1> </body> </html>"
+
+ INTRODUCTION_TEXT = "📐 The 🤗 Open ASR Leaderboard ranks and evaluates speech recognition models \
+ on the Hugging Face Hub. \
+ \nWe report the Average [WER](https://huggingface.co/spaces/evaluate-metric/wer) (⬇️) and [RTF](https://openvoice-tech.net/index.php/Real-time-factor) (⬇️) - the lower, the better. Models are ranked based on their Average WER, from lowest to highest. Check the 📈 Metrics tab to understand how the models are evaluated. \
+ \nIf you want results for a model that is not listed here, you can submit a request for it to be included ✉️✨. \
+ \nThe leaderboard currently focuses on English speech recognition, and will be expanded to multilingual evaluation in later versions."
+
+ CITATION_TEXT = """@misc{open-asr-leaderboard,
+     title = {Open Automatic Speech Recognition Leaderboard},
+     author = {Srivastav, Vaibhav and Majumdar, Somshubra and Koluguri, Nithin and Moumen, Adel and Gandhi, Sanchit and Hugging Face Team and Nvidia NeMo Team and SpeechBrain Team},
+     year = 2023,
+     publisher = {Hugging Face},
+     howpublished = "\\url{https://huggingface.co/spaces/open-asr-leaderboard/leaderboard}"
+ }
+ """
+
+ METRICS_TAB_TEXT = """
+ Here you will find details about the speech recognition metrics and datasets reported in our leaderboard.
+
+ ## Metrics
+
+ 🎯 Word Error Rate (WER) and Real-Time Factor (RTF) are popular metrics for evaluating speech recognition
+ models: they estimate how accurate a model's predictions are and how fast they are returned. We explain
+ each of them below.
+
+ ### Word Error Rate (WER)
+
+ Word Error Rate is used to measure the **accuracy** of automatic speech recognition systems. It calculates the percentage
+ of words in the system's output that differ from the reference (correct) transcript. **A lower WER value indicates higher accuracy**.
+
+ ```
+ Example: If the reference transcript is "I really love cats" and the ASR system outputs "I don't love dogs",
+ the WER is 50% because 2 out of 4 words are incorrect.
+ ```
+
+ For a fair comparison, we calculate **zero-shot** (i.e. pre-trained models only) *normalised WER* for all the model checkpoints. You can find the evaluation code on our [GitHub repository](https://github.com/huggingface/open_asr_leaderboard). To read more about how the WER is computed, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/evaluation).
+
+ ### Real Time Factor (RTF)
+
+ Real Time Factor is a measure of the **latency** of automatic speech recognition systems, i.e. how long it takes a
+ model to process a given amount of speech. It is usually expressed as a multiple of real time. An RTF of 1 means the system processes
+ speech as fast as it is spoken, while an RTF of 2 means it takes twice as long. Thus, **a lower RTF value indicates lower latency**.
+
+ ```
+ Example: If it takes an ASR system 10 seconds to transcribe 10 seconds of speech, the RTF is 1.
+ If it takes 20 seconds to transcribe the same 10 seconds of speech, the RTF is 2.
+ ```
+
+ For the benchmark, we report RTF averaged over a 10 minute audio sample, with 5 warm-up batches followed by 3 graded batches.
+
+ ## How to reproduce our results
+
+ The ASR Leaderboard will be a continued effort to benchmark open source/access speech recognition models where possible.
+ Along with the Leaderboard we're open-sourcing the codebase used for running these evaluations.
+ For more details head over to our repo at: https://github.com/huggingface/open_asr_leaderboard
+
+ P.S. We'd love to know which other models you'd like us to benchmark next. Contributions are more than welcome! ♥️
+
+ ## Benchmark datasets
+
+ Evaluating Speech Recognition systems is a hard problem. We use the multi-dataset benchmarking strategy proposed in the
+ [ESB paper](https://arxiv.org/abs/2210.13352) to obtain robust evaluation scores for each model.
+
+ ESB is a benchmark for evaluating the performance of a single automatic speech recognition (ASR) system across a broad
+ set of speech datasets. It comprises eight English speech recognition datasets, capturing a broad range of domains,
+ acoustic conditions, speaker styles, and transcription requirements. As such, it gives a better indication of how
+ a model is likely to perform on downstream ASR compared to evaluating it on one dataset alone.
+
+ The ESB score is calculated as a macro-average of the WER scores across the ESB datasets. The models in the leaderboard
+ are ranked based on their average WER scores, from lowest to highest.
+
+ | Dataset                                                                                | Domain                      | Speaking Style        | Train (h) | Dev (h) | Test (h) | Transcriptions     | License         |
+ |----------------------------------------------------------------------------------------|-----------------------------|-----------------------|-----------|---------|----------|--------------------|-----------------|
+ | [LibriSpeech](https://huggingface.co/datasets/librispeech_asr)                          | Audiobook                   | Narrated              | 960       | 11      | 11       | Normalised         | CC-BY-4.0       |
+ | [Common Voice 9](https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0)   | Wikipedia                   | Narrated              | 1409      | 27      | 27       | Punctuated & Cased | CC0-1.0         |
+ | [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli)                         | European Parliament         | Oratory               | 523       | 5       | 5        | Punctuated         | CC0             |
+ | [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium)                                | TED talks                   | Oratory               | 454       | 2       | 3        | Normalised         | CC-BY-NC-ND 3.0 |
+ | [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech)                    | Audiobook, podcast, YouTube | Narrated, spontaneous | 2500      | 12      | 40       | Punctuated         | apache-2.0      |
+ | [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech)                         | Financial meetings          | Oratory, spontaneous  | 4900      | 100     | 100      | Punctuated & Cased | User Agreement  |
+ | [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22)                     | Financial meetings          | Oratory, spontaneous  | 105       | 5       | 5        | Punctuated & Cased | CC-BY-SA-4.0    |
+ | [AMI](https://huggingface.co/datasets/edinburghcstr/ami)                                | Meetings                    | Spontaneous           | 78        | 9       | 9        | Punctuated & Cased | CC-BY-4.0       |
+
+ For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352).
+ """
init.py ADDED
@@ -0,0 +1,93 @@
+ import os
+ from constants import EVAL_REQUESTS_PATH
+ from pathlib import Path
+ from huggingface_hub import HfApi, Repository
+
+ TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
+ QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
+ QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
+
+ hf_api = HfApi(
+     endpoint="https://huggingface.co",
+     token=TOKEN_HUB,
+ )
+
+ def load_all_info_from_dataset_hub():
+     eval_queue_repo = None
+     csv_results = None
+     requested_models = None
+
+     passed = True
+     if TOKEN_HUB is None:
+         passed = False
+     else:
+         print("Pulling evaluation requests and results.")
+
+         eval_queue_repo = Repository(
+             local_dir=QUEUE_PATH,
+             clone_from=QUEUE_REPO,
+             use_auth_token=TOKEN_HUB,
+             repo_type="dataset",
+         )
+         eval_queue_repo.git_pull()
+
+         # Local directory where the dataset repo is cloned + folder with eval requests
+         directory = Path(QUEUE_PATH) / EVAL_REQUESTS_PATH
+         requested_models = get_all_requested_models(directory)
+         requested_models = [p.stem for p in requested_models]
+         # Local directory where the dataset repo is cloned
+         csv_results = get_csv_with_results(QUEUE_PATH)
+         if csv_results is None:
+             passed = False
+     if not passed:
+         print("No Hugging Face token provided or no results found. Skipping evaluation requests and results.")
+
+     return eval_queue_repo, requested_models, csv_results
+
+
+ def upload_file(requested_model_name, path_or_fileobj):
+     dest_repo_file = Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name
+     dest_repo_file = str(dest_repo_file)
+     hf_api.upload_file(
+         path_or_fileobj=path_or_fileobj,
+         path_in_repo=dest_repo_file,
+         repo_id=QUEUE_REPO,
+         token=TOKEN_HUB,
+         repo_type="dataset",
+         commit_message=f"Add {requested_model_name} to eval queue")
+
+ def get_all_requested_models(directory):
+     directory = Path(directory)
+     all_requested_models = list(directory.glob("*.txt"))
+     return all_requested_models
+
+ def get_csv_with_results(directory):
+     directory = Path(directory)
+     all_csv_files = list(directory.glob("*.csv"))
+     latest = [f for f in all_csv_files if f.stem.endswith("latest")]
+     if len(latest) != 1:
+         return None
+     return latest[0]
+
+
+ def is_model_on_hub(model_name, revision="main") -> tuple:
+     # Returns (True, None) if the model exists on the Hub,
+     # otherwise (False, error message).
+     try:
+         model_name = model_name.replace(" ", "")
+         author = model_name.split("/")[0]
+         model_id = model_name.split("/")[1]
+         if len(author) == 0 or len(model_id) == 0:
+             return False, "is not a valid model name. Please use the format `author/model_name`."
+     except Exception:
+         return False, "is not a valid model name. Please use the format `author/model_name`."
+
+     try:
+         models = list(hf_api.list_models(author=author, search=model_id))
+         matched = [m.modelId for m in models if m.modelId == model_name]
+         if len(matched) != 1:
+             return False, "was not found on the hub!"
+         else:
+             return True, None
+     except Exception as e:
+         print(f"Could not get the model from the hub: {e}")
+         return False, "was not found on the hub!"
requirements.txt ADDED
@@ -0,0 +1,61 @@
+ aiohttp==3.8.4
+ aiosignal==1.3.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ certifi==2023.7.22
+ charset-normalizer==3.2.0
+ cmake==3.26.4
+ contourpy==1.1.0
+ Cython==3.0.0
+ datasets==2.13.1
+ dill==0.3.6
+ filelock==3.12.2
+ fonttools==4.40.0
+ frozenlist==1.4.0
+ fsspec==2023.6.0
+ huggingface-hub==0.16.4
+ idna==3.4
+ Jinja2==3.1.2
+ kiwisolver==1.4.4
+ lit==16.0.6
+ MarkupSafe==2.1.3
+ matplotlib==3.7.2
+ mpmath==1.3.0
+ multidict==6.0.4
+ multiprocess==0.70.14
+ networkx==3.1
+ numpy==1.25.2
+ nvidia-cublas-cu11==11.10.3.66
+ nvidia-cuda-cupti-cu11==11.7.101
+ nvidia-cuda-nvrtc-cu11==11.7.99
+ nvidia-cuda-runtime-cu11==11.7.99
+ nvidia-cudnn-cu11==8.5.0.96
+ nvidia-cufft-cu11==10.9.0.58
+ nvidia-curand-cu11==10.2.10.91
+ nvidia-cusolver-cu11==11.4.0.1
+ nvidia-cusparse-cu11==11.7.4.91
+ nvidia-nccl-cu11==2.14.3
+ nvidia-nvtx-cu11==11.7.91
+ packaging==23.1
+ pandas==2.0.3
+ Pillow==10.0.0
+ pyarrow==12.0.1
+ python-dateutil==2.8.2
+ pytz==2023.3
+ PyYAML==6.0.1
+ regex==2023.6.3
+ requests==2.31.0
+ responses==0.18.0
+ safetensors==0.3.1
+ six==1.16.0
+ sympy==1.12
+ tokenizers==0.13.3
+ torch==2.0.1
+ torchvision==0.15.2
+ tqdm==4.65.0
+ triton==2.0.0
+ typing_extensions==4.7.1
+ tzdata==2023.3
+ urllib3==2.0.4
+ xxhash==3.2.0
+ yarl==1.9.2
utils_display.py ADDED
@@ -0,0 +1,40 @@
+ from dataclasses import dataclass
+
+ # These classes hold the user-facing column names, to avoid having to change
+ # them all around the code when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+ @dataclass(frozen=True)
+ class AutoEvalColumn:  # Auto evals column
+     model = ColumnContent("Model", "markdown")
+     avg_wer = ColumnContent("Average WER ⬇️", "number")
+     rtf = ColumnContent("RTF (1e-3) ⬇️", "number")
+     ami_wer = ColumnContent("AMI", "number")
+     e22_wer = ColumnContent("Earnings22", "number")
+     gs_wer = ColumnContent("Gigaspeech", "number")
+     lsc_wer = ColumnContent("LS Clean", "number")
+     lso_wer = ColumnContent("LS Other", "number")
+     ss_wer = ColumnContent("SPGISpeech", "number")
+     tl_wer = ColumnContent("Tedlium", "number")
+     vp_wer = ColumnContent("Voxpopuli", "number")
+     cv_wer = ColumnContent("Common Voice", "number")
+
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
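As a closing usage note, app.py builds its `COLS` and `TYPES` lists from `AutoEvalColumn` via `fields()`. A small, hypothetical sanity-check sketch (assuming `utils_display.py` is importable from the working directory; the model name is an assumed example) shows what those helpers return:

```python
# Hypothetical sanity check, not part of this commit.
from utils_display import AutoEvalColumn, fields, make_clickable_model

# fields() walks the class __dict__ and filters out dunder entries, so it
# returns the ColumnContent instances in declaration order.
cols = [c.name for c in fields(AutoEvalColumn)]
types = [c.type for c in fields(AutoEvalColumn)]
print(cols[:3])   # ['Model', 'Average WER ⬇️', 'RTF (1e-3) ⬇️']
print(types[:3])  # ['markdown', 'number', 'number']

# The "markdown" column type is what lets the Gradio Dataframe render the
# HTML link produced by make_clickable_model for the model column.
print(make_clickable_model("openai/whisper-large-v2"))
```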