wasertech committed on
Commit
2bac61c
1 Parent(s): c5c4284

Use abs delta

Browse files
Files changed (2) hide show
  1. app.py +6 -20
  2. constants.py +14 -0
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import pandas as pd
3
  import json
4
- from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
5
  from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
6
  from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
7
  from datetime import datetime, timezone
@@ -131,11 +131,11 @@ for col in original_df.columns:
131
  original_df.rename(columns=column_names, inplace=True)
132
 
133
  # Compute delta between average WER and CV WER
134
- original_df['Detla Avg. C.V. WER'] = original_df['Average WER ⬇️'] - original_df['Common Voice WER ⬇️']
135
- original_df['Detla Avg. C.V. WER'] = pd.to_numeric(original_df['Detla Avg. C.V. WER'], errors='coerce') # Convert to numerical data type
136
- original_df['Detla Avg. C.V. WER'] = original_df['Detla Avg. C.V. WER'].apply(lambda x: round(x, 2) if not pd.isna(x) else x) # Round and handle NaN values
137
 
138
- original_df.sort_values(by='Detla Avg. C.V. WER', inplace=True)
139
 
140
 
141
  COLS = [c.name for c in fields(AutoEvalColumn)]
@@ -195,21 +195,7 @@ def request_model(model_text, chbcoco2017):
195
 
196
  with gr.Blocks() as demo:
197
  gr.HTML(BANNER, elem_id="banner")
198
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
199
-
200
- CUSTOM_MESSAGE = """## Legend:
201
- This space is a fork of the original [hf-audio/open_asr_leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard). It aims to demonstrate how the CommonVoice Test Set provides a relatively accurate approximation of the average WER/CER (Word Error Rate/Character Error Rate) at a significantly lower computational cost.
202
-
203
- #### Why is this useful?
204
- This space offers a way to achieve standardized test set for most languages, enabling us to programmatically select a reasonably effective model for any language supported by CommonVoice.
205
-
206
- Model, RTF (1e-3) ⬇️, and Average WER ⬇️ were sourced from [hf-audio/open_asr_leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) using the version from September 7, 2023.
207
-
208
- ### Results
209
- The CommonVoice Test provides a Word Error Rate (WER) within a 20-point margin of the average WER.
210
-
211
- While not perfect, this indicates that CommonVoice can be a useful tool for quickly identifying a suitable ASR model for a wide range of languages in a programmatic manner. However, it's important to note that it is not sufficient as the sole criterion for choosing the most appropriate architecture. Further considerations may be needed depending on the specific requirements of your ASR application.
212
- """
213
  gr.Markdown(CUSTOM_MESSAGE, elem_classes="markdown-text")
214
 
215
 
 
1
  import gradio as gr
2
  import pandas as pd
3
  import json
4
+ from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS, CUSTOM_MESSAGE
5
  from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
6
  from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
7
  from datetime import datetime, timezone
 
131
  original_df.rename(columns=column_names, inplace=True)
132
 
133
  # Compute delta between average WER and CV WER
134
 + original_df['Abs. Delta WER'] = abs(original_df['Average WER ⬇️'] - original_df['Common Voice WER ⬇️'])
135
 + original_df['Abs. Delta WER'] = pd.to_numeric(original_df['Abs. Delta WER'], errors='coerce') # Convert to numerical data type
136
 + original_df['Abs. Delta WER'] = original_df['Abs. Delta WER'].apply(lambda x: round(x, 2) if not pd.isna(x) else x) # Round and handle NaN values
137
 
138
 + original_df.sort_values(by='Abs. Delta WER', inplace=True)
139
 
140
 
141
  COLS = [c.name for c in fields(AutoEvalColumn)]
 
195
 
196
  with gr.Blocks() as demo:
197
  gr.HTML(BANNER, elem_id="banner")
198
+ gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
  gr.Markdown(CUSTOM_MESSAGE, elem_classes="markdown-text")
200
 
201
 
constants.py CHANGED
@@ -96,3 +96,17 @@ are ranked based on their average WER scores, from lowest to highest.
96
 
97
  For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352).
98
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
  For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352).
98
  """
99
+
100
+ CUSTOM_MESSAGE = """## Legend:
101
+ This space is a fork of the original [hf-audio/open_asr_leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard). It aims to demonstrate how the CommonVoice Test Set provides a relatively accurate approximation of the average WER/CER (Word Error Rate/Character Error Rate) at a significantly lower computational cost.
102
+
103
+ #### Why is this useful?
104
 + This space offers a way to achieve a standardized test set for most languages, enabling us to programmatically select a reasonably effective model for any language supported by CommonVoice.
105
+
106
+ Model, RTF (1e-3) ⬇️, and Average WER ⬇️ were sourced from [hf-audio/open_asr_leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) using the version from September 7, 2023.
107
+
108
+ ### Results
109
+ The CommonVoice Test provides a Word Error Rate (WER) within a 20-point margin of the average WER.
110
+
111
+ While not perfect, this indicates that CommonVoice can be a useful tool for quickly identifying a suitable ASR model for a wide range of languages in a programmatic manner. However, it's important to note that it is not sufficient as the sole criterion for choosing the most appropriate architecture. Further considerations may be needed depending on the specific requirements of your ASR application.
112
+ """