mlabonne and CultriX committed
Commit 10974a0 • 1 Parent(s): 5671b0a

Upload app.py (#7)


- Upload app.py (d99d0376d489de9ff899d60ce46d59ec04bc4621)


Co-authored-by: CultriX <CultriX@users.noreply.huggingface.co>

Files changed (1)
  1. app.py +58 -45
app.py CHANGED
@@ -10,6 +10,7 @@ from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
 from yall import create_yall
 
 
+
 def convert_markdown_table_to_dataframe(md_content):
     """
     Converts markdown table to Pandas DataFrame, handling special characters and links,
@@ -36,10 +37,10 @@ def convert_markdown_table_to_dataframe(md_content):
 
     return df
 
-@st.cache_data
+@st.cache_data
 def get_model_info(df):
     api = HfApi()
-
+
     # Initialize new columns for likes and tags
     df['Likes'] = None
     df['Tags'] = None
@@ -58,7 +59,8 @@ def get_model_info(df):
 
    return df
 
-
+
+
 def create_bar_chart(df, category):
     """Create and display a bar chart for a given category."""
     st.write(f"### {category} Scores")
@@ -66,12 +68,12 @@ def create_bar_chart(df, category):
     # Sort the DataFrame based on the category score
     sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)
 
-    # Create the bar chart with color gradient
+    # Create the bar chart with a color gradient (using 'Viridis' color scale as an example)
     fig = go.Figure(go.Bar(
         x=sorted_df[category],
         y=sorted_df['Model'],
         orientation='h',
-        marker=dict(color=sorted_df[category], colorscale='Inferno')
+        marker=dict(color=sorted_df[category], colorscale='Twilight')  # You can change 'Viridis' to another color scale
     ))
 
     # Update layout for better readability
@@ -79,15 +81,18 @@ def create_bar_chart(df, category):
         margin=dict(l=20, r=20, t=20, b=20)
     )
 
-    st.plotly_chart(fig, use_container_width=True, height=len(df)*35)
+    # Adjust the height of the chart based on the number of rows in the DataFrame
+    st.plotly_chart(fig, use_container_width=True, height=len(df) * 35)
+
+    # Example usage:
+    # create_bar_chart(your_dataframe, 'Your_Category')
+
 
-
 def main():
     st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
 
     st.title("🏆 YALL - Yet Another LLM Leaderboard")
-    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")
-
+    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")
     content = create_yall()
     tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])
 
@@ -96,7 +101,7 @@ def main():
         if content:
             try:
                 score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
-
+
                 # Display dataframe
                 full_df = convert_markdown_table_to_dataframe(content)
                 for col in score_columns:
@@ -105,18 +110,15 @@ def main():
                 full_df = get_model_info(full_df)
                 full_df['Tags'] = full_df['Tags'].fillna('')
                 df = pd.DataFrame(columns=full_df.columns)
-
-                # Toggles
-                col1, col2, col3 = st.columns(3)
-                with col1:
-                    show_phi = st.checkbox("Phi (2.8B)", value=True)
-                with col2:
-                    show_mistral = st.checkbox("Mistral (7B)", value=True)
-                with col3:
-                    show_other = st.checkbox("Other", value=True)
 
+                # Toggles for filtering by tags
+                show_phi = st.checkbox("Phi (2.8B)", value=True)
+                show_mistral = st.checkbox("Mistral (7B)", value=True)
+                show_other = st.checkbox("Other", value=True)
+
+                # Create a DataFrame based on selected filters
                 dfs_to_concat = []
-
+
                 if show_phi:
                     dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                 if show_mistral:
@@ -124,15 +126,19 @@ def main():
                 if show_other:
                     other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')]
                     dfs_to_concat.append(other_df)
-
+
                 # Concatenate the DataFrames
                 if dfs_to_concat:
                     df = pd.concat(dfs_to_concat, ignore_index=True)
-
-                # Sort values
-                df = df.sort_values(by='Average', ascending=False)
-
-                # Display the DataFrame
+
+                # Add a search bar
+                search_query = st.text_input("Search models", "")
+
+                # Filter the DataFrame based on the search query
+                if search_query:
+                    df = df[df['Model'].str.contains(search_query, case=False)]
+
+                # Display the filtered DataFrame or the entire leaderboard
                 st.dataframe(
                     df[['Model'] + score_columns + ['Likes', 'URL']],
                     use_container_width=True,
@@ -145,26 +151,41 @@ def main():
                         "URL": st.column_config.LinkColumn("URL"),
                     },
                     hide_index=True,
-                    height=len(df)*37,
+                    height=len(df) * 37,
                 )
 
+                # Add a button to export data to CSV
+                if st.button("Export to CSV"):
+                    # Export the DataFrame to CSV
+                    csv_data = df.to_csv(index=False)
+
+                    # Create a link to download the CSV file
+                    st.download_button(
+                        label="Download CSV",
+                        data=csv_data,
+                        file_name="leaderboard.csv",
+                        key="download-csv",
+                        help="Click to download the CSV file",
+                    )
+
                 # Full-width plot for the first category
                 create_bar_chart(df, score_columns[0])
-
+
                 # Next two plots in two columns
                 col1, col2 = st.columns(2)
                 with col1:
                     create_bar_chart(df, score_columns[1])
                 with col2:
                     create_bar_chart(df, score_columns[2])
-
+
                 # Last two plots in two columns
                 col3, col4 = st.columns(2)
                 with col3:
                     create_bar_chart(df, score_columns[3])
                 with col4:
                     create_bar_chart(df, score_columns[4])
-
+
+
             except Exception as e:
                 st.error("An error occurred while processing the markdown table.")
                 st.error(str(e))
@@ -176,26 +197,18 @@ def main():
         st.markdown('''
 ### Nous benchmark suite
 
-Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
-
-* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
-* **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
-* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
-* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
-
+Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
+* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math` * **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
+* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc` * [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
 ### Reproducibility
 
-You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
-
+You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
 ### Clone this space
 
 You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
 
-* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
-* Create "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens))
-
-A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
-        ''')
+* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126). * Create "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens))
+A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations. ''')
 
 if __name__ == "__main__":
-    main()
+    main()
 
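The `@st.cache_data` decorator added above `get_model_info()` memoizes the Hugging Face Hub lookups, so likes and tags are fetched once per distinct leaderboard table rather than on every Streamlit rerun. Below is a minimal sketch of that pattern; the loop body is an assumption based on the columns the function initializes (`Likes`, `Tags`) and on the `HfApi`/`RepositoryNotFoundError` imports visible in the hunk headers, not the Space's exact code.

```python
import pandas as pd
import streamlit as st
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError


@st.cache_data
def get_model_info(df: pd.DataFrame) -> pd.DataFrame:
    """Fetch likes and tags from the Hub once per unique input DataFrame."""
    api = HfApi()
    df = df.copy()  # avoid mutating the caller's frame on a cache miss
    df["Likes"] = None
    df["Tags"] = None
    for index, row in df.iterrows():
        try:
            info = api.model_info(row["Model"])  # repo id such as the hypothetical "someuser/some-7b-merge"
            df.at[index, "Likes"] = info.likes
            df.at[index, "Tags"] = ", ".join(info.tags or [])
        except RepositoryNotFoundError:
            pass  # keep None for repos that were renamed or removed
    return df
```

`st.cache_data` hashes the function arguments and hands callers a copy of the cached result, so reruns with the same table skip the per-row Hub calls entirely.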
 
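In `create_bar_chart()`, the commit switches the marker colorscale from 'Inferno' to 'Twilight' (the accompanying comments still mention 'Viridis') and passes `height=len(df) * 35` to `st.plotly_chart()`. A self-contained sketch of the same chart is below; it sets the height on the figure layout instead, since chart height is a Plotly layout property rather than a documented `st.plotly_chart` argument.

```python
import pandas as pd
import plotly.graph_objects as go
import streamlit as st


def create_bar_chart(df: pd.DataFrame, category: str) -> None:
    """Horizontal bar chart of one score column, colored by the same values."""
    st.write(f"### {category} Scores")
    sorted_df = df[["Model", category]].sort_values(by=category, ascending=True)
    fig = go.Figure(
        go.Bar(
            x=sorted_df[category],
            y=sorted_df["Model"],
            orientation="h",
            marker=dict(color=sorted_df[category], colorscale="Twilight"),
        )
    )
    # Roughly 35 px per model, with a floor so small tables stay readable
    fig.update_layout(
        height=max(400, len(df) * 35),
        margin=dict(l=20, r=20, t=20, b=20),
    )
    st.plotly_chart(fig, use_container_width=True)
```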
 
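The model-family toggles become three plain `st.checkbox` calls instead of checkboxes spread across `st.columns(3)`, and the selected subsets are stitched back together with `pd.concat`. A sketch of that filter, assuming `Tags` holds a comma-separated tag string with missing values already filled with `''` (as the app does beforehand); the helper name is mine:

```python
import pandas as pd
import streamlit as st


def filter_by_family(full_df: pd.DataFrame) -> pd.DataFrame:
    """Keep only the model families whose checkbox is ticked."""
    show_phi = st.checkbox("Phi (2.8B)", value=True)
    show_mistral = st.checkbox("Mistral (7B)", value=True)
    show_other = st.checkbox("Other", value=True)

    tags = full_df["Tags"].str.lower()
    is_phi = tags.str.contains("phi,|phi-msft,")  # regex alternation, as in the diff
    is_mistral = tags.str.contains("mistral,")

    dfs_to_concat = []
    if show_phi:
        dfs_to_concat.append(full_df[is_phi])
    if show_mistral:
        dfs_to_concat.append(full_df[is_mistral])
    if show_other:
        # equivalent to ~contains('phi,|phi-msft,|mistral,') used in the diff
        dfs_to_concat.append(full_df[~is_phi & ~is_mistral])

    if dfs_to_concat:
        return pd.concat(dfs_to_concat, ignore_index=True)
    return pd.DataFrame(columns=full_df.columns)
```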
 
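The new search box filters the concatenated table with a case-insensitive `str.contains` on the model name. The commit also drops the old `df = df.sort_values(by='Average', ascending=False)` step, so the table is no longer explicitly re-sorted before display. The sketch below combines both; `regex=False` is my addition so the query is treated as a literal string (the Space's own call uses the default, which interprets it as a regex), and the final sort simply restores the previous ordering for anyone who wants it back.

```python
import pandas as pd
import streamlit as st


def search_and_sort(df: pd.DataFrame) -> pd.DataFrame:
    """Filter the leaderboard by a substring of the model name, then sort by Average."""
    search_query = st.text_input("Search models", "")
    if search_query:
        mask = df["Model"].str.contains(search_query, case=False, regex=False, na=False)
        df = df[mask]
    # Optional: keep the pre-commit behaviour of listing the best average score first
    return df.sort_values(by="Average", ascending=False, ignore_index=True)
```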
 
 
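The CSV export nests `st.download_button` inside `if st.button("Export to CSV"):`. Since `st.button` only returns `True` for the single rerun triggered by its own click, the download button rendered this way appears for one script run and vanishes on the next interaction, so exporting effectively takes two clicks. A standalone `st.download_button` avoids that; a minimal sketch (the `mime` value and helper name are mine, not from the diff):

```python
import pandas as pd
import streamlit as st


def csv_export(df: pd.DataFrame) -> None:
    """Offer the rows currently shown in the leaderboard as a CSV download."""
    csv_data = df.to_csv(index=False)
    st.download_button(
        label="Download CSV",
        data=csv_data,
        file_name="leaderboard.csv",
        mime="text/csv",
        key="download-csv",
        help="Click to download the CSV file",
    )
```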
 
 