CultriX committed on
Commit f733a1e • 1 Parent(s): 4de04cf

Upload app.py.bak.py

Files changed (1)
app.py.bak.py +295 -0
app.py.bak.py ADDED
@@ -0,0 +1,295 @@
import streamlit as st
import pandas as pd
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
from itertools import combinations
import re
from functools import cache
from io import StringIO
from yall import create_yall
import plotly.graph_objs as go

def calculate_pages(df, items_per_page):
    return -(-len(df) // items_per_page)  # Equivalent to math.ceil(len(df) / items_per_page)

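# Example: 95 rows at 30 items per page gives -(-95 // 30) == 4 pages, i.e. math.ceil(95 / 30).
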
# Function to get model info from Hugging Face API using caching
@cache
def cached_model_info(api, model):
    try:
        return api.model_info(repo_id=str(model))
    except (RepositoryNotFoundError, RevisionNotFoundError):
        return None

# Function to get model info from DataFrame and update it with likes and tags
@st.cache_data
def get_model_info(df):
    api = HfApi()

    # Initialize new columns for likes and tags
    df['Likes'] = None
    df['Tags'] = None

    for index, row in df.iterrows():
        model_info = cached_model_info(api, row['Model'].strip())
        if model_info:
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        else:
            # Repository not found: mark with sentinel values
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''
    return df

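# Note: cached_model_info (functools.cache) memoizes per (api, model) pair for the lifetime of
# the process, while st.cache_data caches the annotated DataFrame across Streamlit reruns.
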
# Function to convert markdown table to DataFrame and extract Hugging Face URLs
def convert_markdown_table_to_dataframe(md_content):
    """
    Converts a markdown table to a Pandas DataFrame, handling special characters and links,
    extracts Hugging Face URLs, and adds them to a new column.
    """
    # Remove leading and trailing | characters
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Create DataFrame from cleaned content
    df = pd.read_csv(StringIO(cleaned_content), sep=r"\|", engine='python')

    # Remove the separator row (the |---|---| line) that follows the header
    df = df.drop(0, axis=0)

    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)

    # Clean Model column to have only the model link text
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))

    return df

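# Illustrative sketch of the expected input row format (an assumption, not copied from the live gist):
#   | Model | Average | AGIEval | GPT4All | TruthfulQA | Bigbench |
#   |---|---|---|---|---|---|
#   | [example-7B](https://huggingface.co/org/example-7B) [📄](https://gist.github.com/...) | 52.1 | ... |
# The separator row is what df.drop(0, axis=0) discards, and the two adjacent markdown links in the
# Model cell are why model_link_pattern expects a second bracketed link after the model name.
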
# def calculate_highest_combined_score(data, column):
#     score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#     # Ensure the column exists and has numeric data
#     if column not in data.columns or not pd.api.types.is_numeric_dtype(data[column]):
#         return column, {}
#     scores = data[column].dropna().tolist()
#     models = data['Model'].tolist()
#     top_combinations = {r: [] for r in range(2, 5)}
#     for r in range(2, 5):
#         for combination in combinations(zip(scores, models), r):
#             combined_score = sum(score for score, _ in combination)
#             top_combinations[r].append((combined_score, tuple(model for _, model in combination)))
#         top_combinations[r].sort(key=lambda x: x[0], reverse=True)
#         top_combinations[r] = top_combinations[r][:5]
#     return column, top_combinations

# # Modified function to display the results of the highest combined scores using st.dataframe
# def display_highest_combined_scores(data):
#     score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#     with st.spinner('Calculating highest combined scores...'):
#         results = [calculate_highest_combined_score(data, col) for col in score_columns]
#         for column, top_combinations in results:
#             st.subheader(f"Top Combinations for {column}")
#             for r, combinations in top_combinations.items():
#                 # Prepare data for DataFrame
#                 rows = [{'Score': score, 'Models': ', '.join(combination)} for score, combination in combinations]
#                 df = pd.DataFrame(rows)
#
#                 # Display using st.dataframe
#                 st.markdown(f"**Number of Models: {r}**")
#                 st.dataframe(df, height=150)  # Adjust height as necessary

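# The two commented-out helpers above (and the `combinations` import) belong to a disabled
# "highest combined scores" feature; the matching call in main() is commented out as well.
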
# Function to create bar chart for a given category
def create_bar_chart(df, category):
    """Create and display a bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient (using the 'Spectral' color scale)
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Spectral')  # Swap in another Plotly color scale if preferred
    ))

    # Update layout for better readability and scale the chart height to the number of rows
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        height=len(df) * 35
    )

    st.plotly_chart(fig, use_container_width=True)

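# Usage (illustrative): in main() below, create_bar_chart(df, 'Average') renders the current
# page of models as a horizontal bar chart sorted by their 'Average' score.
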
# Main function to run the Streamlit app
def main():
    # Set page configuration and title
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")

    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite.")

    # Create tabs for leaderboard and about section
    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])

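    # create_yall() (from the local yall module) is expected to return the leaderboard as a
    # markdown table string; an empty result falls through to the download-error message below.
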
    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Parse the markdown leaderboard into a DataFrame
                full_df = convert_markdown_table_to_dataframe(content)

                # Convert score columns to numeric values
                for col in score_columns:
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')

                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
                df = pd.DataFrame(columns=full_df.columns)

                # Toggles for filtering by tags
                show_phi = st.checkbox("Phi (2.8B)", value=True)
                show_mistral = st.checkbox("Mistral (7B)", value=True)
                show_other = st.checkbox("Other", value=True)

                # Create a DataFrame based on selected filters
                dfs_to_concat = []

                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')])
                if show_other:
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')]
                    dfs_to_concat.append(other_df)

                # Concatenate the DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)

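                # Note: 'Tags' is the comma-joined string built in get_model_info, so the patterns
                # above ('phi,', 'mistral,', ...) only match when a comma follows the tag; a model
                # whose matching tag is the last one in its list falls into the "Other" bucket.
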
                # Add a search bar
                search_query = st.text_input("Search models", "")

                # Filter the DataFrame based on the search query
                if search_query:
                    df = df[df['Model'].str.contains(search_query, case=False)]

                # Add a selectbox for page selection
                items_per_page = 30
                pages = calculate_pages(df, items_per_page)
                page = st.selectbox("Page", list(range(1, pages + 1)))

                # Sort the DataFrame by 'Average' column in descending order
                df = df.sort_values(by='Average', ascending=False)

                # Slice the DataFrame based on the selected page
                start = (page - 1) * items_per_page
                end = start + items_per_page
                df = df[start:end]

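                # Example: with 30 items per page, page 2 shows rows 30-59 of the sorted table.
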
                # Display the filtered DataFrame or the entire leaderboard
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❤️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=len(df) * 37,
                )

                # Let users pick models for a side-by-side comparison
                selected_models = st.multiselect('Select models to compare', df['Model'].unique())
                comparison_df = df[df['Model'].isin(selected_models)]
                st.dataframe(comparison_df)

                # Add a button to export data to CSV
                if st.button("Export to CSV"):
                    # Export the full (unfiltered) DataFrame to CSV
                    csv_data = full_df.to_csv(index=False)

                    # Create a link to download the CSV file
                    st.download_button(
                        label="Download CSV",
                        data=csv_data,
                        file_name="leaderboard.csv",
                        key="download-csv",
                        help="Click to download the CSV file",
                    )

                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])

                # display_highest_combined_scores(full_df)  # Call to display the calculated scores
            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")

    # About tab
    with tab2:
        st.markdown('''
### Nous benchmark suite
Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`

### Reproducibility
You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). It uploads the results to GitHub as gists. You can find the entire table with links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).

### Clone this space
You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create a "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens)).

A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
''')


# Run the main function if this script is run directly
if __name__ == "__main__":
    main()
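
# To run this locally (assuming the backup is restored as app.py and the dependencies above are
# installed): streamlit run app.py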