import re
import streamlit as st
import requests
import pandas as pd
from io import StringIO
import plotly.graph_objs as go
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError

from yall import create_yall



def convert_markdown_table_to_dataframe(md_content):
    """
    Convert a markdown table into a pandas DataFrame, handling special characters
    and links, and extract Hugging Face model URLs into a new 'URL' column.
    """
    # Remove leading and trailing | characters
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Create DataFrame from cleaned content
    df = pd.read_csv(StringIO(cleaned_content), sep=r"\|", engine='python')

    # Remove the first row after the header
    df = df.drop(0, axis=0)

    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)

    # Clean Model column to have only the model link text
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))

    return df
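
# A minimal sketch of the markdown layout the parser above expects (an assumption based
# on the regex: a header row, a separator row, and one row per model whose Model cell
# holds two links, the model page followed by a results link). Names and scores below
# are purely illustrative:
#
#   | Model | Average | AGIEval | GPT4All | TruthfulQA | Bigbench |
#   |---|---|---|---|---|---|
#   | [example-7B](https://huggingface.co/org/example-7B) [gist](https://gist.github.com/...) | 50.0 | 40.0 | 65.0 | 50.0 | 45.0 |
#
# convert_markdown_table_to_dataframe(md_content) then yields 'Model', 'URL' and the
# score columns (still as strings at this point; main() converts them with pd.to_numeric).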

@st.cache_data
def get_model_info(df):
    api = HfApi()

    # Initialize new columns for likes and tags
    df['Likes'] = None
    df['Tags'] = None

    # Iterate through DataFrame rows
    for index, row in df.iterrows():
        model = row['Model'].strip()
        try:
            model_info = api.model_info(repo_id=str(model))
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)

        except (RepositoryNotFoundError, RevisionNotFoundError):
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''

    return df
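
# Note: get_model_info is cached with st.cache_data, so the Hub is only queried again when
# the underlying table changes. Repos that cannot be resolved keep their row, with
# Likes = -1 and empty Tags, instead of being dropped.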



def create_bar_chart(df, category):
    """Create and display a bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient (using the 'Inferno' color scale)
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Inferno')
    ))

    # Update layout for better readability and scale the height with the number of rows
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        height=max(400, 35 * len(sorted_df)),
    )

    st.plotly_chart(fig, use_container_width=True)

# Example usage:
# create_bar_chart(your_dataframe, 'Your_Category')


def main():
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")

    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite.")
    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])

    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Display dataframe
                full_df = convert_markdown_table_to_dataframe(content)
                for col in score_columns:
                    # Convert the score strings to numbers, coercing invalid values to NaN
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')
                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
                df = pd.DataFrame(columns=full_df.columns)

                # Toggles
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    show_phi = st.checkbox("Phi (2.8B)", value=False)
                with col2:
                    show_mistral = st.checkbox("Mistral (7B)", value=False)
                with col3:
                    show_llama = st.checkbox("Llama 3 (8B)", value=True)
                with col4:
                    show_other = st.checkbox("Other", value=False)

                # Create a DataFrame based on selected filters
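                # Tags come back from get_model_info as a ", "-joined string, so the trailing
                # comma in patterns like 'mistral,' matches a tag immediately followed by
                # another one (a family tag in the final position has no trailing comma and
                # will not match).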
                dfs_to_concat = []

                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')])
                if show_llama:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('llama,')])
                if show_other:
                    # "Other": anything not already covered by the Phi, Mistral, or Llama toggles
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,|llama,')]
                    dfs_to_concat.append(other_df)

                # Concatenate the DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)

                # Sort values
                df = df.sort_values(by='Average', ascending=False)

                # Add a search bar
                search_query = st.text_input("Search models", "")

                # Filter the DataFrame based on the search query
                if search_query:
                    df = df[df['Model'].str.contains(search_query, case=False, regex=False)]

                # Display the filtered DataFrame or the entire leaderboard
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❀️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=int(len(df) * 36.2),
                )

                # Comparison between models
                selected_models = st.multiselect('Select models to compare', df['Model'].unique())
                comparison_df = df[df['Model'].isin(selected_models)]
                st.dataframe(
                    comparison_df,
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❀️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                )
                
                # Add a button to export data to CSV
                if st.button("Export to CSV"):
                    # Export the DataFrame to CSV
                    csv_data = df.to_csv(index=False)

                    # Create a link to download the CSV file
                    st.download_button(
                        label="Download CSV",
                        data=csv_data,
                        file_name="leaderboard.csv",
                        key="download-csv",
                        help="Click to download the CSV file",
                    )

                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])


            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")

    # About tab
    with tab2:
        st.markdown('''
            ### Nous benchmark suite
            
            Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
            
            * [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
            * **GPT4All** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
            * [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
            * [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
            
            ### Reproducibility
            
            You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
            
            ### Clone this space
            
            You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
            
            * Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
            * Create "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens))
            
            A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations and [CultriX](https://huggingface.co/CultriX) for the CSV export and search bar.
        ''')
        
if __name__ == "__main__":
    main()
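
# To run the leaderboard locally (a sketch, assuming this file is saved as app.py and that
# streamlit, pandas, plotly, requests and huggingface_hub are installed alongside the yall
# module from this Space):
#
#   streamlit run app.py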