sherzod-hakimov committed on
Commit 3bf3a79
1 Parent(s): 8073e7d

Upload 8 files


updated for trend plot

Files changed (8)
  1. README.md +1 -1
  2. app.py +46 -26
  3. leaderboard_utils.py +137 -0
  4. plot_utils.py +281 -0
  5. requirements.txt +1 -1
  6. text_content.py +68 -0
  7. trend_utils.py +402 -0
  8. version_utils.py +63 -0
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏆
 colorFrom: yellow
 colorTo: green
 sdk: gradio
-sdk_version: 4.40.0
+sdk_version: 5.8.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -8,15 +8,14 @@ from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CLEMSCORE_TEXT, MU
 from src.leaderboard_utils import query_search, get_github_data
 from src.plot_utils import split_models, plotly_plot, get_plot_df, update_open_models, update_closed_models
 from src.plot_utils import reset_show_all, reset_show_names, reset_show_legend, reset_mobile_view
-from src.version_utils import get_versions_data
+from src.version_utils import get_version_data
+from src.trend_utils import get_final_trend_plot

 """
 CONSTANTS
 """
 # For restarting the gradio application every 24 Hrs
 TIME = 43200 # in seconds # Reload will not work locally - requires HFToken # The app launches locally as expected - only without the reload utility
-# For Leaderboard table
-dataframe_height = 800 # Height of the table in pixels # Set on average considering all possible devices


 """
@@ -33,26 +32,27 @@ def restart_space():
 GITHUB UTILS
 """
 github_data = get_github_data()
-multimodal_leaderboard = github_data["multimodal"][0] # Get multimodal leaderboard for its available latest version.
+multimodal_leaderboard = github_data["multimodal"]["dataframes"][0] # Get the latest version of multimodal leaderboard

-# Show only First 4 columns for the leaderboards
+# Show only First 4 columns for the leaderboard
+# Should be Model Name, Clemscore, %Played, and Quality Score
 multimodal_leaderboard = multimodal_leaderboard.iloc[:, :4]
-print(f"Showing the following columns for the multimodal leaderboard: {multimodal_leaderboard.columns}")


 """
 VERSIONS UTILS
 """
-versions_data = get_versions_data()
-latest_version = versions_data['latest'] # Always show latest version in text-only benchmark
-last_updated_date = versions_data['date']
-version_names = list(versions_data.keys())
-version_names = [v for v in version_names if v.startswith("v")] # Remove "latest" and "date" keys
+versions_data = get_version_data()
+latest_version = versions_data['versions'][0]['name']
+last_updated_date = versions_data['versions'][0]['last_updated'][0]
+version_names = [v['name'] for v in versions_data['versions']]

 global version_df
-version_df = versions_data[latest_version]
+version_df = versions_data['dataframes'][0]
 def select_version_df(name):
-    return versions_data[name]
+    for i, v in enumerate(versions_data['versions']):
+        if v['name'] == name:
+            return versions_data['dataframes'][i]

 """
 MAIN APPLICATION
@@ -64,10 +64,8 @@ with hf_app:
 gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

 with gr.Tabs(elem_classes="tab-buttons") as tabs:
-
-
 """
-####################### SECOND TAB - MULTIMODAL LEADERBOARD #######################
+####################### FIRST TAB - MULTIMODAL LEADERBOARD #######################
 """
 with gr.TabItem(MULTIMODAL_NAME, elem_id="mm-llm-benchmark-tab-table", id=1):
 with gr.Row():
@@ -81,13 +79,12 @@ with hf_app:
 value=multimodal_leaderboard,
 elem_id="mm-leaderboard-table",
 interactive=False,
-visible=True,
-height=dataframe_height
+visible=True
 )

 # Show information about the clemscore and last updated date below the table
 gr.HTML(CLEMSCORE_TEXT)
-gr.HTML(f"Last updated - {github_data['date']}")
+gr.HTML(f"Last updated - {github_data['multimodal']['version_data'][0]['last_updated'][0]}")

 # Add a dummy leaderboard to handle search queries in leaderboard_table
 # This will show a temporary leaderboard based on the searched value
@@ -107,10 +104,9 @@
 )

 """
-####################### THIRD TAB - PLOTS - %PLAYED V/S QUALITY SCORE #######################
+####################### SECOND TAB - PLOTS - %PLAYED V/S QUALITY SCORE #######################
 """
-with gr.TabItem("📈 Plots", elem_id="plots", id=2):
-
+with gr.TabItem("📊 Plots", elem_id="plots", id=2):
 """
 Accordion Groups to select individual models - Hidden by default
 """
@@ -229,7 +225,6 @@
 queue=True
 )

-
 open_models_selection.change(
 reset_show_all,
 outputs=[show_all],
@@ -242,11 +237,37 @@
 queue=True
 )

+"""
+####################### THIRD TAB - TRENDS #######################
+"""
+with gr.TabItem("📈Trends", elem_id="trends-tab", id=3):
+    with gr.Row():
+        mkd_text = gr.Markdown("### Commercial v/s Open-Weight models - clemscore over time. The size of the circles represents the scaled value of the parameters of the models. Larger circles indicate higher parameter values.")
+
+    with gr.Row():
+        trend_plot = gr.Plot(get_final_trend_plot(False, 1200), show_label=False)
+
+    with gr.Row():
+        mobile_view = gr.CheckboxGroup(
+            choices=["Mobile View"],
+            value=[],
+            label="View plot on smaller screens 📱",
+            elem_id="value-select-8",
+            interactive=True,
+        )
+
+    mobile_view.change(
+        get_final_trend_plot,
+        [mobile_view],
+        [trend_plot],
+        queue=True
+    )
+
+
 """
 ####################### FOURTH TAB - VERSIONS AND DETAILS #######################
 """
-with gr.TabItem("🔄 Versions and Details", elem_id="versions-details-tab", id=3):
+with gr.TabItem("🔄 Versions and Details", elem_id="versions-details-tab", id=4):
 with gr.Row():
 version_select = gr.Dropdown(
 version_names, label="Select Version 🕹️", value=latest_version
@@ -262,8 +283,7 @@
 value=version_df,
 elem_id="version-leaderboard-table",
 interactive=False,
-visible=True,
-height=dataframe_height
+visible=True
 )

 dummy_prev_table = gr.Dataframe(
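Note on the new Trends tab wiring above: Gradio passes component values positionally, so mobile_view.change(get_final_trend_plot, [mobile_view], [trend_plot]) hands the CheckboxGroup value (a list) to get_final_trend_plot as its first argument, where any non-empty list is truthy. A minimal sketch of the call shapes this produces (illustrative values only):

from src.trend_utils import get_final_trend_plot

fig_first_paint = get_final_trend_plot(False, 1200)    # initial render: desktop layout, fixed 1200px width
fig_mobile = get_final_trend_plot(["Mobile View"])     # checkbox ticked: mobile layout
fig_desktop = get_final_trend_plot([])                 # checkbox cleared: back to desktop layout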
leaderboard_utils.py ADDED
@@ -0,0 +1,137 @@
+import os
+import pandas as pd
+import requests
+import json
+from io import StringIO
+from datetime import datetime
+
+from src.assets.text_content import REPO, BENCHMARK_FILE
+
+def get_github_data():
+    """
+    Read and process data from CSV files hosted on GitHub - https://github.com/clembench/clembench-runs (REPO).
+    Set the path in src/assets/text_content/REPO
+
+    Returns:
+        github_data (dict): Dictionary with keys "text" and "multimodal"; each maps to a dict with:
+            - "dataframes": List of leaderboard DataFrames, newest version first.
+            - "version_data": List of per-version metadata dicts (name, last_updated, release_date).
+    """
+    json_url = REPO + BENCHMARK_FILE
+    response = requests.get(json_url)
+
+    # Check if the JSON file request was successful
+    if response.status_code != 200:
+        print(f"Failed to read JSON file - {BENCHMARK_FILE} in repo {REPO}: Status Code: {response.status_code}")
+        return None, None, None, None
+
+    json_data = response.json()
+    versions = json_data['versions']
+
+    # Sort the versions in benchmark by latest first
+    version_names = sorted(
+        [ver['version'] for ver in versions],
+        key=lambda v: list(map(int, v[1:].split('_')[0].split('.'))),
+        reverse=True
+    )
+
+    # Collect Dataframes - Text and Multimodal Only - Ignoring _quantized, _backends, _ascii
+    text_data = {
+        'version_data': [],
+        'dataframes': []
+    }
+    multimodal_data = {
+        'version_data': [],
+        'dataframes': []
+    }
+
+    for version in version_names:
+        results_url = f"{REPO}{version}/results.csv"
+        csv_response = requests.get(results_url)
+        if csv_response.status_code == 200:
+            df = pd.read_csv(StringIO(csv_response.text))
+            df = process_df(df)
+            df = df.sort_values(by=df.columns[1], ascending=False) # Sort by Clemscore
+
+            version_data = {
+                'name': version,
+                'last_updated': [datetime.strptime(v['last_updated'], '%Y-%m-%d').strftime("%d %b %Y") for v in versions if v['version'] == version],
+                'release_date': [datetime.strptime(v['release_date'], '%Y-%m-%d').strftime("%d %b %Y") for v in versions if v['version'] == version]
+            }
+
+            if 'multimodal' in version:
+                multimodal_data['dataframes'].append(df)
+                multimodal_data['version_data'].append(version_data)
+            else:
+                text_data['dataframes'].append(df)
+                text_data['version_data'].append(version_data)
+
+
+    github_data = {
+        'text': text_data,
+        'multimodal': multimodal_data
+    }
+
+    return github_data
+
+
+def process_df(df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Process dataframe:
+    - Convert datatypes to sort by "float" instead of "str"
+    - Remove repetition in model names
+    - Update column names
+
+    Args:
+        df: Unprocessed Dataframe (after using update_cols)
+
+    Returns:
+        df: Processed Dataframe
+    """
+
+    # Convert column values to float, apart from the model names column
+    for col in df.columns[1:]:
+        df[col] = pd.to_numeric(df[col], errors='coerce')
+
+    # Remove repetition in model names
+    df[df.columns[0]] = df[df.columns[0]].str.replace('-t0.0', '', regex=True)
+    df[df.columns[0]] = df[df.columns[0]].apply(lambda x: '--'.join(set(x.split('--'))))
+
+    # Update column names
+    custom_column_names = ['Model', 'Clemscore', '% Played', 'Quality Score']
+    for i, col in enumerate(df.columns[4:]): # Start Capitalizing from the 5th column
+        parts = col.split(',')
+        custom_name = f"{parts[0].strip().capitalize()} {parts[1].strip()}"
+        custom_column_names.append(custom_name)
+
+    # Rename columns
+    df.columns = custom_column_names
+
+    return df
+
+
+def query_search(df: pd.DataFrame, query: str) -> pd.DataFrame:
+    """
+    Filter the dataframe based on the search query.
+
+    Args:
+        df (pd.DataFrame): Unfiltered dataframe.
+        query (str): A string of queries separated by ";".
+    Returns:
+        pd.DataFrame: Filtered dataframe containing searched queries in the 'Model' column.
+    """
+    if not query.strip(): # Reset Dataframe if empty query is passed
+        return df
+
+    queries = [q.strip().lower() for q in query.split(';') if q.strip()] # Normalize and split queries
+
+    # Filter dataframe based on queries in 'Model' column
+    filtered_df = df[df['Model'].str.lower().str.contains('|'.join(queries))]
+
+    return filtered_df
+
+if __name__=='__main__':
+    data = get_github_data()
+    print(data['text']['version_data'])
+    print(data['multimodal']['version_data'])
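For reference, a minimal sketch of how the structure returned by get_github_data() is consumed (key names as built above; assumes the GitHub requests succeed):

from src.leaderboard_utils import get_github_data

github_data = get_github_data()

# Entries are sorted newest-first, so index 0 is the latest version
latest_mm_df = github_data["multimodal"]["dataframes"][0]
latest_mm_meta = github_data["multimodal"]["version_data"][0]

print(latest_mm_meta["name"])              # version string, e.g. "v1.6_multimodal"
print(latest_mm_meta["last_updated"][0])   # formatted as "DD Mon YYYY"
print(latest_mm_df.columns[:4].tolist())   # ['Model', 'Clemscore', '% Played', 'Quality Score']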
plot_utils.py ADDED
@@ -0,0 +1,281 @@
+import pandas as pd
+import plotly.express as px
+import requests
+import json
+import gradio as gr
+
+from src.assets.text_content import SHORT_NAMES, TEXT_NAME, MULTIMODAL_NAME, REGISTRY_URL
+from src.leaderboard_utils import get_github_data
+
+
+def plotly_plot(df: pd.DataFrame, list_op: list, list_co: list,
+                show_all: list, show_names: list, show_legend: list,
+                mobile_view: list):
+    """
+    Takes in a list of models for a plotly plot
+    Args:
+        df: A dummy dataframe of latest version
+        list_op: The list of open source models to show in the plot, updated from frontend
+        list_co: The list of commercial models to show in the plot, updated from frontend
+        show_all: Either [] or ["Show All Models"] - toggle view to plot all models
+        show_names: Either [] or ["Show Names"] - toggle view to show model names on plot
+        show_legend: Either [] or ["Show Legend"] - toggle view to show legend on plot
+        mobile_view: Either [] or ["Mobile View"] - toggle view for smaller screens
+    Returns:
+        Fig: plotly figure of % played v/s quality score
+    """
+
+    LIST = list_op + list_co
+    # Get list of all models and append short names column to df
+    list_columns = list(df.columns)
+    ALL_LIST = list(df[list_columns[0]].unique())
+    short_names = label_map(ALL_LIST)
+    list_short_names = list(short_names.values())
+    df["Short"] = list_short_names
+
+    if show_all:
+        LIST = ALL_LIST
+    # Filter dataframe based on the provided list of models
+    df = df[df[list_columns[0]].isin(LIST)]
+
+    if show_names:
+        fig = px.scatter(df, x=list_columns[2], y=list_columns[3], color=list_columns[0], symbol=list_columns[0],
+                         color_discrete_map={"category1": "blue", "category2": "red"},
+                         hover_name=list_columns[0], template="plotly_white", text="Short")
+        fig.update_traces(textposition='top center')
+    else:
+        fig = px.scatter(df, x=list_columns[2], y=list_columns[3], color=list_columns[0], symbol=list_columns[0],
+                         color_discrete_map={"category1": "blue", "category2": "red"},
+                         hover_name=list_columns[0], template="plotly_white")
+
+    if not show_legend:
+        fig.update_layout(showlegend=False)
+
+    fig.update_layout(
+        xaxis_title='% Played',
+        yaxis_title='Quality Score',
+        title='Overview of benchmark results',
+        height=1000
+    )
+
+    fig.update_xaxes(range=[-5, 105])
+    fig.update_yaxes(range=[-5, 105])
+
+    if mobile_view:
+        fig.update_layout(height=300)
+
+    if mobile_view and show_legend:
+        fig.update_layout(height=450)
+        fig.update_layout(legend=dict(
+            yanchor="bottom",
+            y=-5.52,
+            xanchor="left",
+            x=0.01
+        ))
+
+        fig.update_layout(
+            xaxis_title="",
+            yaxis_title="",
+            title="% Played v/s Quality Score"
+        )
+
+    return fig
+
+
+def shorten_model_name(full_name):
+    # Split the name into parts
+    parts = full_name.split('-')
+
+    # Process the name parts to keep only the parts with digits (model sizes and versions)
+    short_name_parts = [part for part in parts if any(char.isdigit() for char in part)]
+
+    if len(parts) == 1:
+        short_name = ''.join(full_name[0:min(3, len(full_name))])
+    else:
+        # Join the parts to form the short name
+        short_name = '-'.join(short_name_parts)
+
+        # Remove any leading or trailing hyphens
+        short_name = full_name[0] + '-' + short_name.strip('-')
+
+    return short_name
+
+
+def label_map(model_list: list) -> dict:
+    """
+    Generate a map from long names to short names, to plot them in the frontend graph
+    Define the short names in src/assets/text_content.py
+    Args:
+        model_list: A list of long model names
+    Returns:
+        short_names: A dict from long to short name
+    """
+    short_names = {}
+    for model_name in model_list:
+        if model_name in SHORT_NAMES:
+            short_name = SHORT_NAMES[model_name]
+        else:
+            short_name = shorten_model_name(model_name)
+
+        # Define the short name and indicate both models are same
+        short_names[model_name] = short_name
+
+    return short_names
+
+
+def split_models(model_list: list):
+    """
+    Split the models into open source and commercial
+    """
+    open_models = []
+    commercial_models = []
+
+    # Load model registry data from main repo
+    response = requests.get(REGISTRY_URL)
+
+    if response.status_code == 200:
+        json_data = json.loads(response.text)
+
+        for model_name in model_list:
+            for entry in json_data:
+                if entry["model_name"] == model_name:
+                    open_model = entry["open_weight"]
+
+                    if open_model:
+                        open_models.append(model_name)
+                    else:
+                        commercial_models.append(model_name)
+                    break
+
+    else:
+        print(f"Failed to read JSON file: Status Code : {response.status_code}")
+
+    open_models.sort(key=lambda o: o.upper())
+    commercial_models.sort(key=lambda c: c.upper())
+
+    # Add missing model from the model_registry
+    if "dolphin-2.5-mixtral-8x7b" in model_list:
+        open_models.append("dolphin-2.5-mixtral-8x7b")
+
+    return open_models, commercial_models
+
+"""
+Update Functions, for when the leaderboard selection changes
+"""
+def update_open_models():
+    """
+    Change the checkbox group of Open Models based on the leaderboard selected
+
+    Return:
+        Updated checkbox group for Open Models, based on the leaderboard selected
+    """
+    github_data = get_github_data()
+    leaderboard_data = github_data["multimodal"]['dataframes'][0]
+    models = leaderboard_data.iloc[:, 0].unique().tolist()
+    open_models, _ = split_models(models)
+    return gr.CheckboxGroup(
+        open_models,
+        value=[],
+        elem_id="value-select-1",
+        interactive=True,
+    )
+
+def update_closed_models():
+    """
+    Change the checkbox group of Closed Models based on the leaderboard selected
+
+    Return:
+        Updated checkbox group for Closed Models, based on the leaderboard selected
+    """
+    github_data = get_github_data()
+    leaderboard_data = github_data["multimodal"]['dataframes'][0]
+    models = leaderboard_data.iloc[:, 0].unique().tolist()
+    _, commercial_models = split_models(models)
+    return gr.CheckboxGroup(
+        commercial_models,
+        value=[],
+        elem_id="value-select-2",
+        interactive=True,
+    )
+
+def get_plot_df() -> pd.DataFrame:
+    """
+    Get the DataFrame for plotting based on the selected leaderboard.
+    Returns:
+        DataFrame with model data.
+    """
+    github_data = get_github_data()
+    return github_data["multimodal"]['dataframes'][0]
+
+
+"""
+Reset Functions for when the Leaderboard selection changes
+"""
+def reset_show_all():
+    return gr.CheckboxGroup(
+        ["Select All Models"],
+        label="Show plot for all models 🤖",
+        value=[],
+        elem_id="value-select-3",
+        interactive=True,
+    )
+
+def reset_show_names():
+    return gr.CheckboxGroup(
+        ["Show Names"],
+        label="Show names of models on the plot 🏷️",
+        value=[],
+        elem_id="value-select-4",
+        interactive=True,
+    )
+
+
+def reset_show_legend():
+    return gr.CheckboxGroup(
+        ["Show Legend"],
+        label="Show legend on the plot 💡",
+        value=[],
+        elem_id="value-select-5",
+        interactive=True,
+    )
+
+
+def reset_mobile_view():
+    return gr.CheckboxGroup(
+        ["Mobile View"],
+        label="View plot on smaller screens 📱",
+        value=[],
+        elem_id="value-select-6",
+        interactive=True,
+    )
+
+
+if __name__ == '__main__':
+    mm_model_list = ['gpt-4o-2024-05-13', 'gpt-4-1106-vision-preview', 'claude-3-opus-20240229', 'gemini-1.5-pro-latest',
+                     'gemini-1.5-flash-latest', 'llava-v1.6-34b-hf', 'llava-v1.6-vicuna-13b-hf', 'idefics-80b-instruct',
+                     'llava-1.5-13b-hf', 'idefics-9b-instruct']
+
+    text_model_list = ['vicuna-33b-v1.3', 'gpt-4-0125-preview', 'gpt-4-turbo-2024-04-09', 'claude-3-5-sonnet-20240620', 'gpt-4-1106-preview',
+                       'gpt-4-0613', 'gpt-4o-2024-05-13', 'claude-3-opus-20240229', 'gemini-1.5-pro-latest',
+                       'Meta-Llama-3-70B-Instruct-hf', 'claude-2.1', 'gemini-1.5-flash-latest', 'claude-3-sonnet-20240229',
+                       'Qwen1.5-72B-Chat', 'mistral-large-2402', 'gpt-3.5-turbo-0125', 'gemini-1.0-pro', 'command-r-plus', 'openchat_3.5',
+                       'claude-3-haiku-20240307', 'sheep-duck-llama-2-70b-v1.1', 'Meta-Llama-3-8B-Instruct-hf', 'openchat-3.5-1210',
+                       'WizardLM-70b-v1.0', 'openchat-3.5-0106', 'Qwen1.5-14B-Chat', 'mistral-medium-2312', 'Qwen1.5-32B-Chat',
+                       'codegemma-7b-it', 'dolphin-2.5-mixtral-8x7b', 'CodeLlama-34b-Instruct-hf', 'command-r', 'gemma-1.1-7b-it',
+                       'SUS-Chat-34B', 'Mixtral-8x22B-Instruct-v0.1', 'tulu-2-dpo-70b', 'Nous-Hermes-2-Mixtral-8x7B-SFT',
+                       'WizardLM-13b-v1.2', 'Mistral-7B-Instruct-v0.2', 'Yi-34B-Chat', 'Mixtral-8x7B-Instruct-v0.1',
+                       'Mistral-7B-Instruct-v0.1', 'Yi-1.5-34B-Chat', 'vicuna-13b-v1.5', 'Yi-1.5-6B-Chat', 'Starling-LM-7B-beta',
+                       'sheep-duck-llama-2-13b', 'Yi-1.5-9B-Chat', 'gemma-1.1-2b-it', 'Qwen1.5-7B-Chat', 'gemma-7b-it',
+                       'llama-2-70b-chat-hf', 'Qwen1.5-0.5B-Chat', 'Qwen1.5-1.8B-Chat']
+
+    om, cm = split_models(mm_model_list)
+    print("Open")
+    print(om)
+    print("Closed")
+    print(cm)
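A hedged example of calling plotly_plot directly, outside Gradio (model names are taken from the __main__ list above; the list-typed toggle arguments mirror CheckboxGroup values, where an empty list means "off"):

from src.plot_utils import get_plot_df, plotly_plot

df = get_plot_df()  # latest multimodal leaderboard
fig = plotly_plot(
    df,
    list_op=["llava-v1.6-34b-hf"],     # open-weight models to plot
    list_co=["gpt-4o-2024-05-13"],     # commercial models to plot
    show_all=[],                       # [] -> plot only the selected models
    show_names=["Show Names"],         # annotate points with short names
    show_legend=[],                    # hide the legend
    mobile_view=[],                    # desktop dimensions
)
fig.show()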
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-gradio==4.40.0
+gradio==5.8.0
 pandas==2.2.2
 plotly==5.18.0
 apscheduler==3.10.4
text_content.py ADDED
@@ -0,0 +1,68 @@
+TITLE = """<h1 align="center" id="space-title"> 🏆 Multimodal CLEM Leaderboard</h1>"""
+
+REPO = "https://raw.githubusercontent.com/clembench/clembench-runs/main/"
+HF_REPO = "colab-potsdam/multimodal-clem-leaderboard"
+REGISTRY_URL = "https://raw.githubusercontent.com/clp-research/clembench/refs/heads/main/backends/model_registry.json"
+BENCHMARK_FILE = "benchmark_runs.json"
+
+TEXT_NAME = "🥇 CLEM Leaderboard"
+MULTIMODAL_NAME = "🥇 Multimodal CLEM Leaderboard"
+
+INTRODUCTION_TEXT = """
+<h6 align="center">
+
+The CLEM Leaderboard aims to track, rank and evaluate current cLLMs (chat-optimized Large Language Models) with the suggested pronunciation “clems”.
+
+The multimodal benchmark is described in [Using Game Play to Investigate Multimodal and Conversational Grounding in Large Multimodal Models](https://arxiv.org/abs/2406.14035)
+
+The original benchmarking approach for text-only models is described in [Clembench: Using Game Play to Evaluate Chat-Optimized Language Models as Conversational Agents](https://aclanthology.org/2023.emnlp-main.689.pdf).
+
+Source code for benchmarking "clems" is available here: [Clembench](https://github.com/clembench/clembench)
+
+All generated files and results from the benchmark runs are available here: [clembench-runs](https://github.com/clembench/clembench-runs) </h6>
+"""
+
+CLEMSCORE_TEXT = """
+The <i>clemscore</i> combines a score representing the overall ability to just follow the game instructions (separately scored in field <i>Played</i>) and the quality of the play in attempts where instructions were followed (field <i>Quality Scores</i>). For details about the games / interaction settings, and for results on older versions of the benchmark, see the tab <i>Versions and Details</i>.
+"""
+
+SHORT_NAMES = {
+    "t0.0": "",
+    "claude-v1.3": "cl-1.3",
+    "claude-2": "cl-2",
+    "claude-2.1": "cl-2.1",
+    "claude-instant-1.2": "cl-ins-1.2",
+    "gpt-3.5-turbo-0613": "3.5-0613",
+    "gpt-3.5-turbo-1106": "3.5-1106",
+    "gpt-4-0613": "4-0613",
+    "gpt-4-1106-preview": "4-1106",
+    "gpt-4-0314": "4-0314",
+    "gpt-4": "4",
+    "text-davinci-003": "3",
+    "luminous-supreme": "lm",
+    "koala-13b": "k-13b",
+    "falcon-40b": "fal-40b",
+    "falcon-7b-instruct": "fal-7b",
+    "falcon-40b-instruct": "flc-i-40b",
+    "oasst-12b": "oas-12b",
+    "oasst-sft-4-pythia-12b-epoch-3.5": "ost-12b",
+    "vicuna-13b": "vic-13b",
+    "vicuna-33b-v1.3": "vic-33b-v1.3",
+    "sheep-duck-llama-2-70b-v1.1": "sd-l2-70b-v1.1",
+    "sheep-duck-llama-2-13b": "sd-l2-13b",
+    "WizardLM-70b-v1.0": "w-70b-v1.0",
+    "CodeLlama-34b-Instruct-hf": "cl-34b",
+    "command": "com",
+    "Mistral-7B-Instruct-v0.1": "m-i-7b-v0.1",
+    "Wizard-Vicuna-13B-Uncensored-HF": "vcn-13b",
+    "llama-2-13b-chat-hf": "l2-13b",
+    "llama-2-70b-chat-hf": "l2-70b",
+    "llama-2-7b-chat-hf": "l2-7b",
+    "koala-13B-HF": "k-13b",
+    "WizardLM-13b-v1.2": "w-13b-v1.2",
+    "vicuna-7b-v1.5": "vic-7b-v1.5",
+    "vicuna-13b-v1.5": "vic-13b-v1.5",
+    "gpt4all-13b-snoozy": "g4a-13b-s",
+    "zephyr-7b-alpha": "z-7b-a",
+    "zephyr-7b-beta": "z-7b-b"
+}
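These constants are plain configuration consumed elsewhere in src/; for instance (illustrative sketch):

from src.assets.text_content import REPO, BENCHMARK_FILE, SHORT_NAMES

json_url = REPO + BENCHMARK_FILE  # .../clembench-runs/main/benchmark_runs.json
# SHORT_NAMES backs label_map() in plot_utils.py; unlisted models fall back to shorten_model_name()
print(SHORT_NAMES.get("gpt-4-1106-preview"))  # '4-1106'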
trend_utils.py ADDED
@@ -0,0 +1,402 @@
+## Fetch Model Registry and clemscores
+import requests
+import pandas as pd
+from datetime import datetime
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objects as go
+import numpy as np
+
+from src.assets.text_content import REGISTRY_URL, REPO, BENCHMARK_FILE
+from src.leaderboard_utils import get_github_data
+
+# Cut-off date from where to start the trend graph
+START_DATE = '2023-06-01'
+
+def get_param_size(params: str) -> float:
+    """Convert parameter size from string to float.
+
+    Args:
+        params (str): The parameter size as a string (e.g., '1000B', '1T').
+
+    Returns:
+        float: The size of parameters in float.
+    """
+    if not params:
+        param_size = 0
+    else:
+        if params[-1] == "B":
+            param_size = params[:-1]
+            param_size = float(param_size)
+        elif params[-1] == "T":
+            param_size = params[:-1]
+            param_size = float(param_size)
+            param_size *= 1000
+        else:
+            print("Not a valid parameter size")
+
+    return param_size
+
+def date_difference(date_str1: str, date_str2: str) -> int:
+    """Calculate the difference in days between two dates.
+
+    Args:
+        date_str1 (str): The first date as a string in 'YYYY-MM-DD' format.
+        date_str2 (str): The second date as a string in 'YYYY-MM-DD' format.
+
+    Returns:
+        int: The difference in days between the two dates.
+    """
+    date_format = "%Y-%m-%d"
+    date1 = datetime.strptime(date_str1, date_format)
+    date2 = datetime.strptime(date_str2, date_format)
+    return (date1 - date2).days
+
+
+def populate_list(df: pd.DataFrame, abs_diff: float) -> list:
+    """Create a list of models based on clemscore differences.
+
+    Args:
+        df (pd.DataFrame): DataFrame containing model data.
+        abs_diff (float): The absolute difference threshold for clemscore.
+
+    Returns:
+        list: A list of model names that meet the criteria.
+    """
+    l = [df.iloc[0]['model']]
+    prev_clemscore = df.iloc[0]['clemscore']
+    prev_date = df.iloc[0]['release_date']
+
+    for i in range(1, len(df)):
+        curr_clemscore = df.iloc[i]['clemscore']
+        curr_date = df.iloc[i]['release_date']
+        date_diff = date_difference(curr_date, prev_date)
+
+        if curr_clemscore - prev_clemscore >= abs_diff:
+            if date_diff == 0:
+                l[-1] = df.iloc[i]['model']
+            else:
+                l.append(df.iloc[i]['model'])
+
+            prev_clemscore = curr_clemscore
+            prev_date = curr_date
+
+    # # Add the last model if the difference between the last and previous date is greater than 15 days
+    # last_date = df.iloc[-1]['release_date']
+    # if date_difference(last_date, prev_date) > 15:
+    #     l.append(df.iloc[-1]['model'])
+
+    return l
+
+
+def get_models_to_display(result_df: pd.DataFrame, open_dip: float = 0, comm_dip: float = 0) -> tuple:
+    """Retrieve models to display based on clemscore differences.
+
+    Args:
+        result_df (pd.DataFrame): DataFrame containing model data.
+        open_dip (float, optional): Threshold for open models. Defaults to 0.
+        comm_dip (float, optional): Threshold for commercial models. Defaults to 0.
+
+    Returns:
+        tuple: Two lists of model names (open and commercial).
+    """
+    open_model_df = result_df[result_df['open_weight']==True]
+    comm_model_df = result_df[result_df['open_weight']==False]
+
+    open_model_df = open_model_df.sort_values(by='release_date', ascending=True)
+    comm_model_df = comm_model_df.sort_values(by='release_date', ascending=True)
+    open_models = populate_list(open_model_df, open_dip)
+    comm_models = populate_list(comm_model_df, comm_dip)
+    return open_models, comm_models
+
+
+def get_trend_data(text_dfs: list, model_registry_data: list) -> pd.DataFrame:
+    """Process text data frames to extract model information.
+
+    Args:
+        text_dfs (list): List of DataFrames containing model information.
+        model_registry_data (list): List of dictionaries containing model registry data.
+
+    Returns:
+        pd.DataFrame: DataFrame containing processed model data.
+    """
+    visited = set() # Track models that have been processed
+    result_df = pd.DataFrame(columns=['model', 'clemscore', 'open_weight', 'release_date', 'parameters', 'est_flag'])
+
+    for df in text_dfs:
+        for i in range(len(df)):
+            model_name = df['Model'].iloc[i]
+            if model_name not in visited:
+                visited.add(model_name)
+                for dict_obj in model_registry_data:
+                    if dict_obj["model_name"] == model_name:
+                        if dict_obj["parameters"] == "":
+                            params = "1000B"
+                            est_flag = True
+                        else:
+                            params = dict_obj['parameters']
+                            est_flag = False
+
+                        param_size = get_param_size(params)
+                        new_data = {'model': model_name, 'clemscore': df['Clemscore'].iloc[i], 'open_weight': dict_obj['open_weight'],
+                                    'release_date': dict_obj['release_date'], 'parameters': param_size, 'est_flag': est_flag}
+                        result_df.loc[len(result_df)] = new_data
+                        break
+    return result_df # Return the compiled DataFrame
+
+
+def get_plot(df: pd.DataFrame, start_date: str = '2023-06-01', end_date: str = '2024-12-30',
+             benchmark_ticks: dict = {}, benchmark_update = {}, **plot_kwargs) -> go.Figure:
+    """Generate a scatter plot for the given DataFrame.
+
+    Args:
+        df (pd.DataFrame): DataFrame containing model data.
+        start_date (str, optional): Start date for filtering. Defaults to '2023-06-01'.
+        end_date (str, optional): End date for filtering. Defaults to '2024-12-30'.
+        benchmark_ticks (dict, optional): Custom benchmark ticks for the version dates. Defaults to {}.
+        benchmark_update (dict, optional): Custom benchmark metadata containing last_updated date for the versions. Defaults to {}.
+
+    Keyword Args:
+        open_dip (float, optional): Threshold for open models' clemscore differences. Max dip in clemscore allowed to be considered in trend.
+        comm_dip (float, optional): Threshold for commercial models' clemscore differences. Max dip in clemscore allowed to be considered in trend.
+        height (int, optional): Height of the plot in pixels. Adjusted for mobile or desktop views.
+        mobile_view (bool, optional): Flag to indicate if the plot should be optimized for mobile display. Defaults to False.
+
+    Returns:
+        go.Figure: The generated plot.
+    """
+
+    open_dip = plot_kwargs['open_dip']
+    comm_dip = plot_kwargs['comm_dip']
+    height = plot_kwargs['height']
+    width = plot_kwargs['width']
+
+    mobile_view = True if plot_kwargs['mobile_view'] else False
+
+    max_clemscore = df['clemscore'].max()
+    # Convert 'release_date' to datetime
+    df['Release date'] = pd.to_datetime(df['release_date'], format='ISO8601')
+    # Filter out data before April 2023/START_DATE
+    df = df[df['Release date'] >= pd.to_datetime(start_date)]
+    open_model_list, comm_model_list = get_models_to_display(df, open_dip, comm_dip)
+    models_to_display = open_model_list + comm_model_list
+    print(f"open_model_list: {open_model_list}, comm_model_list: {comm_model_list}")
+
+    # Create a column to indicate if the model should be labeled
+    df['label_model'] = df['model'].apply(lambda x: x if x in models_to_display else "")
+
+    # If mobile_view, then show only the models in models_to_display i.e. on the trend line #minimalistic
+    if mobile_view:
+        df = df[df['model'].isin(models_to_display)]
+
+    # Add an identifier column to each DataFrame
+    df['Model Type'] = df['open_weight'].map({True: 'Open-Weight', False: 'Commercial'})
+
+    marker_size = df['parameters'].apply(lambda x: np.sqrt(x) if x > 0 else np.sqrt(400)).astype(float) # Arbitrary sqrt value to scale marker size based on parameter size
+
+    open_color = 'red'
+    comm_color = 'blue'
+
+    # Create the scatter plot
+    fig = px.scatter(df,
+                     x="Release date",
+                     y="clemscore",
+                     color="Model Type", # Differentiates the datasets by color
+                     hover_name="model",
+                     size=marker_size,
+                     size_max=40, # Max size of the circles
+                     template="plotly_white",
+                     hover_data={ # Customize hover information
+                         "Release date": True, # Show the release date
+                         "clemscore": True, # Show the clemscore
+                         "Model Type": True # Show the model type
+                     },
+                     custom_data=["model", "Release date", "clemscore"] # Specify custom data columns for hover
+                     )
+
+    fig.update_traces(
+        hovertemplate='Model Name: %{customdata[0]}<br>Release date: %{customdata[1]}<br>Clemscore: %{customdata[2]}<br>'
+    )
+
+    # Sort dataframes for line plotting
+    df_open = df[df['model'].isin(open_model_list)].sort_values(by='Release date')
+    df_commercial = df[df['model'].isin(comm_model_list)].sort_values(by='Release date')
+
+    ## Custom ticks for x axis
+    # Define the start and end dates
+    start_date = pd.to_datetime(start_date)
+    end_date = pd.to_datetime(end_date)
+    # Generate ticks every two months
+    date_range = pd.date_range(start=start_date, end=end_date, freq='2MS') # '2MS' stands for 2 Months Start frequency
+    # Create labels for these ticks
+    custom_ticks = {date: date.strftime('%b %Y') for date in date_range}
+
+    ## Benchmark Version ticks
+    benchmark_tickvals = list(pd.to_datetime(list(benchmark_ticks.keys())))
+    custom_ticks = {k: v for k, v in custom_ticks.items() if k not in benchmark_tickvals}
+    custom_tickvals = list(custom_ticks.keys())
+
+    for date, version in benchmark_ticks.items():
+        # Find the corresponding update date from benchmark_update based on the version name
+        update_date = next((update_date for update_date, ver in benchmark_update.items() if version in ver), None)
+
+        if update_date:
+            # Add vertical grey dotted line for each benchmark_tick date
+            fig.add_shape(
+                go.layout.Shape(
+                    type='line',
+                    x0=date,
+                    x1=date,
+                    y0=0,
+                    y1=1,
+                    yref='paper',
+                    line=dict(color='#A9A9A9', dash='dash'), # Grey dotted line
+                )
+            )
+
+            # Add hover information across the full y-axis range
+            fig.add_trace(
+                go.Scatter(
+                    x=[date]*100,
+                    y=list(range(0, 100)), # Covers full y-axis range
+                    mode='markers',
+                    line=dict(color='rgba(255,255,255,0)', width=0), # Fully transparent line
+                    hovertext=[
+                        f"Version: {version} released on {date.strftime('%d %b %Y')}, last updated on: {update_date.strftime('%d %b %Y')}"
+                        for _ in range(100)
+                    ], # Unique hovertext for all points
+                    hoverinfo="text",
+                    hoveron='points',
+                    showlegend=False
+                )
+            )
+
+    if mobile_view:
+        # Remove custom_tickvals within -1 month to +1 month of benchmark_tickvals for better visibility
+        one_month = pd.DateOffset(months=1)
+        filtered_custom_tickvals = [
+            date for date in custom_tickvals
+            if not any((benchmark_date - one_month <= date <= benchmark_date + one_month) for benchmark_date in benchmark_tickvals)
+        ]
+        # Alternate <br> for benchmark ticks based on date difference (e.g. v1.6, v1.6.5 too close to each other for MM benchmark)
+        benchmark_tick_texts = []
+        for i in range(len(benchmark_tickvals)):
+            if i == 0:
+                benchmark_tick_texts.append(f"<br><br><b>{benchmark_ticks[benchmark_tickvals[i]]}</b>")
+            else:
+                date_diff = (benchmark_tickvals[i] - benchmark_tickvals[i - 1]).days
+                if date_diff <= 75:
+                    benchmark_tick_texts.append(f"<br><br><br><b>{benchmark_ticks[benchmark_tickvals[i]]}</b>")
+                else:
+                    benchmark_tick_texts.append(f"<br><br><b>{benchmark_ticks[benchmark_tickvals[i]]}</b>")
+        fig.update_xaxes(
+            tickvals=filtered_custom_tickvals + benchmark_tickvals, # Use filtered_custom_tickvals
+            ticktext=[f"{date.strftime('%b')}<br>{date.strftime('%y')}" for date in filtered_custom_tickvals] +
+                     benchmark_tick_texts, # Use the new benchmark tick texts
+            tickangle=0,
+            tickfont=dict(size=10)
+        )
+        fig.update_yaxes(range=[0, 110]) # Set y-axis range to 110 for better visibility of legend and avoiding overlap with interactivity block of plotly on top-right
+        display_mode = 'lines+markers'
+    else:
+        fig.update_xaxes(
+            tickvals=custom_tickvals + benchmark_tickvals,
+            ticktext=[f"{date.strftime('%b')} {date.strftime('%Y')}" for date in custom_tickvals] +
+                     [f"<br><span style='font-size:12px;'><b>{benchmark_ticks[date]}</b></span>" for date in benchmark_tickvals], # Added <br> for vertical alignment
+            tickangle=0,
+            tickfont=dict(size=10)
+        )
+        fig.update_yaxes(range=[0, max_clemscore+10])
+        display_mode = 'lines+markers+text'
+
+
+    # Add lines connecting the points for open models
+    fig.add_trace(go.Scatter(x=df_open['Release date'], y=df_open['clemscore'],
+                             mode=display_mode, # Include 'text' in the mode
+                             name='Open Models Trendline',
+                             text=df_open['label_model'], # Use label_model for text labels
+                             textposition='top center', # Position of the text labels
+                             line=dict(color=open_color), showlegend=False))
+
+    # Add lines connecting the points for commercial models
+    fig.add_trace(go.Scatter(x=df_commercial['Release date'], y=df_commercial['clemscore'],
+                             mode=display_mode, # Include 'text' in the mode
+                             name='Commercial Models Trendline',
+                             text=df_commercial['label_model'], # Use label_model for text labels
+                             textposition='top center', # Position of the text labels
+                             line=dict(color=comm_color), showlegend=False))
+
+
+    # Update layout to ensure text labels are visible
+    fig.update_traces(textposition='top center')
+
+    # Update the Legend Position and plot dimensions
+    fig.update_layout(height=height,
+                      legend=dict(
+                          yanchor="top",
+                          y=0.99,
+                          xanchor="left",
+                          x=0.01
+                      )
+                      )
+
+    if width:
+        print("Custom Setting Width :")
+        fig.update_layout(width=width)
+
+    return fig
+
+def get_final_trend_plot(mobile_view: bool = False, custom_width: int = 0) -> go.Figure:
+    """Fetch and generate the final trend plot for all models.
+
+    Args:
+        mobile_view (bool, optional): Flag to indicate mobile view. Defaults to False.
+        custom_width (int, optional): The custom width to use when loading the graph for the first time.
+
+    Returns:
+        go.Figure: The generated trend plot for the selected benchmark.
+    """
+    # Fetch Model Registry
+    response = requests.get(REGISTRY_URL)
+    model_registry_data = response.json()
+    # Custom tick labels
+    json_url = REPO + BENCHMARK_FILE
+    response = requests.get(json_url)
+
+    # Check if the JSON file request was successful
+    if response.status_code != 200:
+        print(f"Failed to read JSON file: Status Code: {response.status_code}")
+
+    json_data = response.json()
+    versions = json_data['versions']
+
+    if mobile_view:
+        height = 450
+        width = 375
+    else:
+        height = 1000
+        width = None
+
+    if custom_width:
+        width = custom_width
+
+    plot_kwargs = {'height': height, 'width': width, 'open_dip': 0, 'comm_dip': 0,
+                   'mobile_view': mobile_view}
+
+    benchmark_ticks = {}
+    benchmark_update = {}
+    mm_dfs = get_github_data()['multimodal']['dataframes']
+    result_df = get_trend_data(mm_dfs, model_registry_data)
+    df = result_df
+    for ver in versions:
+        if 'multimodal' in ver['version']:
+            temp_ver = ver['version']
+            temp_ver = temp_ver.replace('_multimodal', '')
+            benchmark_ticks[pd.to_datetime(ver['release_date'])] = temp_ver ## MM benchmark dates considered after v1.6 (incl.)
+            benchmark_update[pd.to_datetime(ver['last_updated'])] = temp_ver
+
+    fig = get_plot(df, start_date=START_DATE, end_date=datetime.now().strftime('%Y-%m-%d'), benchmark_ticks=benchmark_ticks, benchmark_update=benchmark_update, **plot_kwargs)
+
+    return fig
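A quick worked example of the helpers above (values follow directly from the definitions): get_param_size('70B') returns 70.0, get_param_size('1T') returns 1000.0 (terascale sizes are rescaled to billions), and registry entries with an empty "parameters" field are plotted as estimated 1000B circles with est_flag=True. A minimal sketch of generating both plot variants, mirroring how app.py calls this module:

from src.trend_utils import get_final_trend_plot

fig_desktop = get_final_trend_plot(mobile_view=False, custom_width=1200)  # the initial render in app.py
fig_mobile = get_final_trend_plot(mobile_view=True)   # 450x375, only trendline models shown
fig_desktop.write_html("trend_desktop.html")          # e.g. for offline inspection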
version_utils.py ADDED
@@ -0,0 +1,63 @@
+import requests
+from datetime import datetime
+import pandas as pd
+import json
+from io import StringIO
+
+from src.leaderboard_utils import process_df
+from src.assets.text_content import REPO, BENCHMARK_FILE
+
+def get_version_data():
+    """
+    Read and process data from CSV files of all available multimodal versions hosted on GitHub - https://github.com/clembench/clembench-runs
+
+    Returns:
+        version_data (dict): Dictionary containing:
+            - "versions": List of metadata dicts (name, last_updated, release_date), newest version first.
+            - "dataframes": List of processed leaderboard DataFrames, aligned by index with "versions".
+    """
+    base_repo = REPO
+    json_url = base_repo + BENCHMARK_FILE
+    response = requests.get(json_url)
+
+    # Check if the JSON file request was successful
+    if response.status_code != 200:
+        print(f"Failed to read JSON file: Status Code: {response.status_code}")
+        return None, None, None, None
+
+    json_data = response.json()
+    versions = json_data['versions']
+
+    version_names = sorted(
+        [ver['version'] for ver in versions],
+        key=lambda v: list(map(int, v[1:].split('_')[0].split('.'))),
+        reverse=True
+    )
+
+    version_data = {
+        'versions': [],
+        'dataframes': []
+    }
+
+    for version in version_names:
+        if 'multimodal' in version: # Only include multimodal versions
+            base_url = f"{base_repo}{version}/results.csv"
+            response = requests.get(base_url)
+            if response.status_code == 200:
+                df = pd.read_csv(StringIO(response.text))
+                df = process_df(df)
+                df = df.sort_values(by=df.columns[1], ascending=False) # Sort by clemscore column
+                version_data['dataframes'].append(df)
+                metadata = {
+                    'name': version,
+                    'last_updated': [datetime.strptime(v['last_updated'], '%Y-%m-%d').strftime("%d %b %Y") for v in versions if v['version'] == version],
+                    'release_date': [datetime.strptime(v['release_date'], '%Y-%m-%d').strftime("%d %b %Y") for v in versions if v['version'] == version]
+                }
+                version_data['versions'].append(metadata)
+
+
+    return version_data
+
+
+if __name__ == "__main__":
+    version_data = get_version_data()
+    print(version_data['versions'])
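A minimal sketch of how app.py consumes get_version_data() (see the app.py diff above; the 'versions' and 'dataframes' lists are aligned by index, newest version first):

from src.version_utils import get_version_data

versions_data = get_version_data()
version_names = [v['name'] for v in versions_data['versions']]
latest_version = version_names[0]           # newest multimodal version
latest_df = versions_data['dataframes'][0]  # its leaderboard table

def select_version_df(name):
    # Index lookup keeps metadata and dataframes in sync
    for i, v in enumerate(versions_data['versions']):
        if v['name'] == name:
            return versions_data['dataframes'][i]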