kennymckormick committed
Commit a6e43e6
1 parent: b11357b
Files changed (6)
  1. .pre-commit-config.yaml +33 -0
  2. README.md +1 -1
  3. app.py +37 -32
  4. gen_table.py +146 -0
  5. lb_info.py → meta_data.py +1 -136
  6. requirements.txt +1 -1
.pre-commit-config.yaml ADDED
@@ -0,0 +1,33 @@
+exclude: |
+  (?x)^(
+  meta_data.py
+  )
+repos:
+  - repo: https://github.com/PyCQA/flake8
+    rev: 5.0.4
+    hooks:
+      - id: flake8
+        args: ["--max-line-length=120", "--ignore=F401,F403,F405,E402"]
+        exclude: ^configs/
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.11.5
+    hooks:
+      - id: isort
+  - repo: https://github.com/pre-commit/mirrors-yapf
+    rev: v0.30.0
+    hooks:
+      - id: yapf
+        args: ["--style={column_limit=120}"]
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.1.0
+    hooks:
+      - id: trailing-whitespace
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: requirements-txt-fixer
+      - id: double-quote-string-fixer
+      - id: check-merge-conflict
+      - id: fix-encoding-pragma
+        args: ["--remove"]
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
README.md CHANGED
@@ -12,4 +12,4 @@ tags:
 - leaderboard
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

(The removed and re-added line are textually identical; the change is whitespace-only, most likely the final newline added by the new end-of-file-fixer hook.)
app.py CHANGED
@@ -1,6 +1,9 @@
 import abc
+
 import gradio as gr
-from lb_info import *
+
+from gen_table import *
+from meta_data import *

 with gr.Blocks() as demo:
     struct = load_results()
@@ -24,30 +27,30 @@ with gr.Blocks() as demo:
     checkbox_group = gr.CheckboxGroup(
         choices=check_box['all'],
         value=check_box['required'],
-        label="Evaluation Dimension",
+        label='Evaluation Dimension',
         interactive=True,
     )
     headers = check_box['essential'] + checkbox_group.value
     with gr.Row():
         model_size = gr.CheckboxGroup(
-            choices=MODEL_SIZE,
-            value=MODEL_SIZE,
+            choices=MODEL_SIZE,
+            value=MODEL_SIZE,
             label='Model Size',
             interactive=True
         )
         model_type = gr.CheckboxGroup(
-            choices=MODEL_TYPE,
-            value=MODEL_TYPE,
+            choices=MODEL_TYPE,
+            value=MODEL_TYPE,
             label='Model Type',
             interactive=True
         )
     data_component = gr.components.DataFrame(
-        value=table[headers],
-        type="pandas",
+        value=table[headers],
+        type='pandas',
         datatype=[type_map[x] for x in headers],
-        interactive=False,
+        interactive=False,
         visible=True)
-
+
     def filter_df(fields, model_size, model_type):
         headers = check_box['essential'] + fields
         df = cp.deepcopy(table)
@@ -58,12 +61,12 @@ with gr.Blocks() as demo:
         df['flag'] = [model_type_flag(df.iloc[i], model_type) for i in range(len(df))]
         df = df[df['flag']]
         df.pop('flag')
-
+
         comp = gr.components.DataFrame(
-            value=df[headers],
-            type="pandas",
+            value=df[headers],
+            type='pandas',
             datatype=[type_map[x] for x in headers],
-            interactive=False,
+            interactive=False,
             visible=True)
         return comp

@@ -84,31 +87,31 @@ with gr.Blocks() as demo:
         s.checkbox_group = gr.CheckboxGroup(
             choices=s.check_box['all'],
             value=s.check_box['required'],
-            label=f"{dataset} CheckBoxes",
+            label=f'{dataset} CheckBoxes',
             interactive=True,
         )
         s.headers = s.check_box['essential'] + s.checkbox_group.value
         with gr.Row():
             s.model_size = gr.CheckboxGroup(
-                choices=MODEL_SIZE,
-                value=MODEL_SIZE,
+                choices=MODEL_SIZE,
+                value=MODEL_SIZE,
                 label='Model Size',
                 interactive=True
            )
             s.model_type = gr.CheckboxGroup(
-                choices=MODEL_TYPE,
-                value=MODEL_TYPE,
+                choices=MODEL_TYPE,
+                value=MODEL_TYPE,
                 label='Model Type',
                 interactive=True
            )
         s.data_component = gr.components.DataFrame(
-            value=s.table[s.headers],
-            type="pandas",
+            value=s.table[s.headers],
+            type='pandas',
             datatype=[s.type_map[x] for x in s.headers],
-            interactive=False,
+            interactive=False,
             visible=True)
         s.dataset = gr.Textbox(value=dataset, label=dataset, visible=False)
-
+
         def filter_df_l2(dataset_name, fields, model_size, model_type):
             s = structs[DATASETS.index(dataset_name)]
             headers = s.check_box['essential'] + fields
@@ -120,25 +123,27 @@ with gr.Blocks() as demo:
             df['flag'] = [model_type_flag(df.iloc[i], model_type) for i in range(len(df))]
             df = df[df['flag']]
             df.pop('flag')
-
+
             comp = gr.components.DataFrame(
-                value=df[headers],
-                type="pandas",
+                value=df[headers],
+                type='pandas',
                 datatype=[s.type_map[x] for x in headers],
-                interactive=False,
+                interactive=False,
                 visible=True)
             return comp

         for cbox in [s.checkbox_group, s.model_size, s.model_type]:
-            cbox.change(fn=filter_df_l2, inputs=[s.dataset, s.checkbox_group, s.model_size, s.model_type], outputs=s.data_component)
-
+            cbox.change(
+                fn=filter_df_l2,
+                inputs=[s.dataset, s.checkbox_group, s.model_size, s.model_type],
+                outputs=s.data_component)

     with gr.Row():
-        with gr.Accordion("Citation", open=False):
+        with gr.Accordion('Citation', open=False):
             citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
+                value=CITATION_BUTTON_TEXT,
                 label=CITATION_BUTTON_LABEL,
                 elem_id='citation-button')

 if __name__ == '__main__':
-    demo.launch(server_name='0.0.0.0')
+    demo.launch(server_name='0.0.0.0')
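Most of the churn in app.py appears mechanical (quote style, trailing whitespace, and line wrapping from the new hooks, which is why several removed/re-added pairs look identical here); the substantive change is replacing the `from lb_info import *` star-import with the split `gen_table` / `meta_data` modules. For readers unfamiliar with the callback pattern the file uses: each CheckboxGroup's `.change` handler returns a freshly built DataFrame component, which gradio swaps in for the component named in `outputs`. A minimal standalone sketch of that pattern (toy data and column names, not the leaderboard's real schema):

import gradio as gr
import pandas as pd

df = pd.DataFrame({'Method': ['A', 'B'], 'Avg Score': [55.0, 61.5]})

with gr.Blocks() as demo:
    fields = gr.CheckboxGroup(choices=list(df.columns), value=list(df.columns), label='Columns', interactive=True)
    view = gr.components.DataFrame(value=df, type='pandas', interactive=False, visible=True)

    def refilter(cols):
        # Return a rebuilt component; gradio applies it to the bound output
        return gr.components.DataFrame(value=df[cols], type='pandas', interactive=False, visible=True)

    fields.change(fn=refilter, inputs=fields, outputs=view)

if __name__ == '__main__':
    demo.launch()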
gen_table.py ADDED
@@ -0,0 +1,146 @@
+import copy as cp
+import json
+from collections import defaultdict
+from urllib.request import urlopen
+
+import gradio as gr
+import numpy as np
+import pandas as pd
+
+from meta_data import META_FIELDS, URL
+
+
+def listinstr(lst, s):
+    assert isinstance(lst, list)
+    for item in lst:
+        if item in s:
+            return True
+    return False
+
+
+def load_results():
+    data = json.loads(urlopen(URL).read())
+    return data
+
+
+def nth_large(val, vals):
+    return sum([1 for v in vals if v > val]) + 1
+
+
+def format_timestamp(timestamp):
+    date = timestamp[:2] + '.' + timestamp[2:4] + '.' + timestamp[4:6]
+    time = timestamp[6:8] + ':' + timestamp[8:10] + ':' + timestamp[10:12]
+    return date + ' ' + time
+
+
+def model_size_flag(sz, FIELDS):
+    if pd.isna(sz) and 'Unknown' in FIELDS:
+        return True
+    if pd.isna(sz):
+        return False
+    if '<10B' in FIELDS and sz < 10:
+        return True
+    if '10B-20B' in FIELDS and sz >= 10 and sz < 20:
+        return True
+    if '20B-40B' in FIELDS and sz >= 20 and sz < 40:
+        return True
+    if '>40B' in FIELDS and sz >= 40:
+        return True
+    return False
+
+
+def model_type_flag(line, FIELDS):
+    if 'OpenSource' in FIELDS and line['OpenSource'] == 'Yes':
+        return True
+    if 'API' in FIELDS and line['OpenSource'] == 'No' and line['Verified'] == 'Yes':
+        return True
+    if 'Proprietary' in FIELDS and line['OpenSource'] == 'No' and line['Verified'] == 'No':
+        return True
+    return False
+
+
+def BUILD_L1_DF(results, fields):
+    res = defaultdict(list)
+    for i, m in enumerate(results):
+        item = results[m]
+        meta = item['META']
+        for k in META_FIELDS:
+            if k == 'Parameters (B)':
+                param = meta['Parameters']
+                res[k].append(float(param.replace('B', '')) if param != '' else None)
+            elif k == 'Method':
+                name, url = meta['Method']
+                res[k].append(f'<a href="{url}">{name}</a>')
+            else:
+                res[k].append(meta[k])
+        scores, ranks = [], []
+        for d in fields:
+            res[d].append(item[d]['Overall'])
+            if d == 'MME':
+                scores.append(item[d]['Overall'] / 28)
+            else:
+                scores.append(item[d]['Overall'])
+            ranks.append(nth_large(item[d]['Overall'], [x[d]['Overall'] for x in results.values()]))
+        res['Avg Score'].append(round(np.mean(scores), 1))
+        res['Avg Rank'].append(round(np.mean(ranks), 2))
+
+    df = pd.DataFrame(res)
+    df = df.sort_values('Avg Rank')
+
+    check_box = {}
+    check_box['essential'] = ['Method', 'Parameters (B)', 'Language Model', 'Vision Model']
+    check_box['required'] = ['Avg Score', 'Avg Rank']
+    check_box['all'] = check_box['required'] + ['OpenSource', 'Verified'] + fields
+    type_map = defaultdict(lambda: 'number')
+    type_map['Method'] = 'html'
+    type_map['Language Model'] = type_map['Vision Model'] = type_map['OpenSource'] = type_map['Verified'] = 'str'
+    check_box['type_map'] = type_map
+    return df, check_box
+
+
+def BUILD_L2_DF(results, dataset):
+    res = defaultdict(list)
+    fields = list(list(results.values())[0][dataset].keys())
+    non_overall_fields = [x for x in fields if 'Overall' not in x]
+    overall_fields = [x for x in fields if 'Overall' in x]
+    if dataset == 'MME':
+        non_overall_fields = [x for x in non_overall_fields if not listinstr(['Perception', 'Cognition'], x)]
+        overall_fields = overall_fields + ['Perception', 'Cognition']
+
+    for m in results:
+        item = results[m]
+        meta = item['META']
+        for k in META_FIELDS:
+            if k == 'Parameters (B)':
+                param = meta['Parameters']
+                res[k].append(float(param.replace('B', '')) if param != '' else None)
+            elif k == 'Method':
+                name, url = meta['Method']
+                res[k].append(f'<a href="{url}">{name}</a>')
+            else:
+                res[k].append(meta[k])
+        fields = [x for x in fields]
+
+        for d in non_overall_fields:
+            res[d].append(item[dataset][d])
+        for d in overall_fields:
+            res[d].append(item[dataset][d])
+
+    df = pd.DataFrame(res)
+    all_fields = overall_fields + non_overall_fields
+    # Required fields: the Overall-style columns if any exist, else the first 5 non-overall fields
+    required_fields = overall_fields if len(overall_fields) else non_overall_fields[:5]
+
+    if 'Overall' in overall_fields:
+        df = df.sort_values('Overall')
+        df = df.iloc[::-1]
+
+    check_box = {}
+    check_box['essential'] = ['Method', 'Parameters (B)', 'Language Model', 'Vision Model']
+    check_box['required'] = required_fields
+    check_box['all'] = all_fields
+    type_map = defaultdict(lambda: 'number')
+    type_map['Method'] = 'html'
+    type_map['Language Model'] = type_map['Vision Model'] = type_map['OpenSource'] = type_map['Verified'] = 'str'
+    check_box['type_map'] = type_map
+    return df, check_box
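For context, a minimal sketch of how the new module is consumed (the dataset key 'MME' is illustrative; it requires network access to the OpenVLM JSON at `URL` and assumes the hosted data keeps the schema BUILD_L1_DF expects, i.e. model name -> {'META': {...}, '<dataset>': {'Overall': ...}}):

from gen_table import BUILD_L1_DF, BUILD_L2_DF, load_results

results = load_results()
table, check_box = BUILD_L1_DF(results, ['MME'])      # top-level leaderboard table
print(table[check_box['essential'] + check_box['required']].head())

mme_df, mme_box = BUILD_L2_DF(results, 'MME')         # per-dataset drill-down
print(mme_box['required'])                            # Overall-style columns shown by default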
lb_info.py → meta_data.py RENAMED
@@ -1,17 +1,3 @@
-import json
-import pandas as pd
-from collections import defaultdict
-import gradio as gr
-import copy as cp
-import numpy as np
-
-def listinstr(lst, s):
-    assert isinstance(lst, list)
-    for item in lst:
-        if item in s:
-            return True
-    return False
-
 # CONSTANTS-URL
 URL = "http://opencompass.openxlab.space/utils/OpenVLM.json"
 VLMEVALKIT_README = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/README.md'
@@ -138,125 +124,4 @@ LEADERBOARD_MD['ScienceQA_VAL'] = """
 - During evaluation, we use `GPT-3.5-Turbo-0613` as the choice extractor for all VLMs if the choice can not be extracted via heuristic matching. **Zero-shot** inference is adopted.
 """

-LEADERBOARD_MD['ScienceQA_TEST'] = LEADERBOARD_MD['ScienceQA_VAL']
-
-from urllib.request import urlopen
-
-def load_results():
-    data = json.loads(urlopen(URL).read())
-    return data
-
-def nth_large(val, vals):
-    return sum([1 for v in vals if v > val]) + 1
-
-def format_timestamp(timestamp):
-    return timestamp[:2] + '.' + timestamp[2:4] + '.' + timestamp[4:6] + ' ' + timestamp[6:8] + ':' + timestamp[8:10] + ':' + timestamp[10:12]
-
-def model_size_flag(sz, FIELDS):
-    if pd.isna(sz) and 'Unknown' in FIELDS:
-        return True
-    if pd.isna(sz):
-        return False
-    if '<10B' in FIELDS and sz < 10:
-        return True
-    if '10B-20B' in FIELDS and sz >= 10 and sz < 20:
-        return True
-    if '20B-40B' in FIELDS and sz >= 20 and sz < 40:
-        return True
-    if '>40B' in FIELDS and sz >= 40:
-        return True
-    return False
-
-def model_type_flag(line, FIELDS):
-    if 'OpenSource' in FIELDS and line['OpenSource'] == 'Yes':
-        return True
-    if 'API' in FIELDS and line['OpenSource'] == 'No' and line['Verified'] == 'Yes':
-        return True
-    if 'Proprietary' in FIELDS and line['OpenSource'] == 'No' and line['Verified'] == 'No':
-        return True
-    return False
-
-def BUILD_L1_DF(results, fields):
-    res = defaultdict(list)
-    for i, m in enumerate(results):
-        item = results[m]
-        meta = item['META']
-        for k in META_FIELDS:
-            if k == 'Parameters (B)':
-                param = meta['Parameters']
-                res[k].append(float(param.replace('B', '')) if param != '' else None)
-            elif k == 'Method':
-                name, url = meta['Method']
-                res[k].append(f'<a href="{url}">{name}</a>')
-            else:
-                res[k].append(meta[k])
-        scores, ranks = [], []
-        for d in fields:
-            res[d].append(item[d]['Overall'])
-            if d == 'MME':
-                scores.append(item[d]['Overall'] / 28)
-            else:
-                scores.append(item[d]['Overall'])
-            ranks.append(nth_large(item[d]['Overall'], [x[d]['Overall'] for x in results.values()]))
-        res['Avg Score'].append(round(np.mean(scores), 1))
-        res['Avg Rank'].append(round(np.mean(ranks), 2))
-
-    df = pd.DataFrame(res)
-    df = df.sort_values('Avg Rank')
-
-    check_box = {}
-    check_box['essential'] = ['Method', 'Parameters (B)', 'Language Model', 'Vision Model']
-    check_box['required'] = ['Avg Score', 'Avg Rank']
-    check_box['all'] = check_box['required'] + ['OpenSource', 'Verified'] + fields
-    type_map = defaultdict(lambda: 'number')
-    type_map['Method'] = 'html'
-    type_map['Language Model'] = type_map['Vision Model'] = type_map['OpenSource'] = type_map['Verified'] = 'str'
-    check_box['type_map'] = type_map
-    return df, check_box
-
-def BUILD_L2_DF(results, dataset):
-    res = defaultdict(list)
-    fields = list(list(results.values())[0][dataset].keys())
-    non_overall_fields = [x for x in fields if 'Overall' not in x]
-    overall_fields = [x for x in fields if 'Overall' in x]
-    if dataset == 'MME':
-        non_overall_fields = [x for x in non_overall_fields if not listinstr(['Perception', 'Cognition'], x)]
-        overall_fields = overall_fields + ['Perception', 'Cognition']
-
-    for m in results:
-        item = results[m]
-        meta = item['META']
-        for k in META_FIELDS:
-            if k == 'Parameters (B)':
-                param = meta['Parameters']
-                res[k].append(float(param.replace('B', '')) if param != '' else None)
-            elif k == 'Method':
-                name, url = meta['Method']
-                res[k].append(f'<a href="{url}">{name}</a>')
-            else:
-                res[k].append(meta[k])
-        fields = [x for x in fields]
-
-        for d in non_overall_fields:
-            res[d].append(item[dataset][d])
-        for d in overall_fields:
-            res[d].append(item[dataset][d])
-
-    df = pd.DataFrame(res)
-    all_fields = overall_fields + non_overall_fields
-    # Use the first 5 non-overall fields as required fields
-    required_fields = overall_fields if len(overall_fields) else non_overall_fields[:5]
-
-    if 'Overall' in overall_fields:
-        df = df.sort_values('Overall')
-        df = df.iloc[::-1]
-
-    check_box = {}
-    check_box['essential'] = ['Method', 'Parameters (B)', 'Language Model', 'Vision Model']
-    check_box['required'] = required_fields
-    check_box['all'] = all_fields
-    type_map = defaultdict(lambda: 'number')
-    type_map['Method'] = 'html'
-    type_map['Language Model'] = type_map['Vision Model'] = type_map['OpenSource'] = type_map['Verified'] = 'str'
-    check_box['type_map'] = type_map
-    return df, check_box
+LEADERBOARD_MD['ScienceQA_TEST'] = LEADERBOARD_MD['ScienceQA_VAL']
requirements.txt CHANGED
@@ -1,3 +1,3 @@
+gradio==4.15.0
 numpy>=1.23.4
 pandas>=1.5.3
-gradio==4.15.0

(The new requirements-txt-fixer hook sorts the file, which moves the gradio pin to the top; the pinned versions themselves are unchanged.)