FinancialSupport committed on
Commit
23c9328
•
1 Parent(s): 96ed4d6
Files changed (1) hide show
  1. app.py +424 -77
app.py CHANGED
@@ -1,96 +1,443 @@
 
1
  import gradio as gr
2
  import pandas as pd
 
 
3
 
4
- csv_filename = 'leaderboard.csv'
5
- # url = 'https://docs.google.com/spreadsheets/d/1Oh3nrbdWjKuh9twJsc9yJLppiJeD_BZyKgCTOxRkALM/export?format=csv'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- def get_data_classifica():
8
- dataset = pd.read_csv("leaderboard_general.csv", sep=',')
9
- if 'model ' in dataset.columns:
10
- dataset.rename(columns={'model ': 'model'}, inplace=True)
11
- df_classifica = dataset[['model', 'helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']]
12
- df_classifica['media'] = df_classifica[['helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']].mean(axis=1)
13
- df_classifica['media'] = df_classifica['media'].round(3)
14
- df_classifica = df_classifica.sort_values(by='media', ascending=False)
15
- df_classifica = df_classifica[['model', 'media', 'helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']]
16
 
17
- return df_classifica
 
18
 
19
- def get_data_totale():
20
- dataset = pd.read_csv("leaderboard_general.csv", sep=',')
21
- if 'model ' in dataset.columns:
22
- dataset.rename(columns={'model ': 'model'}, inplace=True)
23
- return dataset
 
 
 
 
 
 
 
 
 
24
 
25
- with gr.Blocks() as demo:
26
 
27
- with gr.Tab('Classifica Generale'):
 
28
 
29
- gr.Markdown('''# Classifica generale degli LLM italiani''')
30
- discord_link = 'https://discord.gg/m7sS3mduY2'
31
- gr.Markdown('''
32
- I modelli sottostanti sono stati testati con [lm_evaluation_harness](https://github.com/EleutherAI/lm-evaluation-harness) su task specifici per l'italiano introdotti con questa [PR](https://github.com/EleutherAI/lm-evaluation-harness/pull/1358).
33
- L'intero progetto, i modelli e i dataset sono rigorosamente open source e tutti i risultati sono riproducibili lanciando i seguenti comandi:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- ```
36
- lm_eval --model hf --model_args pretrained=HUGGINGFACE_MODEL_ID --tasks hellaswag_it,arc_it --device cuda:0 --batch_size auto:2
37
- ```
38
 
39
- ```
40
- lm_eval --model hf --model_args pretrained=HUGGINGFACE_MODEL_ID --tasks m_mmlu_it --num_fewshot 5 --device cuda:0 --batch_size auto:2
41
- ```
42
- ''')
43
- gr.DataFrame(get_data_classifica, every=3600)
44
- gr.Markdown(f"Contributore principale: @giux78")
45
- gr.Markdown('''
46
- ### Risultati su modelli "internazionali" (instruct)
47
-
48
- | Model | Arc-c | HellaS | MMUL | AVG |
49
- | --- | --- | --- | --- | --- |
50
- | Mixtral 8x22b | 55.3 | 77.1 | 75.8 | 69.4 |
51
- | LLama3 70b | 52.9 | 70.3 | 74.8 | 66.0 |
52
- | command-r-plus | 49.5 | 74.9 | 67.6 | 64.0 |
53
- | Mixtral 8x7b | 51.1 | 72.9 | 65.9 | 63.3 |
54
- | LLama2 70b | 49.4 | 70.9 | 65.1 | 61.8 |
55
- | command-r-v01 | 50.8 | 72.3 | 60.0 | 61.0 |
56
- | Phi-3-mini | 43.46 | 61.44 | 56.55 | 53.8 |
57
- | LLama3 8b | 44.3 | 59.9 | 55.7 | 53.3 |
58
- | LLama1 34b | 42.9 | 65.4 | 49.0 | 52.4 |
59
- | Mistral 7b | 41.49 | 61.22 | 52.53 | 51.7 |
60
- | Gemma 1.1 7b | 41.75 | 54.07 | 49.45 | 48.4 |
61
-
62
- ''')
63
-
64
-
65
- with gr.Tab('Classifica RAG'):
66
-
67
- gr.Markdown('''# Classifica RAG degli LLM italiani''')
68
- gr.Markdown(f'''In questa sezione i modelli sono valutati su dei task di Q&A e ordinati per F1 Score e EM (Exact Match). La repo di riferimento è [questa](https://github.com/C080/open-llm-ita-leaderboard).
69
- I modelli in cima alla classifica sono ritenuti preferibili per i task di Retrieval Augmented Generation.''')
70
- gr.Dataframe(pd.read_csv(csv_filename, sep=';'))
71
- gr.Markdown(f"Si ringrazia il @galatolo per il codice dell'eval.")
72
 
73
 
74
- with gr.Tab('Eval aggiuntive'):
75
 
76
- gr.Markdown('''# Altre evaluation''')
77
- gr.Markdown('''Qui ci sono altri test di altri modelli, che non sono ancora stati integrati nella classifica generale.''')
78
- gr.DataFrame(get_data_totale, every=3600)
79
 
80
- with gr.Tab('Informazioni'):
81
 
82
- form_link = "https://forms.gle/Gc9Dfu52xSBhQPpAA"
83
- gr.Markdown('''# Community discord
84
- Se vuoi contribuire al progetto o semplicemente unirti alla community di LLM italiani unisciti al nostro [discord!](https://discord.gg/m7sS3mduY2)
85
- # Aggiungi il tuo modello
86
- Se hai sviluppato un tuo modello che vuoi far valutare, compila il form [qui](https://forms.gle/Gc9Dfu52xSBhQPpAA) è tutto gratuito!
87
- ''')
88
 
89
- with gr.Tab('Sponsor'):
90
 
91
- gr.Markdown('''
92
- # Sponsor
93
- Le evaluation della classifica generale sono state gentilmente offerte da un provider cloud italiano [seeweb.it](https://www.seeweb.it/) specializzato in servizi di GPU cloud e AI.
94
- ''')
95
 
96
- demo.launch()
 
1
+ import subprocess
2
  import gradio as gr
3
  import pandas as pd
4
+ from apscheduler.schedulers.background import BackgroundScheduler
5
+ from huggingface_hub import snapshot_download
6
 
7
+ from src.about import (
8
+ CITATION_BUTTON_LABEL,
9
+ CITATION_BUTTON_TEXT,
10
+ EVALUATION_QUEUE_TEXT,
11
+ INTRODUCTION_TEXT,
12
+ LLM_BENCHMARKS_TEXT,
13
+ TITLE,
14
+ )
15
+ from src.display.css_html_js import custom_css
16
+ from src.display.utils import (
17
+ BENCHMARK_COLS,
18
+ COLS,
19
+ EVAL_COLS,
20
+ EVAL_TYPES,
21
+ NUMERIC_INTERVALS,
22
+ TYPES,
23
+ AutoEvalColumn,
24
+ ModelType,
25
+ fields,
26
+ WeightType,
27
+ Precision
28
+ )
29
+ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
30
+ from src.populate import get_evaluation_queue_df, get_leaderboard_df
31
+ from src.submission.submit import add_new_eval
32
 
 
 
 
 
 
 
 
 
 
33
 
34
+ def restart_space():
35
+ API.restart_space(repo_id=REPO_ID)
36
 
37
+ try:
38
+ print(EVAL_REQUESTS_PATH)
39
+ snapshot_download(
40
+ repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
41
+ )
42
+ except Exception:
43
+ restart_space()
44
+ try:
45
+ print(EVAL_RESULTS_PATH)
46
+ snapshot_download(
47
+ repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
48
+ )
49
+ except Exception:
50
+ restart_space()
51
 
 
52
 
53
+ raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
54
+ leaderboard_df = original_df.copy()
55
 
56
+ (
57
+ finished_eval_queue_df,
58
+ running_eval_queue_df,
59
+ pending_eval_queue_df,
60
+ ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
61
+
62
+
63
+ # Searching and filtering
64
+ def update_table(
65
+ hidden_df: pd.DataFrame,
66
+ columns: list,
67
+ type_query: list,
68
+ precision_query: str,
69
+ size_query: list,
70
+ show_deleted: bool,
71
+ query: str,
72
+ ):
73
+ filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
74
+ filtered_df = filter_queries(query, filtered_df)
75
+ df = select_columns(filtered_df, columns)
76
+ return df
77
+
78
+
79
+ def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
80
+ return df[(df[AutoEvalColumn.model.name].str.contains(query, case=False))]
81
+
82
+
83
+ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
84
+ always_here_cols = [
85
+ AutoEvalColumn.model_type_symbol.name,
86
+ AutoEvalColumn.model.name,
87
+ ]
88
+ # We use COLS to maintain sorting
89
+ filtered_df = df[
90
+ always_here_cols + [c for c in COLS if c in df.columns and c in columns]
91
+ ]
92
+ return filtered_df
93
+
94
+
95
+ def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
96
+ final_df = []
97
+ if query != "":
98
+ queries = [q.strip() for q in query.split(";")]
99
+ for _q in queries:
100
+ _q = _q.strip()
101
+ if _q != "":
102
+ temp_filtered_df = search_table(filtered_df, _q)
103
+ if len(temp_filtered_df) > 0:
104
+ final_df.append(temp_filtered_df)
105
+ if len(final_df) > 0:
106
+ filtered_df = pd.concat(final_df)
107
+ filtered_df = filtered_df.drop_duplicates(
108
+ subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
109
+ )
110
+
111
+ return filtered_df
112
+
113
+
114
+ def filter_models(
115
+ df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
116
+ ) -> pd.DataFrame:
117
+ # Show all models
118
+ if show_deleted:
119
+ filtered_df = df
120
+ else: # Show only still on the hub models
121
+ filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
122
+
123
+ type_emoji = [t[0] for t in type_query]
124
+ filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
125
+ filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
126
+
127
+ numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
128
+ params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
129
+ mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
130
+ filtered_df = filtered_df.loc[mask]
131
+
132
+ return filtered_df
133
+
134
+
135
+ demo = gr.Blocks(css=custom_css)
136
+ with demo:
137
+ gr.HTML(TITLE)
138
+ gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
139
+
140
+ with gr.Tabs(elem_classes="tab-buttons") as tabs:
141
+ with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
142
+ with gr.Row():
143
+ with gr.Column():
144
+ with gr.Row():
145
+ search_bar = gr.Textbox(
146
+ placeholder=" πŸ” Search for your model (separate multiple queries with `;`) and press ENTER...",
147
+ show_label=False,
148
+ elem_id="search-bar",
149
+ )
150
+ with gr.Row():
151
+ shown_columns = gr.CheckboxGroup(
152
+ choices=[
153
+ c.name
154
+ for c in fields(AutoEvalColumn)
155
+ if not c.hidden and not c.never_hidden
156
+ ],
157
+ value=[
158
+ c.name
159
+ for c in fields(AutoEvalColumn)
160
+ if c.displayed_by_default and not c.hidden and not c.never_hidden
161
+ ],
162
+ label="Select columns to show",
163
+ elem_id="column-select",
164
+ interactive=True,
165
+ )
166
+ with gr.Row():
167
+ deleted_models_visibility = gr.Checkbox(
168
+ value=False, label="Show gated/private/deleted models", interactive=True
169
+ )
170
+ with gr.Column(min_width=320):
171
+ #with gr.Box(elem_id="box-filter"):
172
+ filter_columns_type = gr.CheckboxGroup(
173
+ label="Model types",
174
+ choices=[t.to_str() for t in ModelType],
175
+ value=[t.to_str() for t in ModelType],
176
+ interactive=True,
177
+ elem_id="filter-columns-type",
178
+ )
179
+ filter_columns_precision = gr.CheckboxGroup(
180
+ label="Precision",
181
+ choices=[i.value.name for i in Precision],
182
+ value=[i.value.name for i in Precision],
183
+ interactive=True,
184
+ elem_id="filter-columns-precision",
185
+ )
186
+ filter_columns_size = gr.CheckboxGroup(
187
+ label="Model sizes (in billions of parameters)",
188
+ choices=list(NUMERIC_INTERVALS.keys()),
189
+ value=list(NUMERIC_INTERVALS.keys()),
190
+ interactive=True,
191
+ elem_id="filter-columns-size",
192
+ )
193
+
194
+ leaderboard_table = gr.components.Dataframe(
195
+ value=leaderboard_df[
196
+ [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
197
+ + shown_columns.value
198
+ ],
199
+ headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
200
+ datatype=TYPES,
201
+ elem_id="leaderboard-table",
202
+ interactive=False,
203
+ visible=True,
204
+ )
205
+
206
+ # Dummy leaderboard for handling the case when the user uses backspace key
207
+ hidden_leaderboard_table_for_search = gr.components.Dataframe(
208
+ value=original_df[COLS],
209
+ headers=COLS,
210
+ datatype=TYPES,
211
+ visible=False,
212
+ )
213
+ search_bar.submit(
214
+ update_table,
215
+ [
216
+ hidden_leaderboard_table_for_search,
217
+ shown_columns,
218
+ filter_columns_type,
219
+ filter_columns_precision,
220
+ filter_columns_size,
221
+ deleted_models_visibility,
222
+ search_bar,
223
+ ],
224
+ leaderboard_table,
225
+ )
226
+ for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
227
+ selector.change(
228
+ update_table,
229
+ [
230
+ hidden_leaderboard_table_for_search,
231
+ shown_columns,
232
+ filter_columns_type,
233
+ filter_columns_precision,
234
+ filter_columns_size,
235
+ deleted_models_visibility,
236
+ search_bar,
237
+ ],
238
+ leaderboard_table,
239
+ queue=True,
240
+ )
241
+
242
+ with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
243
+ gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
244
+
245
+ with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
246
+ with gr.Column():
247
+ with gr.Row():
248
+ gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
249
+
250
+ with gr.Column():
251
+ with gr.Accordion(
252
+ f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
253
+ open=False,
254
+ ):
255
+ with gr.Row():
256
+ finished_eval_table = gr.components.Dataframe(
257
+ value=finished_eval_queue_df,
258
+ headers=EVAL_COLS,
259
+ datatype=EVAL_TYPES,
260
+ row_count=5,
261
+ )
262
+ with gr.Accordion(
263
+ f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
264
+ open=False,
265
+ ):
266
+ with gr.Row():
267
+ running_eval_table = gr.components.Dataframe(
268
+ value=running_eval_queue_df,
269
+ headers=EVAL_COLS,
270
+ datatype=EVAL_TYPES,
271
+ row_count=5,
272
+ )
273
+
274
+ with gr.Accordion(
275
+ f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
276
+ open=False,
277
+ ):
278
+ with gr.Row():
279
+ pending_eval_table = gr.components.Dataframe(
280
+ value=pending_eval_queue_df,
281
+ headers=EVAL_COLS,
282
+ datatype=EVAL_TYPES,
283
+ row_count=5,
284
+ )
285
+ with gr.Row():
286
+ gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
287
+
288
+ with gr.Row():
289
+ with gr.Column():
290
+ model_name_textbox = gr.Textbox(label="Model name")
291
+ revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
292
+ model_type = gr.Dropdown(
293
+ choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
294
+ label="Model type",
295
+ multiselect=False,
296
+ value=None,
297
+ interactive=True,
298
+ )
299
+
300
+ with gr.Column():
301
+ precision = gr.Dropdown(
302
+ choices=[i.value.name for i in Precision if i != Precision.Unknown],
303
+ label="Precision",
304
+ multiselect=False,
305
+ value="float16",
306
+ interactive=True,
307
+ )
308
+ weight_type = gr.Dropdown(
309
+ choices=[i.value.name for i in WeightType],
310
+ label="Weights type",
311
+ multiselect=False,
312
+ value="Original",
313
+ interactive=True,
314
+ )
315
+ base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
316
+
317
+ submit_button = gr.Button("Submit Eval")
318
+ submission_result = gr.Markdown()
319
+ submit_button.click(
320
+ add_new_eval,
321
+ [
322
+ model_name_textbox,
323
+ base_model_name_textbox,
324
+ revision_name_textbox,
325
+ precision,
326
+ weight_type,
327
+ model_type,
328
+ ],
329
+ submission_result,
330
+ )
331
+
332
+ with gr.Row():
333
+ with gr.Accordion("📙 Citation", open=False):
334
+ citation_button = gr.Textbox(
335
+ value=CITATION_BUTTON_TEXT,
336
+ label=CITATION_BUTTON_LABEL,
337
+ lines=20,
338
+ elem_id="citation-button",
339
+ show_copy_button=True,
340
+ )
341
+
342
+ scheduler = BackgroundScheduler()
343
+ scheduler.add_job(restart_space, "interval", seconds=1800)
344
+ scheduler.start()
345
+ demo.queue(default_concurrency_limit=40).launch()
346
+
347
+
348
+ # import gradio as gr
349
+ # import pandas as pd
350
+
351
+ # csv_filename = 'leaderboard.csv'
352
+ # # url = 'https://docs.google.com/spreadsheets/d/1Oh3nrbdWjKuh9twJsc9yJLppiJeD_BZyKgCTOxRkALM/export?format=csv'
353
+
354
+ # def get_data_classifica():
355
+ # dataset = pd.read_csv("leaderboard_general.csv", sep=',')
356
+ # if 'model ' in dataset.columns:
357
+ # dataset.rename(columns={'model ': 'model'}, inplace=True)
358
+ # df_classifica = dataset[['model', 'helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']]
359
+ # df_classifica['media'] = df_classifica[['helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']].mean(axis=1)
360
+ # df_classifica['media'] = df_classifica['media'].round(3)
361
+ # df_classifica = df_classifica.sort_values(by='media', ascending=False)
362
+ # df_classifica = df_classifica[['model', 'media', 'helloswag_it acc norm', 'arc_it acc norm', 'm_mmlu_it acc shot 5']]
363
+
364
+ # return df_classifica
365
+
366
+ # def get_data_totale():
367
+ # dataset = pd.read_csv("leaderboard_general.csv", sep=',')
368
+ # if 'model ' in dataset.columns:
369
+ # dataset.rename(columns={'model ': 'model'}, inplace=True)
370
+ # return dataset
371
+
372
+ # with gr.Blocks() as demo:
373
+
374
+ # with gr.Tab('Classifica Generale'):
375
+
376
+ # gr.Markdown('''# Classifica generale degli LLM italiani''')
377
+ # discord_link = 'https://discord.gg/m7sS3mduY2'
378
+ # gr.Markdown('''
379
+ # I modelli sottostanti sono stati testati con [lm_evaluation_harness](https://github.com/EleutherAI/lm-evaluation-harness) su task specifici per l'italiano introdotti con questa [PR](https://github.com/EleutherAI/lm-evaluation-harness/pull/1358).
380
+ # L'intero progetto, i modelli e i dataset sono rigorosamente open source e tutti i risultati sono riproducibili lanciando i seguenti comandi:
381
 
382
+ # ```
383
+ # lm_eval --model hf --model_args pretrained=HUGGINGFACE_MODEL_ID --tasks hellaswag_it,arc_it --device cuda:0 --batch_size auto:2
384
+ # ```
385
 
386
+ # ```
387
+ # lm_eval --model hf --model_args pretrained=HUGGINGFACE_MODEL_ID --tasks m_mmlu_it --num_fewshot 5 --device cuda:0 --batch_size auto:2
388
+ # ```
389
+ # ''')
390
+ # gr.DataFrame(get_data_classifica, every=3600)
391
+ # gr.Markdown(f"Contributore principale: @giux78")
392
+ # gr.Markdown('''
393
+ # ### Risultati su modelli "internazionali" (instruct)
394
+
395
+ # | Model | Arc-c | HellaS | MMUL | AVG |
396
+ # | --- | --- | --- | --- | --- |
397
+ # | Mixtral 8x22b | 55.3 | 77.1 | 75.8 | 69.4 |
398
+ # | LLama3 70b | 52.9 | 70.3 | 74.8 | 66.0 |
399
+ # | command-r-plus | 49.5 | 74.9 | 67.6 | 64.0 |
400
+ # | Mixtral 8x7b | 51.1 | 72.9 | 65.9 | 63.3 |
401
+ # | LLama2 70b | 49.4 | 70.9 | 65.1 | 61.8 |
402
+ # | command-r-v01 | 50.8 | 72.3 | 60.0 | 61.0 |
403
+ # | Phi-3-mini | 43.46 | 61.44 | 56.55 | 53.8 |
404
+ # | LLama3 8b | 44.3 | 59.9 | 55.7 | 53.3 |
405
+ # | LLama1 34b | 42.9 | 65.4 | 49.0 | 52.4 |
406
+ # | Mistral 7b | 41.49 | 61.22 | 52.53 | 51.7 |
407
+ # | Gemma 1.1 7b | 41.75 | 54.07 | 49.45 | 48.4 |
408
+
409
+ # ''')
410
+
411
+
412
+ # with gr.Tab('Classifica RAG'):
413
+
414
+ # gr.Markdown('''# Classifica RAG degli LLM italiani''')
415
+ # gr.Markdown(f'''In questa sezione i modelli sono valutati su dei task di Q&A e ordinati per F1 Score e EM (Exact Match). La repo di riferimento è [questa](https://github.com/C080/open-llm-ita-leaderboard).
416
+ # I modelli in cima alla classifica sono ritenuti preferibili per i task di Retrieval Augmented Generation.''')
417
+ # gr.Dataframe(pd.read_csv(csv_filename, sep=';'))
418
+ # gr.Markdown(f"Si ringrazia il @galatolo per il codice dell'eval.")
419
 
420
 
421
+ # with gr.Tab('Eval aggiuntive'):
422
 
423
+ # gr.Markdown('''# Altre evaluation''')
424
+ # gr.Markdown('''Qui ci sono altri test di altri modelli, che non sono ancora stati integrati nella classifica generale.''')
425
+ # gr.DataFrame(get_data_totale, every=3600)
426
 
427
+ # with gr.Tab('Informazioni'):
428
 
429
+ # form_link = "https://forms.gle/Gc9Dfu52xSBhQPpAA"
430
+ # gr.Markdown('''# Community discord
431
+ # Se vuoi contribuire al progetto o semplicemente unirti alla community di LLM italiani unisciti al nostro [discord!](https://discord.gg/m7sS3mduY2)
432
+ # # Aggiungi il tuo modello
433
+ # Se hai sviluppato un tuo modello che vuoi far valutare, compila il form [qui](https://forms.gle/Gc9Dfu52xSBhQPpAA) è tutto gratuito!
434
+ # ''')
435
 
436
+ # with gr.Tab('Sponsor'):
437
 
438
+ # gr.Markdown('''
439
+ # # Sponsor
440
+ # Le evaluation della classifica generale sono state gentilmente offerte da un provider cloud italiano [seeweb.it](https://www.seeweb.it/) specializzato in servizi di GPU cloud e AI.
441
+ # ''')
442
 
443
+ # demo.launch()