# coding=utf-8
# Copyright 2024 The Mexa Authors.
# Lint as: python3
# This Space is based on uonlp/open_multilingual_llm_leaderboard.
# Mexa Space
import os
import json
import glob
from collections import defaultdict
import pandas as pd
import gradio as gr
# Load the label -> language name mapping once at startup.
def language_names(json_path):
    with open(json_path, 'r') as json_file:
        data = json.load(json_file)
    return data

label2name = language_names("assets/language_names.json")
def get_name(label):
    """Get the language name from a label such as 'deu_Latn'."""
    iso_3 = label.split('_')[0]
    name = label2name[iso_3]
    return name
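# Example (illustrative; assumes 'deu' is a key in assets/language_names.json):
#   get_name('deu_Latn')  # -> 'German'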
FLORES_ARC = {'name': 'FLORES - ARC style', 'csv': 'evals/flores-mean-arc.csv'}
BIBLE_ARC = {'name': 'Bible - ARC style', 'csv': 'evals/bible-mean-arc.csv'}
FLORES_BELEBELE = {'name': 'FLORES - Belebele style', 'csv': 'evals/flores-max-belebele.csv'}
BIBLE_BELEBELE = {'name': 'Bible - Belebele style', 'csv': 'evals/bible-max-belebele.csv'}
BENCHMARKS = [FLORES_ARC, BIBLE_ARC, FLORES_BELEBELE, BIBLE_BELEBELE]
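# Each CSV is expected to have one column per model plus an 'avg' column
# (skipped below), with language labels as the row index. Illustrative layout
# (model and language names are hypothetical):
#
#   ,model-a,model-b,avg
#   deu_Latn,0.83,0.79,0.81
#   fra_Latn,0.88,0.84,0.86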
def collect_results():
    performance_dict = defaultdict(lambda: defaultdict(dict))
    for bench in BENCHMARKS:
        task = bench['name']
        results = pd.read_csv(bench['csv'], index_col=0).to_dict()
        # Group scores by parallel dataset ('FLORES' or 'Bible'), then by
        # (model, language) pair; skip the per-model 'avg' column.
        for model, d_lang_value in results.items():
            if model == 'avg':
                continue
            for lang, value in d_lang_value.items():
                performance_dict[task.split(' ')[0]][(model, lang)][task] = value
    return performance_dict
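# The returned mapping looks like this (illustrative values; the model and
# language names are hypothetical):
#   results['FLORES'][('model-a', 'deu_Latn')] == {
#       'FLORES - ARC style': 0.83,
#       'FLORES - Belebele style': 0.91,
#   }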
def get_leaderboard_df(results_dict, parallel_data='FLORES'):
    df = list()
    performance_dict = results_dict.get(parallel_data, defaultdict(dict))
    for (pretrained, lang), perfs in performance_dict.items():
        lang_name = get_name(lang)
        # Default to 0 when a score is missing for either style.
        perfs_num = [
            perfs.get(f'{parallel_data} - ARC style', 0),
            perfs.get(f'{parallel_data} - Belebele style', 0),
        ]
        avg = round(sum(perfs_num) / len(perfs_num), 4)
        notes = ' '.join([pretrained, lang_name])
        row = [pretrained, lang_name, lang, avg] + perfs_num + [notes]
        df.append(row)
    df = pd.DataFrame.from_records(df, columns=COLS)
    # Sort: order languages by their mean 'Average' across models, then order
    # models within each language by their own 'Average'.
    language_aggregate = df.groupby("Language")["Average"].mean().reset_index().sort_values(by='Average', ascending=False).reset_index()
    df = df.sort_values(by='Average', ascending=False)
    df = df.set_index("Language").loc[language_aggregate["Language"]].reset_index()
    df = df[COLS]
    return df
MODEL_COL = "Model"
LANG_COL = "Language"
CODE_COL = "Code"
ARC_COL = "ARC Style"
BELEBELE_COL = "Belebele Style"
AVERAGE_COL = "Average"
NOTES_COL = "Notes" # For search only
COLS = [MODEL_COL, LANG_COL, CODE_COL, AVERAGE_COL, ARC_COL, BELEBELE_COL, NOTES_COL]
TYPES = ["str", "str", "str", "number", "number", "number", "str"]  # one entry per column in COLS
performance = collect_results()
def search_table(query, selection):
    df = get_leaderboard_df(performance, selection)
    # regex=False so user-typed special characters are matched literally.
    filtered_df = df[df[NOTES_COL].str.contains(query, case=False, regex=False)]
    return filtered_df
def update_dataframe(selection):
    # 'selection' is either 'FLORES' or 'Bible' from the radio button.
    return get_leaderboard_df(performance, selection)
CUSTOM_CSS = """
/* Hide the final (Notes) column, which is only used for search. */
table td:last-child,
table th:last-child {
    display: none;
}
/*
table td:first-child,
table th:first-child {
    max-width: 400px;
    overflow: auto;
    white-space: nowrap;
}
*/
"""
TITLE = '<h1 align="center" id="space-title">Mexa: Multilingual Evaluation of Open English-Centric LLMs via Cross-Lingual Alignment</h1>'
INTRO_TEXT = """
## About
We introduce Mexa, a method for assessing the multilingual capabilities of English-centric large language models (LLMs). Mexa builds on the observation that English-centric LLMs use English as a kind of semantic pivot language in their intermediate layers. It computes the alignment between non-English languages and English using parallel sentences, and uses this alignment to estimate how well language understanding capabilities transfer from English to other languages. The resulting score can be used to estimate task performance, provided we know the model's English performance on the task and the alignment score derived from a parallel dataset.
## Code
https://github.com/kargaranamir
## Details
We use parallel datasets from FLORES and the Bible. In the ARC style, we use mean pooling over layers, and each LLM's English score on the ARC benchmark is used to adjust its multilingual scores. In the Belebele style, we use max pooling over layers, and each LLM's English score on Belebele is used to adjust its multilingual scores.
"""
CITATION = """
## Citation
```
@article{kargaran2024mexa,
title = {Mexa: Multilingual Evaluation of {E}nglish-Centric {LLMs} via Cross-Lingual Alignment},
author = {Kargaran, Amir Hossein and Modarressi, Ali and Nikeghbal, Nafiseh and Diesner, Jana and Yvon, François and Schütze, Hinrich},
journal = {arXiv preprint},
year = {2024},
url = {https://github.com/cisnlp/Mexa/}
}
```
"""
original_df = get_leaderboard_df(performance, 'FLORES')  # default view

demo = gr.Blocks(css=CUSTOM_CSS)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRO_TEXT, elem_classes="markdown-text")
    with gr.Group():
        selection = gr.Radio(
            ["FLORES", "Bible"],
            label="Parallel Dataset",
            info="Choose a parallel dataset!",
            value='FLORES'
        )
        search_bar = gr.Textbox(
            placeholder="Search models and languages...", show_label=False, elem_id="search-bar"
        )
    leaderboard_table = gr.components.Dataframe(
        value=original_df,
        headers=COLS,
        datatype=TYPES,
        # max_rows=5,
        elem_id="leaderboard-table",
    )
    selection.change(
        fn=update_dataframe,
        inputs=selection,
        outputs=leaderboard_table
    )
    search_bar.change(
        fn=search_table,
        inputs=[search_bar, selection],
        outputs=leaderboard_table,
    )
    gr.Markdown(CITATION, elem_classes="markdown-text")

demo.launch()