import json
from collections import defaultdict

import pandas as pd
import gradio as gr


def language_names(json_path):
    """Load the label-to-name mapping from a JSON file."""
    with open(json_path, 'r') as json_file:
        data = json.load(json_file)
    return data


label2name = language_names("assets/language_names.json")
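# assets/language_names.json is assumed to map ISO 639-3 codes to language
# names, e.g. {"eng": "English", ...} (format inferred from get_name below).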
|
|
def get_name(label):
    """Get the name of a language from its label."""
    iso_3 = label.split('_')[0]
    return label2name[iso_3]
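# Example with a hypothetical mapping entry: get_name('eng_Latn') -> 'English'.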
|
|
FLORES_ARC = {'name': 'FLORES - ARC style', 'csv': 'evals/flores-mean-arc.csv'}
Bible_ARC = {'name': 'Bible - ARC style', 'csv': 'evals/bible-mean-arc.csv'}
FLORES_Belebele = {'name': 'FLORES - Belebele style', 'csv': 'evals/flores-max-belebele.csv'}
Bible_Belebele = {'name': 'Bible - Belebele style', 'csv': 'evals/bible-max-belebele.csv'}

BENCHMARKS = [FLORES_ARC, Bible_ARC, FLORES_Belebele, Bible_Belebele]
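# Naming note: the '-mean-arc' CSVs use mean pooling over layers (ARC style),
# and the '-max-belebele' CSVs use max pooling (Belebele style); see the
# Details section of INTRO_TEXT below.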
|
|
def collect_results():
    """Collect per-language scores from every benchmark CSV into a nested dict."""
    performance_dict = defaultdict(lambda: defaultdict(dict))
    for bench in BENCHMARKS:
        task = bench['name']
        results = pd.read_csv(bench['csv'], index_col=0).to_dict()
        for model, d_lang_value in results.items():
            # Skip the aggregate 'avg' column.
            if model == 'avg':
                continue
            for lang, value in d_lang_value.items():
                # Group by parallel dataset ('FLORES' or 'Bible'), then by (model, lang).
                performance_dict[task.split(' ')[0]][(model, lang)][task] = value
    return performance_dict
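# Each CSV is assumed to have one row per language and one column per model,
# plus an 'avg' column, e.g. (illustrative layout and values only):
#
#               model-x   model-y   avg
#   eng_Latn      0.95      0.91   0.93
#   deu_Latn      0.81      0.77   0.79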
|
|
def get_leaderboard_df(results_dict, parallel_data='FLORES'):
    rows = []
    performance_dict = results_dict.get(parallel_data, defaultdict(dict))

    for (pretrained, lang), perfs in performance_dict.items():
        lang_name = get_name(lang)
        # Missing scores default to 0 so an incomplete pair never raises.
        perfs_num = [
            perfs.get(f'{parallel_data} - ARC style', 0),
            perfs.get(f'{parallel_data} - Belebele style', 0),
        ]
        avg = round(sum(perfs_num) / len(perfs_num), 4)
        notes = ' '.join([pretrained, lang_name])
        rows.append([pretrained, lang_name, lang, avg] + perfs_num + [notes])

    df = pd.DataFrame.from_records(rows, columns=COLS)

    # Order languages by their mean Average score, then sort rows by Average
    # within that language ordering.
    language_aggregate = (
        df.groupby("Language")["Average"]
        .mean()
        .reset_index()
        .sort_values(by='Average', ascending=False)
    )
    df = df.sort_values(by='Average', ascending=False)
    df = df.set_index("Language").loc[language_aggregate["Language"]].reset_index()
    df = df[COLS]

    return df
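# get_leaderboard_df returns one row per (model, language) pair, with columns
# given by COLS (defined below); e.g. get_leaderboard_df(performance, 'Bible').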
|
|
MODEL_COL = "Model"
LANG_COL = "Language"
CODE_COL = "Code"
ARC_COL = "ARC Style"
BELEBELE_COL = "Belebele Style"
AVERAGE_COL = "Average"
NOTES_COL = "Notes"

COLS = [MODEL_COL, LANG_COL, CODE_COL, AVERAGE_COL, ARC_COL, BELEBELE_COL, NOTES_COL]
# One datatype per entry in COLS.
TYPES = ["str", "str", "str", "number", "number", "number", "str"]

performance = collect_results()
|
|
def search_table(query, selection):
    """Filter rows by a case-insensitive substring match on the Notes column."""
    df = get_leaderboard_df(performance, selection)
    filtered_df = df[df[NOTES_COL].str.contains(query, case=False)]
    return filtered_df


def update_dataframe(selection):
    """Rebuild the leaderboard for the selected parallel dataset."""
    return get_leaderboard_df(performance, selection)
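# The Notes column concatenates the model name and language name, so a single
# search box matches both; the column itself is hidden by CUSTOM_CSS below.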
|
|
CUSTOM_CSS = """
/* Hide the final column (Notes); it exists only to back the search bar. */
table td:last-child,
table th:last-child {
    display: none;
}
/* Disabled: cap the width of the first column.
table td:first-child,
table th:first-child {
    max-width: 400px;
    overflow: auto;
    white-space: nowrap;
}
*/
"""
|
|
TITLE = '<h1 align="center" id="space-title">Mexa: Multilingual Evaluation of Open English-Centric LLMs via Cross-Lingual Alignment</h1>'

INTRO_TEXT = """
## About

We introduce Mexa, a method for assessing the multilingual capabilities of English-centric large language models (LLMs). Mexa builds on the observation that English-centric LLMs semantically use English as a kind of pivot language in their intermediate layers. It computes the alignment between non-English languages and English using parallel sentences, estimating how well language understanding transfers from English to other languages through this alignment. The resulting score can be used to estimate task performance, provided we know the English performance on the task and the alignment score derived from a parallel dataset.

## Code

https://github.com/cisnlp/Mexa/

## Details

We use parallel datasets from FLORES and the Bible. In the ARC style, we use mean pooling over layers, and the English score achieved by each LLM on the ARC benchmark is used to adjust the multilingual scores. In the Belebele style, we use max pooling over layers, and the English score achieved by each LLM on Belebele is used to adjust the multilingual scores.
"""
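# Illustrative sketch (hypothetical helper, not used by this app) of the two
# aggregation styles described in the Details section above:
#
#   def aggregate(layer_scores, style):
#       """Aggregate per-layer alignment scores into a single Mexa score."""
#       if style == 'ARC':  # ARC style: mean pooling over layers
#           return sum(layer_scores) / len(layer_scores)
#       return max(layer_scores)  # Belebele style: max pooling over layers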
|
|
CITATION = """
## Citation

```
@article{kargaran2024mexa,
  title = {Mexa: Multilingual Evaluation of {E}nglish-Centric {LLMs} via Cross-Lingual Alignment},
  author = {Kargaran, Amir Hossein and Modarressi, Ali and Nikeghbal, Nafiseh and Diesner, Jana and Yvon, François and Schütze, Hinrich},
  journal = {arXiv preprint},
  year = {2024},
  url = {https://github.com/cisnlp/Mexa/}
}
```
"""
|
|
original_df = get_leaderboard_df(performance, 'FLORES')
|
|
demo = gr.Blocks(css=CUSTOM_CSS)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRO_TEXT, elem_classes="markdown-text")

    with gr.Group():
        selection = gr.Radio(
            ["FLORES", "Bible"],
            label="Parallel Dataset",
            info="Choose a parallel dataset.",
            value="FLORES",
        )

        search_bar = gr.Textbox(
            placeholder="Search models and languages...",
            show_label=False,
            elem_id="search-bar",
        )

        leaderboard_table = gr.components.Dataframe(
            value=original_df,
            headers=COLS,
            datatype=TYPES,
            elem_id="leaderboard-table",
        )

    # Rebuild the table when the parallel dataset changes.
    selection.change(
        fn=update_dataframe,
        inputs=selection,
        outputs=leaderboard_table,
    )

    # Re-filter the table as the search query changes.
    search_bar.change(
        fn=search_table,
        inputs=[search_bar, selection],
        outputs=leaderboard_table,
    )

    gr.Markdown(CITATION, elem_classes="markdown-text")

demo.launch()
|
|