import gradio as gr
import pandas as pd
import random
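
# Input files read below (shipped alongside app.py in the Space):
#   data.csv             - one row per prompt: 'hash', 'message', and one column of outputs per model
#   data_stats_langs.csv - per-model stats: 'model', 'model_name', and the benchmark columns (de, en, de_frac)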
# Model outputs (one column per model) and per-model benchmark stats
df = pd.read_csv('data.csv')
df_stats = pd.read_csv('data_stats_langs.csv')

# Map internal model ids to display names and rename the output columns accordingly
map_models = df_stats[['model', 'model_name']].set_index('model').to_dict()
df = df.rename(columns=map_models['model_name'])

# Every remaining column except 'hash' and 'message' is a model
models = sorted(df.columns.tolist())
models.remove('hash')
models.remove('message')

# Dropdown choices: label truncated to 250 characters, full prompt kept as the value
messages = sorted(df['message'].tolist(), key=len)
messages_select = [(m[:250], m) for m in messages]
def out(message, model1, model2):
    """Look up the selected prompt and return it together with both models' outputs."""
    row = df[df['message'] == message]
    output1 = row[model1].values[0]
    output2 = row[model2].values[0]
    return message, output1, output2
OUTPUT_DESCRIPTION = '''How good are open-source LLMs at German? I've benchmarked a number of models and generated outputs for about 250 prompts to compare them.
For details on the dataset and the generation setup, see the [README.md](https://huggingface.co/spaces/floleuerer/german_llm_outputs/blob/main/README.md).
Select a prompt and the two models you would like to compare, then hit "Show Outputs".
'''
BENCHMARK_DESCRIPTION = '''# Columns
- de: German benchmark results (ARC, HellaSwag, MMLU)
- en: English benchmark results (ARC, HellaSwag, MMLU)
- de_frac: Given a German prompt, how often does the model correctly respond in German?
'''
with gr.Blocks() as iface:
    with gr.Tab('Model Outputs'):
        gr.Markdown(OUTPUT_DESCRIPTION)
        with gr.Row():
            drop_message = gr.Dropdown(messages_select, label='Prompt', value=random.choice(messages))
        with gr.Row():
            drop_model1 = gr.Dropdown(models, label='Model 1', value=random.choice(models))
            drop_model2 = gr.Dropdown(models, label='Model 2', value=random.choice(models))
        with gr.Row():
            btn = gr.Button("Show Outputs")
        with gr.Row():
            out_message = gr.TextArea(label='Prompt')
        with gr.Row():
            out_model1 = gr.TextArea(label='Output Model 1')
            out_model2 = gr.TextArea(label='Output Model 2')
    with gr.Tab('Benchmarks'):
        gr.Markdown(BENCHMARK_DESCRIPTION)
        gr.Dataframe(df_stats.drop('model', axis=1))
    btn.click(out,
              inputs=[drop_message, drop_model1, drop_model2],
              outputs=[out_message, out_model1, out_model2])

iface.launch()