import gradio as gr
import pandas as pd
import random

# data.csv: one row per prompt ('message'), one output column per model,
# plus a 'hash' metadata column.
df = pd.read_csv('data.csv')
# data_stats_langs.csv: per-model benchmark stats, including display names.
df_stats = pd.read_csv('data_stats_langs.csv')

# map_models['model_name'] maps raw model ids to human-readable display names;
# rename the model columns in df accordingly.
map_models = df_stats[['model', 'model_name']].set_index('model').to_dict()
df = df.rename(columns=map_models['model_name'])

# The model list is every column except the 'hash' and 'message' metadata columns.
models = sorted(df.columns.tolist())
models.remove('hash')
models.remove('message')

# Sort prompts by length; the dropdown shows labels truncated to 250 characters
# while keeping the full message as the underlying value.
messages = sorted(df['message'].tolist(), key=len)
messages_select = [(m[:250], m) for m in messages]

def out(message, model1, model2):
    """Look up the selected prompt in df and return it together with the
    outputs of the two selected models."""
    row = df[df['message'] == message]
    output1 = row[model1].values[0]
    output2 = row[model2].values[0]
    return message, output1, output2
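
# Example call (illustrative values only; real prompts and model display names
# come from data.csv after the rename above):
#   out('Schreibe ein Gedicht über den Herbst.', 'Model A', 'Model B')
#   -> ('Schreibe ein Gedicht über den Herbst.',
#       '<output of Model A>', '<output of Model B>')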

OUTPUT_DESCRIPTION = '''How good are open-source LLMs at German? I've benchmarked a couple of models and generated outputs for about 250 prompts to compare them.

For information about the dataset used and how the outputs were generated, see the [README.md](https://huggingface.co/spaces/floleuerer/german_llm_outputs/blob/main/README.md).

Select a prompt and the models you would like to compare, then hit "Show Outputs".
'''

BENCHMARK_DESCRIPTION = '''# Columns  
de: German benchmark results (arc, hellaswag, mmlu)  
en: English benchmark results (arc, hellaswag, mmlu)  
de_frac: Given a German prompt, how often does the model correctly respond in German?
'''
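
# Purely as a rough illustration of a metric like de_frac (an assumption, not
# the author's pipeline -- see the README linked above for the actual setup),
# such a fraction could be estimated with a language detector like langdetect:
def german_fraction(outputs):
    """Illustrative sketch: fraction of model outputs detected as German."""
    from langdetect import detect  # hypothetical extra dependency, imported lazily
    detected = [detect(o) for o in outputs if isinstance(o, str) and o.strip()]
    return sum(lang == 'de' for lang in detected) / len(detected) if detected else 0.0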

with gr.Blocks() as iface:
    with gr.Tab('Model Outputs'):
        gr.Markdown(OUTPUT_DESCRIPTION)
        # Prompt selector; a random prompt is pre-selected on load.
        with gr.Row():
            drop_message = gr.Dropdown(messages_select, label='Prompt', value=random.choice(messages))
        # Two model selectors, shown side by side.
        with gr.Row():
            drop_model1 = gr.Dropdown(models, label='Model 1', value=random.choice(models))
            drop_model2 = gr.Dropdown(models, label='Model 2', value=random.choice(models))
        with gr.Row():
            btn = gr.Button('Show Outputs')
        with gr.Row():
            out_message = gr.TextArea(label='Prompt')
        with gr.Row():
            out_model1 = gr.TextArea(label='Output Model 1')
            out_model2 = gr.TextArea(label='Output Model 2')
    with gr.Tab('Benchmarks'):
        gr.Markdown(BENCHMARK_DESCRIPTION)
        # Drop the raw 'model' id column; the table shows display names and stats.
        gr.Dataframe(df_stats.drop('model', axis=1))

    # Wire the button: look up the selected prompt and fill the three text areas.
    btn.click(out,
              inputs=[drop_message, drop_model1, drop_model2],
              outputs=[out_message, out_model1, out_model2])

iface.launch()