import gradio as gr
import pandas as pd


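# NOTE: a sketch of the expected schema of raw_outputs.pkl, inferred from the
# groupby calls below (not an official spec). One row per judged response,
# with at least these columns:
#   lmm                (str)  model identifier, e.g. "gpt4v"
#   question           (str)  prompt used to query the model
#   image              (str)  image identifier
#   accepted_by_judge  (bool) whether the judge accepted the response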
def load_and_process_data(file_path):
    # Load the leaderboard data
    df = pd.read_pickle(file_path)

    # Mean accuracy per (model, question): the fraction of responses accepted by the judge
    accuracy_df = (
        df.groupby(["lmm", "question"])["accepted_by_judge"].mean().reset_index()
    )
    accuracy_df = accuracy_df.rename(columns={"accepted_by_judge": "accuracy"})
    accuracy_df["accuracy"] = (accuracy_df["accuracy"] * 100).round(1)

    # Count the number of unique images evaluated per model
    image_count_df = df.groupby("lmm")["image"].nunique().reset_index()
    image_count_df = image_count_df.rename(columns={"image": "Total Images"})

    return accuracy_df, image_count_df


def expand_and_format_df(accuracy_df, image_count_df):
    # Pivot and format the accuracy dataframe
    expanded_df = accuracy_df.pivot(index="lmm", columns="question", values="accuracy")
    expanded_df["Average"] = expanded_df.mean(axis=1).round(1)
    expanded_df = expanded_df.sort_values(by="Average", ascending=False).reset_index()
    expanded_df.columns.name = None

    # Merge in the 'Total Images' column
    final_df = pd.merge(expanded_df, image_count_df, on="lmm")

    return final_df.rename(columns={"lmm": "Model"})


def map_model_names(df, name_dict):
    # Map raw model identifiers to display names; fall back to the original
    # identifier if a model is missing from the dictionary (a bare .map would
    # turn unmapped names into NaN)
    df["Model"] = df["Model"].map(name_dict).fillna(df["Model"])
    return df


# Dictionary for renaming models
name_dict = {
    "gpt4v": "GPT-4V(ision)",
    "llava": "LLaVA-1.5-13B",
    "llava-7b": "LLaVA-1.5-7B",
    "Long-SPHINX": "Long-SPHINX",
    "SPHINX": "SPHINX",
    "OtterHD": "OtterHD",
    "minigpt4v2": "MiniGPT4v2",
    "InstructBLIP-13B": "InstructBLIP-13B",
    "InstructBLIP": "InstructBLIP-7B",
    "qwen": "Qwen-VL-Chat",
    "fuyu-8b": "Fuyu-8B",
}

# Processing steps
accuracy_df, image_count_df = load_and_process_data("raw_outputs.pkl")
final_df = expand_and_format_df(accuracy_df, image_count_df)
final_df = map_model_names(final_df, name_dict)
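# final_df now holds the leaderboard table: one row per model, with one
# accuracy column per question plus "Average" and "Total Images" columns.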


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# GlitchBench Leaderboard")
    gr.HTML("""
<div style="text-align: center;">
    <a href="https://glitchbench.github.io/" target="_blank">Visit the GlitchBench Project Homepage</a>
</div>
""")
    with gr.Row():
        gr.Dataframe(final_df)

    gr.Markdown("# How to Submit Your Model")
    gr.Markdown(
        """
    We warmly invite you to submit your model's responses for inclusion in our leaderboard. To participate, please email a CSV file containing your model's responses to [glitchbench@gmail.com](mailto:glitchbench@gmail.com). Submissions will be evaluated, and successful ones will be featured on our leaderboard.

    **CSV File Format:**

    ```
    ImageId,Question,Output
    ```
    
    - `ImageId`: The ID of the image. (If unsure how to find the Image ID, please [add instructions or a link here]).
    - `Question`: The prompt used to query the model. We currently support only two question types:
        1. "What is unusual about this image?"
        2. "What is wrong with this image?"
    - `Output`: The response generated by your model.
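
    For example, a single row might look like this (illustrative values only):

    ```
    1234,What is unusual about this image?,The character's arm is clipping through the wall.
    ```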

    Ensure that your submissions follow these guidelines for a smooth evaluation process. We look forward to seeing your innovative models in action!
    """
    )


demo.launch()