File size: 9,237 Bytes
3427608
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ffc77cb
 
3427608
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ffc77cb
3427608
b257e01
3427608
b6dc501
3427608
a4b32da
b6dc501
 
b257e01
b6dc501
3427608
 
a4b32da
3427608
 
 
 
b6dc501
 
 
3427608
 
b257e01
 
3427608
 
 
 
 
b6dc501
b257e01
b6dc501
 
 
 
3427608
b6dc501
 
3427608
b6dc501
 
3427608
b6dc501
 
3427608
b6dc501
 
ffc77cb
b6dc501
 
 
 
b257e01
b6dc501
 
ffc77cb
a4b32da
ffc77cb
 
 
 
 
a4b32da
ffc77cb
 
 
 
 
 
b6dc501
ffc77cb
 
 
b6dc501
b257e01
 
 
 
 
b6dc501
 
b257e01
b6dc501
 
 
b257e01
 
b6dc501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b257e01
 
 
 
 
 
 
 
ffc77cb
b257e01
 
 
 
 
ffc77cb
 
b257e01
ffc77cb
 
 
b6dc501
 
 
b257e01
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
"""
Live monitor of the website statistics and leaderboard.

Dependency:
sudo apt install pkg-config libicu-dev
pip install pytz gradio gdown plotly polyglot pyicu pycld2 tabulate
"""

import argparse
import ast
import pickle
import os
import threading
import time

import gradio as gr
import numpy as np
import pandas as pd
import json
from datetime import datetime


# def make_leaderboard_md(elo_results):
#     leaderboard_md = f"""
# # πŸ† Chatbot Arena Leaderboard
# | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |

# This leaderboard is based on the following three benchmarks.
# - [Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/) - a crowdsourced, randomized battle platform. We use 100K+ user votes to compute Elo ratings.
# - [MT-Bench](https://arxiv.org/abs/2306.05685) - a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
# - [MMLU](https://arxiv.org/abs/2009.03300) (5-shot) - a test to measure a model's multitask accuracy on 57 tasks.

# πŸ’» Code: The Arena Elo ratings are computed by this [notebook]({notebook_url}). The MT-bench scores (single-answer grading on a scale of 10) are computed by [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge). The MMLU scores are mostly computed by [InstructEval](https://github.com/declare-lab/instruct-eval). Higher values are better for all benchmarks. Empty cells mean not available. Last updated: November, 2023.
# """
#     return leaderboard_md

def make_leaderboard_md():
    """Return the Markdown header for the text-to-image leaderboard tab."""
    # Plain string literal: there are no placeholders, so the f-prefix was unnecessary.
    leaderboard_md = """
# πŸ† K-Sort Arena Leaderboard (Text-to-Image Generation)
"""
    return leaderboard_md


def make_leaderboard_video_md():
    """Return the Markdown header for the text-to-video leaderboard tab."""
    # Plain string literal: there are no placeholders, so the f-prefix was unnecessary.
    leaderboard_md = """
# πŸ† K-Sort Arena Leaderboard (Text-to-Video Generation)
"""
    return leaderboard_md


def model_hyperlink(model_name, link):
    """Wrap *model_name* in an HTML anchor pointing at *link* (opens in a new tab)."""
    anchor_style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{link}" style="{anchor_style}">{model_name}</a>'


def make_arena_leaderboard_md(total_models, total_votes, last_updated):
    """Return the Markdown summary line shown above the arena leaderboard table.

    Args:
        total_models: number of models on the board.
        total_votes: number of votes; each vote counts as 6 pairwise
            comparisons (hence the ``* 6`` below).
        last_updated: human-readable timestamp string, rendered verbatim.
    """
    leaderboard_md = f"""
Total models: **{total_models}** (anonymized), Total votes: **{total_votes}** (equivalent to **{total_votes*6}** pairwise comparisons)
\n Last updated: {last_updated}
"""

    return leaderboard_md


def make_disclaimer_md():
    """Return the HTML snippet for the disclaimer link plus its hidden modal dialog.

    The modal (and its click-to-dismiss overlay) starts hidden; the trailing
    paragraph's "disclaimer" link toggles both visible via inline onclick handlers.
    """
    # Fix: "Purpose and Scope" previously ended with a duplicated closing tag
    # ("</b></b>") — invalid HTML; now a single "</b>".
    disclaimer_md = '''
        <div id="modal" style="display:none; position:fixed; top:50%; left:50%; transform:translate(-50%, -50%); padding:20px; background:white; box-shadow:0 0 10px rgba(0,0,0,0.5); z-index:1000;">
            <p style="font-size:24px;"><strong>Disclaimer</strong></p>
            <p style="font-size:18px;"><b>Purpose and Scope</b></p>
            <p><b>This platform is designed for academic use, providing a space for evaluating and comparing Visual Generation Models. The information and services provided are intended for research and educational purposes only.</b></p>

            <p style="font-size:18px;"><b>Privacy and Data Protection</b></p>
            <p><b>While users may voluntarily submit their names and institutional affiliations, this information is not required and is collected solely for the purpose of academic recognition. Personal information submitted to this platform will be handled with care and used solely for the intended academic purposes. We are committed to protecting your privacy, and we will not share personal data with third parties without explicit consent.</b></p>

            <p style="font-size:18px;"><b>Source of Models</b></p>
            <p><b>All models evaluated and displayed on this platform are obtained from official sources, including but not limited to official repositories and Replicate.</b></p>

            <p style="font-size:18px;"><b>Limitations of Liability</b></p>
            <p><b>The platform and its administrators do not assume any legal liability for the use or interpretation of the information provided. The evaluations and comparisons are for academic purposes. Users should verify the information independently and must not use the platform for any illegal, harmful, violent, racist, or sexual purposes.</b></p>

            <p style="font-size:18px;"><b>Modification of Terms</b></p>
            <p><b>We reserve the right to modify these terms at any time. Users will be notified of significant changes through updates on the platform.</b></p>

            <p style="font-size:18px;"><b>Contact Information</b></p>
            <p><b>For any questions or to report issues, please contact us at info@ksort.org.</b></p>
        </div>
        <div id="overlay" style="display:none; position:fixed; top:0; left:0; width:100%; height:100%; background:rgba(0,0,0,0.5); z-index:999;" onclick="document.getElementById('modal').style.display='none'; document.getElementById('overlay').style.display='none'"></div>
        <p> This platform is designed for academic usage, for details please refer to <a href="#" id="open_link" onclick="document.getElementById('modal').style.display='block'; document.getElementById('overlay').style.display='block'">disclaimer</a>.</p>
        '''
    return disclaimer_md


def make_arena_leaderboard_data(results):
    """Convert *results* (a list/sequence of per-model records) into a DataFrame
    suitable for ``gr.Dataframe``.
    """
    # pandas is already imported as `pd` at module level; the previous
    # redundant function-local `import pandas as pd` has been removed.
    df = pd.DataFrame(results)
    return df


def build_leaderboard_tab(score_result_file='sorted_score_list.json'):
    """Render the text-to-image leaderboard tab from a precomputed score file.

    Args:
        score_result_file: path to a JSON file containing the keys
            "sorted_score_list", "total_models", "total_votes" and
            "last_updated".
    """
    with open(score_result_file, "r") as json_file:
        data = json.load(json_file)
    score_results = data["sorted_score_list"]
    total_models = data["total_models"]
    total_votes = data["total_votes"]
    last_updated = data["last_updated"]

    # Header, summary line, then the score table. (Previously the variable
    # `md` was reused for a Markdown string and then a DataFrame, and `md_1`
    # was bound but never used.)
    gr.Markdown(make_leaderboard_md(), elem_id="leaderboard_markdown")
    gr.Markdown(
        make_arena_leaderboard_md(total_models, total_votes, last_updated),
        elem_id="leaderboard_markdown",
    )
    gr.Dataframe(make_arena_leaderboard_data(score_results))

    gr.Markdown(
                """ ### The leaderboard is regularly updated and continuously incorporates new models. 
                """,
                elem_id="leaderboard_markdown",
            )
    # NOTE(review): make_disclaimer_md is passed as a callable (not called);
    # Gradio treats callable values as load-time value functions — confirm intended.
    with gr.Blocks():
        gr.HTML(make_disclaimer_md)
    from .utils import acknowledgment_md  # dropped unused `html_code` import
    with gr.Blocks():
        gr.Markdown(acknowledgment_md)


def build_leaderboard_video_tab(score_result_file='sorted_score_list_video.json'):
    """Render the text-to-video leaderboard tab from a precomputed score file.

    Args:
        score_result_file: path to a JSON file containing the keys
            "sorted_score_list", "total_models", "total_votes" and
            "last_updated".
    """
    with open(score_result_file, "r") as json_file:
        data = json.load(json_file)
    score_results = data["sorted_score_list"]
    total_models = data["total_models"]
    total_votes = data["total_votes"]
    last_updated = data["last_updated"]

    # Header, summary line, then the score table. (Previously the variable
    # `md` was reused for a Markdown string and then a DataFrame, and `md_1`
    # was bound but never used.)
    gr.Markdown(make_leaderboard_video_md(), elem_id="leaderboard_markdown")
    gr.Markdown(
        make_arena_leaderboard_md(total_models, total_votes, last_updated),
        elem_id="leaderboard_markdown",
    )
    gr.Dataframe(make_arena_leaderboard_data(score_results))

    # Caveat about Sora: its entries come from official sample videos only,
    # so its ranking should be read as an upper bound.
    notice_markdown_sora = """
    - Note: As Sora's video generation function is not publicly available, we used sample videos from their official website. This may lead to a biased assessment of Sora's capabilities, as these samples likely represent Sora's best outputs. Therefore, Sora's position on our leaderboard should be considered as its upper bound. We are working on methods to conduct more comprehensive and fair comparisons in the future.
    """

    gr.Markdown(notice_markdown_sora, elem_id="notice_markdown_sora")

    gr.Markdown(
                """ ### The leaderboard is regularly updated and continuously incorporates new models. 
                """,
                elem_id="leaderboard_markdown",
            )
    from .utils import acknowledgment_md  # dropped unused `html_code` import
    with gr.Blocks():
        gr.Markdown(acknowledgment_md)


def build_leaderboard_contributor(file='contributor.json'):
    """Render the contributor leaderboard from a JSON file.

    Args:
        file: path to a JSON file with key "contributor" holding the records
            to display. (The name shadows the `file` builtin but is kept for
            backward compatibility with keyword callers.)
    """
    # Plain string literal: there are no placeholders, so the f-prefix was
    # unnecessary. (Previously `md` was reused for this string and then a
    # DataFrame, and `md_1` was bound but never used.)
    intro_md = """
# πŸ† Contributor Leaderboard
The submission of user information is entirely optional. This information is used solely for contribution statistics. We respect and safeguard users' privacy choices. 
To maintain a clean and concise leaderboard, please ensure consistency in submitted names and affiliations. For example, use 'Berkeley' consistently rather than alternating with 'UC Berkeley'.
"""
    gr.Markdown(intro_md, elem_id="leaderboard_markdown")

    with open(file, "r") as json_file:
        data = json.load(json_file)
    gr.Dataframe(make_arena_leaderboard_data(data["contributor"]))

    gr.Markdown(
                """ ### The leaderboard is regularly updated. 
                """,
                elem_id="leaderboard_markdown",
            )