lmzheng committed
Commit
72650c2
•
1 Parent(s): 81b3d10

Upload app.py

Files changed (1)
  1. app.py +189 -17
app.py CHANGED
@@ -1,47 +1,215 @@
 """A gradio app that renders a static leaderboard. This is used for Hugging Face Space."""
+import ast
 import argparse
 import pickle
 
 import gradio as gr
+import numpy as np
 
 
-notebook_url = "https://colab.research.google.com/drive/17L9uCiAivzWfzOxo2Tb9RMauT7vS6nVU?usp=sharing"
+notebook_url = "https://colab.research.google.com/drive/1RAWb22-PFNI-X1gPVzc927SGUdfr6nsR?usp=sharing"
+
+
+basic_component_values = [None] * 6
+leader_component_values = [None] * 5
 
 
 def make_leaderboard_md(elo_results):
     leaderboard_md = f"""
 # Leaderboard
-| [Vote](https://arena.lmsys.org/) | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+| [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+
+🏆 This leaderboard is based on the following three benchmarks.
+- [Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/) - a crowdsourced, randomized battle platform. We use 40K+ user votes to compute Elo ratings.
+- [MT-Bench](https://arxiv.org/abs/2306.05685) - a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
+- [MMLU](https://arxiv.org/abs/2009.03300) (5-shot) - a test to measure a model's multitask accuracy on 57 tasks.
+
+💻 We use [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge) to compute MT-bench scores (single-answer grading on a scale of 10) and win rates (against gpt-3.5). The Arena Elo ratings are computed by this [notebook]({notebook_url}). The MMLU scores are computed by [InstructEval](https://github.com/declare-lab/instruct-eval) and [Chain-of-Thought Hub](https://github.com/FranxYao/chain-of-thought-hub). Higher values are better for all benchmarks. Empty cells mean not available.
+"""
+    return leaderboard_md
 
-We use the Elo rating system to calculate the relative performance of the models. You can view the voting data, basic analyses, and calculation procedure in this [notebook]({notebook_url}). We will periodically release new leaderboards. If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model).
+
+def make_leaderboard_md_live(elo_results):
+    leaderboard_md = f"""
+# Leaderboard
 Last updated: {elo_results["last_updated_datetime"]}
 {elo_results["leaderboard_table"]}
 """
     return leaderboard_md
 
 
-def build_leaderboard_tab(elo_results_file):
-    if elo_results_file is not None:
+def update_elo_components(max_num_files, elo_results_file):
+    log_files = get_log_files(max_num_files)
+
+    # Leaderboard
+    if elo_results_file is None:  # Do live update
+        battles = clean_battle_data(log_files)
+        elo_results = report_elo_analysis_results(battles)
+
+        leader_component_values[0] = make_leaderboard_md_live(elo_results)
+        leader_component_values[1] = elo_results["win_fraction_heatmap"]
+        leader_component_values[2] = elo_results["battle_count_heatmap"]
+        leader_component_values[3] = elo_results["bootstrap_elo_rating"]
+        leader_component_values[4] = elo_results["average_win_rate_bar"]
+
+    # Basic stats
+    basic_stats = report_basic_stats(log_files)
+    md0 = f"Last updated: {basic_stats['last_updated_datetime']}"
+
+    md1 = "### Action Histogram\n"
+    md1 += basic_stats["action_hist_md"] + "\n"
+
+    md2 = "### Anony. Vote Histogram\n"
+    md2 += basic_stats["anony_vote_hist_md"] + "\n"
+
+    md3 = "### Model Call Histogram\n"
+    md3 += basic_stats["model_hist_md"] + "\n"
+
+    md4 = "### Model Call (Last 24 Hours)\n"
+    md4 += basic_stats["num_chats_last_24_hours"] + "\n"
+
+    basic_component_values[0] = md0
+    basic_component_values[1] = basic_stats["chat_dates_bar"]
+    basic_component_values[2] = md1
+    basic_component_values[3] = md2
+    basic_component_values[4] = md3
+    basic_component_values[5] = md4
+
+
+def update_worker(max_num_files, interval, elo_results_file):
+    while True:
+        tic = time.time()
+        update_elo_components(max_num_files, elo_results_file)
+        durtaion = time.time() - tic
+        print(f"update duration: {durtaion:.2f} s")
+        time.sleep(max(interval - durtaion, 0))
+
+
+def load_demo(url_params, request: gr.Request):
+    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
+    return basic_component_values + leader_component_values
+
+
+def model_hyperlink(model_name, link):
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+def load_leaderboard_table_csv(filename, add_hyperlink=True):
+    lines = open(filename).readlines()
+    heads = [v.strip() for v in lines[0].split(",")]
+    rows = []
+    for i in range(1, len(lines)):
+        row = [v.strip() for v in lines[i].split(",")]
+        for j in range(len(heads)):
+            item = {}
+            for h, v in zip(heads, row):
+                if h == "Arena Elo rating":
+                    if v != "-":
+                        v = int(ast.literal_eval(v))
+                    else:
+                        v = np.nan
+                elif h == "MMLU":
+                    if v != "-":
+                        v = round(ast.literal_eval(v) * 100, 1)
+                    else:
+                        v = np.nan
+                elif h == "MT-bench (win rate %)":
+                    if v != "-":
+                        v = round(ast.literal_eval(v[:-1]), 1)
+                    else:
+                        v = np.nan
+                elif h == "MT-bench (score)":
+                    if v != "-":
+                        v = round(ast.literal_eval(v), 2)
+                    else:
+                        v = np.nan
+                item[h] = v
+            if add_hyperlink:
+                item["Model"] = model_hyperlink(item["Model"], item["Link"])
+        rows.append(item)
+
+    return rows
+
+
+def build_basic_stats_tab():
+    empty = "Loading ..."
+    basic_component_values[:] = [empty, None, empty, empty, empty, empty]
+
+    md0 = gr.Markdown(empty)
+    gr.Markdown("#### Figure 1: Number of model calls and votes")
+    plot_1 = gr.Plot(show_label=False)
+    with gr.Row():
+        with gr.Column():
+            md1 = gr.Markdown(empty)
+        with gr.Column():
+            md2 = gr.Markdown(empty)
+    with gr.Row():
+        with gr.Column():
+            md3 = gr.Markdown(empty)
+        with gr.Column():
+            md4 = gr.Markdown(empty)
+    return [md0, plot_1, md1, md2, md3, md4]
+
+
+def build_leaderboard_tab(elo_results_file, leaderboard_table_file):
+    if elo_results_file is None:  # Do live update
+        md = "Loading ..."
+        p1 = p2 = p3 = p4 = None
+    else:
         with open(elo_results_file, "rb") as fin:
             elo_results = pickle.load(fin)
 
         md = make_leaderboard_md(elo_results)
         p1 = elo_results["win_fraction_heatmap"]
         p2 = elo_results["battle_count_heatmap"]
-        p3 = elo_results["average_win_rate_bar"]
-        p4 = elo_results["bootstrap_elo_rating"]
+        p3 = elo_results["bootstrap_elo_rating"]
+        p4 = elo_results["average_win_rate_bar"]
+
+    md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
+
+    if leaderboard_table_file:
+        data = load_leaderboard_table_csv(leaderboard_table_file)
+        headers = [
+            "Model",
+            "Arena Elo rating",
+            "MT-bench (score)",
+            "MT-bench (win rate %)",
+            "MMLU",
+            "License",
+        ]
+        values = []
+        for item in data:
+            row = []
+            for key in headers:
+                value = item[key]
+                row.append(value)
+            values.append(row)
+        values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
+
+        headers[1] = "⭐ " + headers[1]
+        headers[2] = "📈 " + headers[2]
+
+        gr.Dataframe(
+            headers=headers,
+            datatype=["markdown", "number", "number", "number", "number", "str"],
+            value=values,
+            elem_id="leaderboard_dataframe",
+        )
+        gr.Markdown(
+            "If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model)."
+        )
     else:
-        md = "Loading ..."
-        p1 = p2 = p3 = p4 = None
+        pass
 
-    md_1 = gr.Markdown(md)
     gr.Markdown(
-        f"""## More Statistics\n
+        f"""## More Statistics for Chatbot Arena\n
 We added some additional figures to show more statistics. The code for generating them is also included in this [notebook]({notebook_url}).
 Please note that you may see different orders from different ranking methods. This is expected for models that perform similarly, as demonstrated by the confidence interval in the bootstrap figure. Going forward, we prefer the classical Elo calculation because of its scalability and interpretability. You can find more discussions in this blog [post](https://lmsys.org/blog/2023-05-03-arena/).
 """
     )
 
+    leader_component_values[:] = [md, p1, p2, p3, p4]
+
     with gr.Row():
         with gr.Column():
             gr.Markdown(
@@ -56,23 +224,27 @@ Please note that you may see different orders from different ranking methods. Th
     with gr.Row():
         with gr.Column():
             gr.Markdown(
-                "#### Figure 3: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)"
+                "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)"
            )
             plot_3 = gr.Plot(p3, show_label=False)
         with gr.Column():
             gr.Markdown(
-                "#### Figure 4: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)"
+                "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)"
             )
             plot_4 = gr.Plot(p4, show_label=False)
     return [md_1, plot_1, plot_2, plot_3, plot_4]
 
 
-def build_demo(elo_results_file):
+def build_demo(elo_results_file, leaderboard_table_file):
+    text_size = gr.themes.sizes.text_lg
+
     with gr.Blocks(
         title="Chatbot Arena Leaderboard",
-        theme=gr.themes.Base(),
+        theme=gr.themes.Base(text_size=text_size),
     ) as demo:
-        leader_components = build_leaderboard_tab(elo_results_file)
+        leader_components = build_leaderboard_tab(
+            elo_results_file, leaderboard_table_file
+        )
 
     return demo
 
@@ -82,5 +254,5 @@ if __name__ == "__main__":
     parser.add_argument("--share", action="store_true")
    args = parser.parse_args()
 
-    demo = build_demo("elo_results_20230619.pkl")
+    demo = build_demo("elo_results_20230619.pkl", "leaderboard_table_20230619.csv")
     demo.launch(share=args.share)
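
A note on the Arena Elo numbers referenced in the new leaderboard text: the 40K+ pairwise votes are turned into Elo ratings, and Figure 3 reports a bootstrap over 1000 rounds of random sampling. The actual computation lives in the linked Colab notebook; the sketch below only illustrates the general idea, and the K-factor, scale, and battle record format are assumptions rather than the notebook's exact settings.

```python
import random
from collections import defaultdict


def compute_elo(battles, k=4, scale=400, base=10, init_rating=1000):
    """Online Elo over pairwise battles (illustrative, assumed parameters).

    Each battle is assumed to be a dict with "model_a", "model_b", and
    "winner" in {"model_a", "model_b", "tie"}.
    """
    rating = defaultdict(lambda: init_rating)
    for b in battles:
        ra, rb = rating[b["model_a"]], rating[b["model_b"]]
        ea = 1 / (1 + base ** ((rb - ra) / scale))  # expected score of model_a
        sa = {"model_a": 1.0, "model_b": 0.0, "tie": 0.5}[b["winner"]]
        rating[b["model_a"]] += k * (sa - ea)
        rating[b["model_b"]] += k * ((1 - sa) - (1 - ea))
    return dict(rating)


def bootstrap_elo(battles, num_rounds=1000):
    """Resample battles with replacement to get a distribution of ratings."""
    return [
        compute_elo(random.choices(battles, k=len(battles)))
        for _ in range(num_rounds)
    ]
```

Per-model medians over the bootstrap rounds give point estimates, and the spread across rounds gives the confidence intervals that let similarly performing models swap places between ranking methods, which is the caveat the "More Statistics" section raises.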
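`update_worker` is an infinite polling loop, and the hunks above do not show where it is started, nor the imports of `time`, `logger`, or the log-processing helpers (`get_log_files`, `clean_battle_data`, `report_elo_analysis_results`, `report_basic_stats`) that the live-update path calls. In the live configuration (`elo_results_file=None`) such a loop would typically run in a daemon thread next to the Gradio app; the wiring below is a hypothetical sketch, with the parameter values invented for illustration.

```python
import threading

# Hypothetical wiring, not part of this commit: poll the logs every 300 s
# without blocking the Gradio server or interpreter shutdown.
updater = threading.Thread(
    target=update_worker,
    args=(None, 300, None),  # max_num_files, interval (s), elo_results_file
    daemon=True,
)
updater.start()
```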
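`load_demo` exists so the cached module-level lists (`basic_component_values`, `leader_component_values`) can be pushed back into the UI whenever a visitor opens the page. How it gets registered is outside the hunks shown; a plausible, purely hypothetical hookup with Gradio's page-load event, assuming it is added inside app.py where `gr` is already imported, would be:

```python
# Hypothetical hookup, assuming the components are built in the same Blocks:
with gr.Blocks() as demo:
    url_params = gr.JSON(visible=False)
    basic_components = build_basic_stats_tab()
    leader_components = build_leaderboard_tab(None, None)
    # Outputs must match the order returned by load_demo: basic first, then leader.
    demo.load(load_demo, [url_params], basic_components + leader_components)
```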
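`load_leaderboard_table_csv` splits rows on bare commas (so fields cannot contain commas), matches the exact header names used in the code, treats `-` as a missing value, and expects a `Link` column when `add_hyperlink=True`. The `leaderboard_table_20230619.csv` file itself is not part of this commit, so the column order and rows below are made up purely to show the expected shape; the call assumes the functions defined in app.py are in scope.

```python
# Illustrative only: invented rows and an assumed column order.
csv_text = """\
Model,Arena Elo rating,MT-bench (score),MT-bench (win rate %),MMLU,License,Link
example-model-a,-,8.99,-,0.864,Proprietary,https://example.com/a
example-model-b,-,6.17,-,0.487,Apache-2.0,https://example.com/b
"""

with open("leaderboard_table_example.csv", "w") as f:
    f.write(csv_text)

rows = load_leaderboard_table_csv("leaderboard_table_example.csv", add_hyperlink=False)
print(rows[0]["MMLU"])               # 86.4 (the loader rescales MMLU to a percentage)
print(rows[0]["Arena Elo rating"])   # nan  (missing values become np.nan)
```

With `add_hyperlink=True` (the default used in `build_leaderboard_tab`), the `Model` field is additionally wrapped by `model_hyperlink` so the markdown column of the `gr.Dataframe` renders it as a link. After these changes the Space is launched the same way as before, `python app.py` (optionally with `--share`), now pointing at both the static pickle and the CSV.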