dsorokin committed on
Commit 5ccbe05 • 1 Parent(s): c0217a3
Files changed (5)
  1. .gitignore +1 -0
  2. README.md +11 -7
  3. app.py +449 -0
  4. index.html +0 -57
  5. requirements.txt +1 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ hf_token
README.md CHANGED
@@ -1,11 +1,15 @@
  ---
- title: Gradio Lite
- emoji: 🖼️
- colorFrom: yellow
- colorTo: red
- sdk: static
+ title: LMSys Chatbot Arena Leaderboard
+ emoji: 🏆🤖
+ colorFrom: indigo
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.50.2
+ app_file: app.py
  pinned: false
- license: mit
+ license: apache-2.0
+ tags:
+ - leaderboard
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
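
Note: this frontmatter change is what converts the Space from a static page into a Gradio app — the runtime is pinned to Gradio 3.50.2 and boots from `app.py` (added below), the old static `index.html` is deleted accordingly, the license moves from MIT to Apache-2.0, and the `leaderboard` tag lets the Space be found via the Hub's tag filters.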
app.py ADDED
@@ -0,0 +1,449 @@
+ """A gradio app that renders a static leaderboard. This is used for Hugging Face Space."""
+ import ast
+ import argparse
+ import glob
+ import pickle
+
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+
+
+ # notebook_url = "https://colab.research.google.com/drive/1RAWb22-PFNI-X1gPVzc927SGUdfr6nsR?usp=sharing"
+ notebook_url = "https://colab.research.google.com/drive/1KdwokPjirkTmpO_P1WByFNFiqxWQquwH#scrollTo=o_CpbkGEbhrK"
+
+
+ basic_component_values = [None] * 6
+ leader_component_values = [None] * 5
+
+
+ def make_default_md(arena_df, elo_results):
+     total_votes = sum(arena_df["num_battles"]) // 2
+     total_models = len(arena_df)
+
+     leaderboard_md = f"""
+ # 🏆 LMSYS Chatbot Arena Leaderboard
+ | [Vote](https://chat.lmsys.org) | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+
+ LMSYS [Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/) is a crowdsourced open platform for LLM evals.
+ We've collected over **200,000** human preference votes to rank LLMs with the Elo ranking system.
+ """
+     return leaderboard_md
+
+
+ def make_arena_leaderboard_md(arena_df):
+     total_votes = sum(arena_df["num_battles"]) // 2
+     total_models = len(arena_df)
+
+     leaderboard_md = f"""
+ Total #models: **{total_models}**. Total #votes: **{total_votes}**. Last updated: Feb 15, 2024.
+
+ Contribute your vote 🗳️ at [chat.lmsys.org](https://chat.lmsys.org)! Find more analysis in the [notebook]({notebook_url}).
+ """
+     return leaderboard_md
+
+
+ def make_full_leaderboard_md(elo_results):
+     leaderboard_md = f"""
+ Three benchmarks are displayed: **Arena Elo**, **MT-Bench** and **MMLU**.
+ - [Chatbot Arena](https://chat.lmsys.org/?arena) - a crowdsourced, randomized battle platform. We use 200K+ user votes to compute Elo ratings.
+ - [MT-Bench](https://arxiv.org/abs/2306.05685): a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
+ - [MMLU](https://arxiv.org/abs/2009.03300) (5-shot): a test to measure a model's multitask accuracy on 57 tasks.
+
+ 💻 Code: The MT-bench scores (single-answer grading on a scale of 10) are computed by [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge).
+ The MMLU scores are mostly computed by [InstructEval](https://github.com/declare-lab/instruct-eval).
+ Higher values are better for all benchmarks. Empty cells mean not available.
+ """
+     return leaderboard_md
+
+
+ def make_leaderboard_md_live(elo_results):
+     leaderboard_md = f"""
+ # Leaderboard
+ Last updated: {elo_results["last_updated_datetime"]}
+ {elo_results["leaderboard_table"]}
+ """
+     return leaderboard_md
+
+
+ def update_elo_components(max_num_files, elo_results_file):
+     log_files = get_log_files(max_num_files)
+
+     # Leaderboard
+     if elo_results_file is None:  # Do live update
+         battles = clean_battle_data(log_files)
+         elo_results = report_elo_analysis_results(battles)
+
+         leader_component_values[0] = make_leaderboard_md_live(elo_results)
+         leader_component_values[1] = elo_results["win_fraction_heatmap"]
+         leader_component_values[2] = elo_results["battle_count_heatmap"]
+         leader_component_values[3] = elo_results["bootstrap_elo_rating"]
+         leader_component_values[4] = elo_results["average_win_rate_bar"]
+
+     # Basic stats
+     basic_stats = report_basic_stats(log_files)
+     md0 = f"Last updated: {basic_stats['last_updated_datetime']}"
+
+     md1 = "### Action Histogram\n"
+     md1 += basic_stats["action_hist_md"] + "\n"
+
+     md2 = "### Anony. Vote Histogram\n"
+     md2 += basic_stats["anony_vote_hist_md"] + "\n"
+
+     md3 = "### Model Call Histogram\n"
+     md3 += basic_stats["model_hist_md"] + "\n"
+
+     md4 = "### Model Call (Last 24 Hours)\n"
+     md4 += basic_stats["num_chats_last_24_hours"] + "\n"
+
+     basic_component_values[0] = md0
+     basic_component_values[1] = basic_stats["chat_dates_bar"]
+     basic_component_values[2] = md1
+     basic_component_values[3] = md2
+     basic_component_values[4] = md3
+     basic_component_values[5] = md4
+
+
+ def update_worker(max_num_files, interval, elo_results_file):
+     while True:
+         tic = time.time()
+         update_elo_components(max_num_files, elo_results_file)
+         duration = time.time() - tic
+         print(f"update duration: {duration:.2f} s")
+         time.sleep(max(interval - duration, 0))
+
+
+ def load_demo(url_params, request: gr.Request):
+     logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
+     return basic_component_values + leader_component_values
+
+
+ def model_hyperlink(model_name, link):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def load_leaderboard_table_csv(filename, add_hyperlink=True):
+     lines = open(filename).readlines()
+     heads = [v.strip() for v in lines[0].split(",")]
+     rows = []
+     for i in range(1, len(lines)):
+         row = [v.strip() for v in lines[i].split(",")]
+         for j in range(len(heads)):
+             item = {}
+             for h, v in zip(heads, row):
+                 if h == "Arena Elo rating":
+                     if v != "-":
+                         v = int(ast.literal_eval(v))
+                     else:
+                         v = np.nan
+                 elif h == "MMLU":
+                     if v != "-":
+                         v = round(ast.literal_eval(v) * 100, 1)
+                     else:
+                         v = np.nan
+                 elif h == "MT-bench (win rate %)":
+                     if v != "-":
+                         v = round(ast.literal_eval(v[:-1]), 1)
+                     else:
+                         v = np.nan
+                 elif h == "MT-bench (score)":
+                     if v != "-":
+                         v = round(ast.literal_eval(v), 2)
+                     else:
+                         v = np.nan
+                 item[h] = v
+             if add_hyperlink:
+                 item["Model"] = model_hyperlink(item["Model"], item["Link"])
+         rows.append(item)
+
+     return rows
+
+
+ def build_basic_stats_tab():
+     empty = "Loading ..."
+     basic_component_values[:] = [empty, None, empty, empty, empty, empty]
+
+     md0 = gr.Markdown(empty)
+     gr.Markdown("#### Figure 1: Number of model calls and votes")
+     plot_1 = gr.Plot(show_label=False)
+     with gr.Row():
+         with gr.Column():
+             md1 = gr.Markdown(empty)
+         with gr.Column():
+             md2 = gr.Markdown(empty)
+     with gr.Row():
+         with gr.Column():
+             md3 = gr.Markdown(empty)
+         with gr.Column():
+             md4 = gr.Markdown(empty)
+     return [md0, plot_1, md1, md2, md3, md4]
+
+ def get_full_table(arena_df, model_table_df):
+     values = []
+     for i in range(len(model_table_df)):
+         row = []
+         model_key = model_table_df.iloc[i]["key"]
+         model_name = model_table_df.iloc[i]["Model"]
+         # model display name
+         row.append(model_name)
+         if model_key in arena_df.index:
+             idx = arena_df.index.get_loc(model_key)
+             row.append(round(arena_df.iloc[idx]["rating"]))
+         else:
+             row.append(np.nan)
+         row.append(model_table_df.iloc[i]["MT-bench (score)"])
+         row.append(model_table_df.iloc[i]["MMLU"])
+         # Organization
+         row.append(model_table_df.iloc[i]["Organization"])
+         # license
+         row.append(model_table_df.iloc[i]["License"])
+
+         values.append(row)
+     values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
+     return values
+
+
+ def get_arena_table(arena_df, model_table_df):
+     # sort by rating
+     arena_df = arena_df.sort_values(by=["rating"], ascending=False)
+     values = []
+     for i in range(len(arena_df)):
+         row = []
+         model_key = arena_df.index[i]
+         model_name = model_table_df[model_table_df["key"] == model_key]["Model"].values[
+             0
+         ]
+
+         # rank
+         row.append(i + 1)
+         # model display name
+         row.append(model_name)
+         # elo rating
+         row.append(round(arena_df.iloc[i]["rating"]))
+         upper_diff = round(
+             arena_df.iloc[i]["rating_q975"] - arena_df.iloc[i]["rating"]
+         )
+         lower_diff = round(
+             arena_df.iloc[i]["rating"] - arena_df.iloc[i]["rating_q025"]
+         )
+         row.append(f"+{upper_diff}/-{lower_diff}")
+         # num battles
+         row.append(round(arena_df.iloc[i]["num_battles"]))
+         # Organization
+         row.append(
+             model_table_df[model_table_df["key"] == model_key]["Organization"].values[0]
+         )
+         # license
+         row.append(
+             model_table_df[model_table_df["key"] == model_key]["License"].values[0]
+         )
+
+         cutoff_date = model_table_df[model_table_df["key"] == model_key]["Knowledge cutoff date"].values[0]
+         if cutoff_date == "-":
+             row.append("Unknown")
+         else:
+             row.append(cutoff_date)
+         values.append(row)
+     return values
+
+ def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False):
+     if elo_results_file is None:  # Do live update
+         default_md = "Loading ..."
+         p1 = p2 = p3 = p4 = None
+     else:
+         with open(elo_results_file, "rb") as fin:
+             elo_results = pickle.load(fin)
+
+         p1 = elo_results["win_fraction_heatmap"]
+         p2 = elo_results["battle_count_heatmap"]
+         p3 = elo_results["bootstrap_elo_rating"]
+         p4 = elo_results["average_win_rate_bar"]
+         arena_df = elo_results["leaderboard_table_df"]
+         default_md = make_default_md(arena_df, elo_results)
+
+     md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")
+     if leaderboard_table_file:
+         data = load_leaderboard_table_csv(leaderboard_table_file)
+         model_table_df = pd.DataFrame(data)
+
+         with gr.Tabs() as tabs:
+             # arena table
+             arena_table_vals = get_arena_table(arena_df, model_table_df)
+             with gr.Tab("Arena Elo", id=0):
+                 md = make_arena_leaderboard_md(arena_df)
+                 gr.Markdown(md, elem_id="leaderboard_markdown")
+                 gr.Dataframe(
+                     headers=[
+                         "Rank",
+                         "🤖 Model",
+                         "⭐ Arena Elo",
+                         "📊 95% CI",
+                         "🗳️ Votes",
+                         "Organization",
+                         "License",
+                         "Knowledge Cutoff",
+                     ],
+                     datatype=[
+                         "str",
+                         "markdown",
+                         "number",
+                         "str",
+                         "number",
+                         "str",
+                         "str",
+                         "str",
+                     ],
+                     value=arena_table_vals,
+                     elem_id="arena_leaderboard_dataframe",
+                     height=700,
+                     column_widths=[50, 200, 120, 100, 100, 150, 150, 100],
+                     wrap=True,
+                 )
+             with gr.Tab("Full Leaderboard", id=1):
+                 md = make_full_leaderboard_md(elo_results)
+                 gr.Markdown(md, elem_id="leaderboard_markdown")
+                 full_table_vals = get_full_table(arena_df, model_table_df)
+                 gr.Dataframe(
+                     headers=[
+                         "🤖 Model",
+                         "⭐ Arena Elo",
+                         "📈 MT-bench",
+                         "📚 MMLU",
+                         "Organization",
+                         "License",
+                     ],
+                     datatype=["markdown", "number", "number", "number", "str", "str"],
+                     value=full_table_vals,
+                     elem_id="full_leaderboard_dataframe",
+                     column_widths=[200, 100, 100, 100, 150, 150],
+                     height=700,
+                     wrap=True,
+                 )
+         if not show_plot:
+             gr.Markdown(
+                 """ ## Visit our [HF space](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) for more analysis!
+                 If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model).
+                 """,
+                 elem_id="leaderboard_markdown",
+             )
+         else:
+             pass
+
+     leader_component_values[:] = [default_md, p1, p2, p3, p4]
+
+     if show_plot:
+         gr.Markdown(
+             f"""## More Statistics for Chatbot Arena\n
+ Below are figures for more statistics. The code for generating them is also included in this [notebook]({notebook_url}).
+ You can find more discussions in this blog [post](https://lmsys.org/blog/2023-12-07-leaderboard/).
+ """,
+             elem_id="leaderboard_markdown"
+         )
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown(
+                     "#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. B Battles"
+                 )
+                 plot_1 = gr.Plot(p1, show_label=False)
+             with gr.Column():
+                 gr.Markdown(
+                     "#### Figure 2: Battle Count for Each Combination of Models (without Ties)"
+                 )
+                 plot_2 = gr.Plot(p2, show_label=False)
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown(
+                     "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)"
+                 )
+                 plot_3 = gr.Plot(p3, show_label=False)
+             with gr.Column():
+                 gr.Markdown(
+                     "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)"
+                 )
+                 plot_4 = gr.Plot(p4, show_label=False)
+
+     gr.Markdown(acknowledgment_md)
+
+     if show_plot:
+         return [md_1, plot_1, plot_2, plot_3, plot_4]
+     return [md_1]
+
+ block_css = """
+ #notice_markdown {
+     font-size: 104%
+ }
+ #notice_markdown th {
+     display: none;
+ }
+ #notice_markdown td {
+     padding-top: 6px;
+     padding-bottom: 6px;
+ }
+ #leaderboard_markdown {
+     font-size: 104%
+ }
+ #leaderboard_markdown td {
+     padding-top: 6px;
+     padding-bottom: 6px;
+ }
+ #leaderboard_dataframe td {
+     line-height: 0.1em;
+ }
+ footer {
+     display:none !important
+ }
+ .image-container {
+     display: flex;
+     align-items: center;
+     padding: 1px;
+ }
+ .image-container img {
+     margin: 0 30px;
+     height: 20px;
+     max-height: 100%;
+     width: auto;
+     max-width: 20%;
+ }
+ """
+
+ acknowledgment_md = """
+ ### Acknowledgment
+ <div class="image-container">
+     <p> We thank <a href="https://www.kaggle.com/" target="_blank">Kaggle</a>, <a href="https://mbzuai.ac.ae/" target="_blank">MBZUAI</a>, <a href="https://www.anyscale.com/" target="_blank">AnyScale</a>, <a href="https://www.a16z.com/" target="_blank">a16z</a>, and <a href="https://huggingface.co/" target="_blank">HuggingFace</a> for their generous <a href="https://lmsys.org/donations/" target="_blank">sponsorship</a>. </p>
+     <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/7/7c/Kaggle_logo.png/400px-Kaggle_logo.png" alt="Kaggle">
+     <img src="https://mma.prnewswire.com/media/1227419/MBZUAI_Logo.jpg?p=facebookg" alt="MBZUAI">
+     <img src="https://docs.anyscale.com/site-assets/logo.png" alt="AnyScale">
+     <img src="https://a16z.com/wp-content/themes/a16z/assets/images/opegraph_images/corporate-Yoast-Twitter.jpg" alt="a16z">
+     <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-with-title.png" alt="HuggingFace">
+ </div>
+ """
+
+ def build_demo(elo_results_file, leaderboard_table_file):
+     text_size = gr.themes.sizes.text_lg
+
+     with gr.Blocks(
+         title="Chatbot Arena Leaderboard",
+         theme=gr.themes.Base(text_size=text_size),
+         css=block_css,
+     ) as demo:
+         leader_components = build_leaderboard_tab(
+             elo_results_file, leaderboard_table_file, show_plot=True
+         )
+     return demo
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--share", action="store_true")
+     args = parser.parse_args()
+
+     elo_result_files = glob.glob("elo_results_*.pkl")
+     elo_result_files.sort(key=lambda x: int(x[12:-4]))
+     elo_result_file = elo_result_files[-1]
+
+     leaderboard_table_files = glob.glob("leaderboard_table_*.csv")
+     leaderboard_table_files.sort(key=lambda x: int(x[18:-4]))
+     leaderboard_table_file = leaderboard_table_files[-1]
+
+     demo = build_demo(elo_result_file, leaderboard_table_file)
+     demo.launch(share=args.share)
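
Two caveats on the file as committed: the live-update helpers (`update_elo_components`, `update_worker`, `load_demo`) are carried over from FastChat's monitor and reference names this file never imports or defines (`time`, `logger`, `get_log_files`, `clean_battle_data`, `report_elo_analysis_results`, `report_basic_stats`), so they would raise `NameError` if invoked; `build_basic_stats_tab` is simply never called. The `__main__` block always resolves a pickle file, so only the static path in `build_leaderboard_tab` runs.

For local testing, here is a minimal sketch (not part of the commit) that fabricates the two inputs `app.py` globs for. Only the pickle keys, DataFrame columns, and CSV header names are taken from the code above; every file name and value below is hypothetical:

```python
# make_fixtures.py — hypothetical helper so `python app.py` can be smoke-tested locally.
import pickle

import pandas as pd
import plotly.express as px

# One placeholder Plotly figure stands in for the four figures the app renders.
fig = px.bar(x=["model-a", "model-b"], y=[0.6, 0.4])

# Indexed by model "key"; get_arena_table() reads the rating, the bootstrap
# quantiles (rating_q025/q975) behind the "95% CI" column, and num_battles.
arena_df = pd.DataFrame(
    {
        "rating": [1200.0, 1100.0],
        "rating_q025": [1188.0, 1086.0],
        "rating_q975": [1211.0, 1114.0],
        "num_battles": [4000, 3000],
    },
    index=["model-a", "model-b"],
)

elo_results = {
    "win_fraction_heatmap": fig,
    "battle_count_heatmap": fig,
    "bootstrap_elo_rating": fig,
    "average_win_rate_bar": fig,
    "leaderboard_table_df": arena_df,
}

# File names must satisfy the glob patterns and the date-slicing sort keys
# (int(x[12:-4]) / int(x[18:-4])) in app.py's __main__ block.
with open("elo_results_20240215.pkl", "wb") as fout:
    pickle.dump(elo_results, fout)

# load_leaderboard_table_csv() splits rows on bare commas, so no field may
# itself contain a comma.
with open("leaderboard_table_20240215.csv", "w") as fout:
    fout.write(
        "key,Model,Arena Elo rating,MT-bench (score),MMLU,"
        "Organization,License,Link,Knowledge cutoff date\n"
        "model-a,Model A,1200,8.0,0.7,Org A,Apache-2.0,https://example.org/a,2023/12\n"
        "model-b,Model B,1100,7.0,0.6,Org B,MIT,https://example.org/b,-\n"
    )
```

With those two fixture files in place, `python app.py` serves the leaderboard locally (add `--share` for a public Gradio link).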
index.html DELETED
@@ -1,57 +0,0 @@
- <!DOCTYPE html>
- <html>
-   <head>
-     <meta charset="utf-8">
-     <meta name="viewport" content="width=device-width, initial-scale=1">
-     <title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
-     <meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">
-
-     <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
-     <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
-
-     <style>
-       html, body {
-         margin: 0;
-         padding: 0;
-         height: 100%;
-       }
-     </style>
-   </head>
-   <body>
-     <gradio-lite>
-       <gradio-file name="app.py" entrypoint>
-       import gradio as gr
-
-       from filters import as_gray
-
-       def process(input_image):
-           output_image = as_gray(input_image)
-           return output_image
-
-       demo = gr.Interface(
-           process,
-           "image",
-           "image",
-           examples=["lion.jpg", "logo.png"],
-       )
-
-       demo.launch()
-       </gradio-file>
-
-       <gradio-file name="filters.py">
-       from skimage.color import rgb2gray
-
-       def as_gray(image):
-           return rgb2gray(image)
-       </gradio-file>
-
-       <gradio-file name="lion.jpg" url="https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/test_data/lion.jpg" />
-       <gradio-file name="logo.png" url="https://raw.githubusercontent.com/gradio-app/gradio/main/guides/assets/logo.png" />
-
-       <gradio-requirements>
-         # Same syntax as requirements.txt
-         scikit-image
-       </gradio-requirements>
-     </gradio-lite>
-   </body>
- </html>
requirements.txt ADDED
@@ -0,0 +1 @@
+ plotly
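
`plotly` is the only declared dependency because `gradio` (and its `numpy`/`pandas` dependencies) is supplied by the Space runtime via the README's `sdk`/`sdk_version` fields, while unpickling the figures inside `elo_results_*.pkl` and rendering them through `gr.Plot` requires the `plotly` package to be importable. A minimal sketch of that pairing, with toy data rather than the Arena's:

```python
import gradio as gr
import plotly.express as px

# gr.Plot accepts a Plotly figure directly — the same way app.py passes
# the unpickled heatmaps and bar charts from elo_results_*.pkl.
fig = px.line(x=["Jan", "Feb"], y=[1150, 1200], title="toy rating trend")

with gr.Blocks() as demo:
    gr.Plot(fig, show_label=False)

demo.launch()
```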