Spaces: Running on CPU Upgrade

patrickvonplaten committed • Commit 3acffd5 • 1 Parent(s): f3f1b4e

finish

Browse files:
- @ +98 -0
- __pycache__/app.cpython-310.pyc +0 -0
- app.py +143 -0
- requirements.txt +2 -0
@
ADDED
@@ -0,0 +1,98 @@
+from datasets import load_dataset
+from collections import Counter, defaultdict
+from random import sample, shuffle
+import datasets
+from pandas import DataFrame
+from huggingface_hub import list_datasets
+import os
+import gradio as gr
+
+import secrets
+
+from traitlets import default
+
+parti_prompt_results = []
+ORG = "diffusers-parti-prompts"
+SUBMISSIONS = {
+    "sd-v1-5": None,
+    "sd-v2-1": None,
+    "if-v1-0": None,
+    "karlo": None,
+}
+LINKS = {
+    "sd-v1-5": "https://huggingface.co/runwayml/stable-diffusion-v1-5",
+    "sd-v2-1": "https://huggingface.co/stabilityai/stable-diffusion-2-1",
+    "if-v1-0": "https://huggingface.co/DeepFloyd/IF-I-XL-v1.0",
+    "karlo": "https://huggingface.co/kakaobrain/karlo-v1-alpha",
+}
+MODEL_KEYS = "-".join(SUBMISSIONS.keys())
+SUBMISSION_ORG = f"results-{MODEL_KEYS}"
+
+submission_names = list(SUBMISSIONS.keys())
+parti_prompt_categories = load_dataset(os.path.join(ORG, "sd-v1-5"))["train"]["Category"]
+parti_prompt_challenge = load_dataset(os.path.join(ORG, "sd-v1-5"))["train"]["Challenge"]
+
+
+def load_submissions():
+    all_datasets = list_datasets(author=SUBMISSION_ORG)
+    relevant_ids = [d.id for d in all_datasets]
+
+    ids = defaultdict(list)
+    challenges = defaultdict(list)
+    categories = defaultdict(list)
+
+    for _id in relevant_ids[:2]:
+        ds = load_dataset(_id)["train"]
+        for result, image_id in zip(ds["result"], ds["id"]):
+            ids[result].append(image_id)
+            challenges[parti_prompt_challenge[image_id]].append(result)
+            categories[parti_prompt_categories[image_id]].append(result)
+
+    main_dict = {k: len(v) for k, v in ids.items()}
+    challenges = {k: Counter(v) for k, v in challenges.items()}
+    categories = {k: Counter(v) for k, v in categories.items()}
+
+    return main_dict, challenges, categories
+
+def get_dataframe_all():
+    main, challenges, categories = load_submissions()
+    import ipdb; ipdb.set_trace()
+
+TITLE = "# Community Parti Prompts - Who is your open-source genAI model?"
+DESCRIPTION = """
+*This is an interactive game in which you click through pre-generated images from SD-v1-5, SD-v2.1, Karlo, and IF
+using [Parti Prompts](https://huggingface.co/datasets/nateraw/parti-prompts) prompts.* \n
+*Your choices will go into the public community [genAI leaderboard](TODO).*
+"""
+EXPLANATION = """\n\n
+## How it works \n\n
+
+1. Click on 'Start'
+2. A prompt and 4 different images are displayed
+3. Select your favorite image
+4. After 10 rounds your favorite diffusion model is displayed
+"""
+
+GALLERY_COLUMN_NUM = len(SUBMISSIONS)
+
+with gr.Blocks() as demo:
+    with gr.Column(visible=True) as intro_view:
+        gr.Markdown(TITLE)
+        gr.Markdown(DESCRIPTION)
+        gr.Markdown(EXPLANATION)
+
+    headers = list(SUBMISSIONS.keys())
+    datatype = "str"
+
+    dataframes = get_dataframe_all()
+    import ipdb; ipdb.set_trace()
+
+    main_dataframe = gr.Dataframe(
+        headers=headers,
+        datatype=datatype,
+        row_count=1,
+        col_count=(len(SUBMISSIONS)),
+        interactive=False,
+    )
+
+demo.launch()
__pycache__/app.cpython-310.pyc
ADDED
Binary file (5.36 kB)
app.py
ADDED
@@ -0,0 +1,143 @@
+from datasets import load_dataset
+from collections import Counter, defaultdict
+import pandas as pd
+from huggingface_hub import list_datasets
+import os
+import gradio as gr
+
+parti_prompt_results = []
+ORG = "diffusers-parti-prompts"
+SUBMISSIONS = {
+    "sd-v1-5": None,
+    "sd-v2-1": None,
+    "if-v1-0": None,
+    "karlo": None,
+}
+LINKS = {
+    "sd-v1-5": "https://huggingface.co/runwayml/stable-diffusion-v1-5",
+    "sd-v2-1": "https://huggingface.co/stabilityai/stable-diffusion-2-1",
+    "if-v1-0": "https://huggingface.co/DeepFloyd/IF-I-XL-v1.0",
+    "karlo": "https://huggingface.co/kakaobrain/karlo-v1-alpha",
+}
+MODEL_KEYS = "-".join(SUBMISSIONS.keys())
+SUBMISSION_ORG = f"results-{MODEL_KEYS}"
+
+submission_names = list(SUBMISSIONS.keys())
+parti_prompt_categories = load_dataset(os.path.join(ORG, "sd-v1-5"))["train"]["Category"]
+parti_prompt_challenge = load_dataset(os.path.join(ORG, "sd-v1-5"))["train"]["Challenge"]
+
+
+def load_submissions():
+    all_datasets = list_datasets(author=SUBMISSION_ORG)
+    relevant_ids = [d.id for d in all_datasets]
+
+    ids = defaultdict(list)
+    challenges = defaultdict(list)
+    categories = defaultdict(list)
+
+    for _id in relevant_ids:
+        ds = load_dataset(_id)["train"]
+        for result, image_id in zip(ds["result"], ds["id"]):
+            ids[result].append(image_id)
+            challenges[parti_prompt_challenge[image_id]].append(result)
+            categories[parti_prompt_categories[image_id]].append(result)
+
+    all_values = sum(len(v) for v in ids.values())
+    main_dict = {k: '{:.2%}'.format(len(v) / all_values) for k, v in ids.items()}
+    challenges = {k: Counter(v) for k, v in challenges.items()}
+    categories = {k: Counter(v) for k, v in categories.items()}
+
+    return main_dict, challenges, categories
+
+def get_dataframe_all():
+    main, challenges, categories = load_submissions()
+    main_frame = pd.DataFrame([main])
+
+    challenges_frame = pd.DataFrame.from_dict(challenges).fillna(0).T
+    challenges_frame = challenges_frame.div(challenges_frame.sum(axis=1), axis=0)
+    challenges_frame = challenges_frame.applymap(lambda x: '{:.2%}'.format(x))
+
+    categories_frame = pd.DataFrame.from_dict(categories).fillna(0).T
+    categories_frame = categories_frame.div(categories_frame.sum(axis=1), axis=0)
+    categories_frame = categories_frame.applymap(lambda x: '{:.2%}'.format(x))
+
+    categories_frame = categories_frame.reset_index().rename(columns={'index': 'Category'})
+    challenges_frame = challenges_frame.reset_index().rename(columns={'index': 'Challenge'})
+
+    return main_frame, challenges_frame, categories_frame
+
+TITLE = "# Open Parti Prompts Leaderboard"
+DESCRIPTION = """
+*This leaderboard is retrieved from answers of [Community Evaluations on Parti Prompts](https://huggingface.co/spaces/OpenGenAI/open-parti-prompts)*
+"""
+
+EXPLANATION = """\n\n
+## How the data is collected \n\n
+
+In the [Community Parti Prompts](https://huggingface.co/spaces/OpenGenAI/open-parti-prompts), community members select for every prompt
+of [Parti Prompts](https://huggingface.co/datasets/nateraw/parti-prompts) which open-source image generation model has generated the best image.
+The community's answers are then stored and used in this space to give a human evaluation of the different models.
+Currently the leaderboard includes the following models:
+- [sd-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
+- [sd-v2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1)
+- [if-v1-0](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0)
+- [karlo](https://huggingface.co/kakaobrain/karlo-v1-alpha)
+
+In the following you can see three result tables. The first shows you the overall preferences across all prompts. The second and third tables
+show you a breakdown analysis per category and level of difficulty ("challenge") as defined by [Parti Prompts](https://huggingface.co/datasets/nateraw/parti-prompts).
+"""
+
+GALLERY_COLUMN_NUM = len(SUBMISSIONS)
+
+def refresh():
+    return get_dataframe_all()
+
+with gr.Blocks() as demo:
+    with gr.Column(visible=True) as intro_view:
+        gr.Markdown(TITLE)
+        gr.Markdown(DESCRIPTION)
+        gr.Markdown(EXPLANATION)
+
+    headers = list(SUBMISSIONS.keys())
+    datatype = "str"
+
+    main_df, challenge_df, category_df = get_dataframe_all()
+
+    with gr.Column():
+        gr.Markdown("# Open Parti Prompts")
+        main_dataframe = gr.Dataframe(
+            value=main_df,
+            headers=main_df.columns.to_list(),
+            datatype="str",
+            row_count=main_df.shape[0],
+            col_count=main_df.shape[1],
+            interactive=False,
+        )
+
+    with gr.Column():
+        gr.Markdown("## per category")
+        cat_dataframe = gr.Dataframe(
+            value=category_df,
+            headers=category_df.columns.to_list(),
+            datatype="str",
+            row_count=category_df.shape[0],
+            col_count=category_df.shape[1],
+            interactive=False,
+        )
+
+    with gr.Column():
+        gr.Markdown("## per challenge")
+        chal_dataframe = gr.Dataframe(
+            value=challenge_df,
+            headers=challenge_df.columns.to_list(),
+            datatype="str",
+            row_count=challenge_df.shape[0],
+            col_count=challenge_df.shape[1],
+            interactive=False,
+        )
+
+    with gr.Row():
+        refresh_button = gr.Button("Refresh")
+        refresh_button.click(refresh, inputs=[], outputs=[main_dataframe, chal_dataframe, cat_dataframe])
+
+demo.launch()
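For context on the data that load_submissions() consumes: each dataset under the SUBMISSION_ORG organization is assumed to have a "train" split with a "result" column (apparently the key of the model the voter preferred) and an "id" column (the row index into Parti Prompts). A minimal, self-contained sketch of the same tallying logic on invented toy values, with no Hub access, follows:

from collections import Counter, defaultdict

# Toy stand-in for one submission dataset: "result" holds the preferred model key,
# "id" the index of the corresponding Parti Prompts row (all values invented).
toy_votes = {"result": ["sd-v2-1", "if-v1-0", "sd-v2-1", "karlo"], "id": [0, 1, 2, 3]}
# Toy stand-in for the per-prompt "Challenge" column of diffusers-parti-prompts/sd-v1-5.
toy_challenge = ["Basic", "Complex", "Basic", "Imagination"]

ids = defaultdict(list)
challenges = defaultdict(list)
for result, image_id in zip(toy_votes["result"], toy_votes["id"]):
    ids[result].append(image_id)                        # one vote per model
    challenges[toy_challenge[image_id]].append(result)  # votes grouped by challenge

total = sum(len(v) for v in ids.values())
print({k: "{:.2%}".format(len(v) / total) for k, v in ids.items()})
# -> {'sd-v2-1': '50.00%', 'if-v1-0': '25.00%', 'karlo': '25.00%'}
print({k: Counter(v) for k, v in challenges.items()})
# -> per-challenge Counter of votes, the same shape app.py turns into a table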
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+datasets
+pandas
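As a sketch of why pandas is listed above: get_dataframe_all() reshapes the Counter tallies into row-normalized percentage tables. The snippet below reproduces that reshaping on invented counts (it mirrors the div/applymap calls in app.py; newer pandas versions may warn that applymap is deprecated in favor of DataFrame.map):

import pandas as pd
from collections import Counter

# Invented tallies in the shape load_submissions() returns for the "challenges" dict.
challenges = {
    "Basic": Counter({"sd-v2-1": 3, "if-v1-0": 1}),
    "Imagination": Counter({"karlo": 2, "sd-v2-1": 2}),
}

frame = pd.DataFrame.from_dict(challenges).fillna(0).T   # rows: challenges, columns: models
frame = frame.div(frame.sum(axis=1), axis=0)             # normalize each row to fractions
frame = frame.applymap(lambda x: "{:.2%}".format(x))     # render as percentage strings
frame = frame.reset_index().rename(columns={"index": "Challenge"})
print(frame)
# Basic row:       sd-v2-1 75.00%, if-v1-0 25.00%, karlo 0.00%
# Imagination row: sd-v2-1 50.00%, if-v1-0 0.00%,  karlo 50.00%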