alfredplpl committed
Commit: 502e234
1 Parent(s): 47b6e4e
Commit message: Emi

Files changed:
- .gitattributes (+4, -0)
- README.md (+5, -5)
- app.py (+125, -151)
- requirements.txt (+4, -2)
.gitattributes CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+manual.safetensors filter=lfs diff=lfs merge=lfs -text
+unaestheticXLv1.safetensors filter=lfs diff=lfs merge=lfs -text
+unaestheticXLv13.safetensors filter=lfs diff=lfs merge=lfs -text
+unaestheticXLv31.safetensors filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Emi
+emoji: 😊
+colorFrom: blue
+colorTo: blue
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.41.2
 app_file: app.py
 pinned: false
 license: other
app.py CHANGED
(hunks: @@ -1,183 +1,160 @@, @@ -187,23 +164,36 @@, @@ -212,44 +202,28 @@)

[The pre-commit version of app.py (183 lines) is too heavily truncated in this view to reconstruct. The surviving fragments show that it imported CLIPFeatureExtractor from transformers, read the token with os.environ.get("ACCESS_TOKEN"), wrapped inference in a try/except that returned error_str(e), and exposed features this commit removes: an image_style choice ("Animetic"), an image_size radio (Portrait, Landscape, Square, Highreso., mapped to 768/1024-pixel sizes), and an img2img branch (pipe_i2i with a PIL image input and a "Transformation strength" slider). The post-commit file follows; lines the diff view leaves unchanged and does not show are marked "...".]

New version of app.py:
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, EulerAncestralDiscreteScheduler
import gradio as gr
import torch
from torch import autocast
from PIL import Image
import random
import os
from huggingface_hub import hf_hub_download
from safetensors import safe_open
from compel import Compel, ReturnedEmbeddingsType

model_id = 'aipicasso/emi'
auth_token = os.environ["ACCESS_TOKEN"]

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler", use_auth_token=auth_token)

pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    scheduler=scheduler, use_auth_token=auth_token)

pipe = pipe.to("cuda")
#pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

# 65 is the ASCII code for "A"; it seeds the unique placeholder tokens (sksdA, sksdB, ...)
token_num = 65
+
|
31 |
+
unaestheticXLv31=""
|
32 |
+
embeddings_dict = {}
|
33 |
+
with safe_open("unaestheticXLv31.safetensors", framework="pt") as f:
|
34 |
+
for k in f.keys():
|
35 |
+
embeddings_dict[k] = f.get_tensor(k)
|
36 |
+
pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
37 |
+
pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
38 |
+
for i in range(len(embeddings_dict["clip_l"])):
|
39 |
+
token = f"sksd{chr(token_num)}"
|
40 |
+
token_num+=1
|
41 |
+
unaestheticXLv31 += token
|
42 |
+
pipe.tokenizer.add_tokens(token)
|
43 |
+
token_id = pipe.tokenizer.convert_tokens_to_ids(token)
|
44 |
+
pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
|
45 |
+
pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
|
46 |
+
|
47 |
+
unaestheticXLv1=""
|
48 |
+
embeddings_dict = {}
|
49 |
+
with safe_open("unaestheticXLv1.safetensors", framework="pt") as f:
|
50 |
+
for k in f.keys():
|
51 |
+
embeddings_dict[k] = f.get_tensor(k)
|
52 |
+
pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
53 |
+
pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
54 |
+
for i in range(len(embeddings_dict["clip_l"])):
|
55 |
+
token = f"sksd{chr(token_num)}"
|
56 |
+
token_num+=1
|
57 |
+
unaestheticXLv1 += token
|
58 |
+
pipe.tokenizer.add_tokens(token)
|
59 |
+
token_id = pipe.tokenizer.convert_tokens_to_ids(token)
|
60 |
+
pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
|
61 |
+
pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
|
62 |
+
|
63 |
+
unaestheticXLv13=""
|
64 |
+
embeddings_dict = {}
|
65 |
+
with safe_open("unaestheticXLv13.safetensors", framework="pt") as f:
|
66 |
+
for k in f.keys():
|
67 |
+
embeddings_dict[k] = f.get_tensor(k)
|
68 |
+
|
69 |
+
pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
70 |
+
pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
|
71 |
+
for i in range(len(embeddings_dict["clip_l"])):
|
72 |
+
token = f"sksd{chr(token_num)}"
|
73 |
+
unaestheticXLv13 += token
|
74 |
+
token_num+=1
|
75 |
+
pipe.tokenizer.add_tokens(token)
|
76 |
+
token_id = pipe.tokenizer.convert_tokens_to_ids(token)
|
77 |
+
pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
|
78 |
+
pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
|
79 |
+
|
80 |
+
|
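# Not part of the commit: the three blocks above repeat the same procedure verbatim,
# differing only in the file name. A minimal helper capturing that pattern could look
# like this ("load_negative_embedding" is a hypothetical name):
def load_negative_embedding(pipe, path, token_num):
    # Splices the file's clip_l/clip_g vectors into the two SDXL text encoders under
    # freshly minted tokens; returns (trigger_string, next_token_num).
    embeddings_dict = {}
    with safe_open(path, framework="pt") as f:
        for k in f.keys():
            embeddings_dict[k] = f.get_tensor(k)
    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer), pad_to_multiple_of=128)
    pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer), pad_to_multiple_of=128)
    trigger = ""
    for i in range(len(embeddings_dict["clip_l"])):
        token = f"sksd{chr(token_num)}"
        token_num += 1
        trigger += token
        pipe.tokenizer.add_tokens(token)
        token_id = pipe.tokenizer.convert_tokens_to_ids(token)
        pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
        pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
    return trigger, token_num
# e.g. unaestheticXLv31, token_num = load_negative_embedding(pipe, "unaestheticXLv31.safetensors", token_num)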
# Compel parses the +/- prompt-weighting syntax and produces embeddings for both
# SDXL text encoders; only the second encoder contributes pooled embeddings.
compel = Compel(tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
                text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
                requires_pooled=[False, True])
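# Not part of the commit: a quick illustration of the Compel weighting syntax the
# prompts below rely on. Each "+" upweights a term by roughly 1.1x per plus sign and
# each "-" downweights it correspondingly. A positive/negative pair encodes to
# stacked rows:
demo_conditioning, demo_pooled = compel(["1girl++, smile--", "photo, deformed"])
assert demo_conditioning.shape[0] == 2  # row 0: positive prompt, row 1: negative prompt
# txt_to_img() below slices such a batch with [0:1] and [1:2].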
def error_str(error, title="Error"):
    return f"""#### {title}
            {error}""" if error else ""

def inference(prompt, guidance, steps, seed=0, neg_prompt="", disable_auto_prompt_correction=False):
    global pipe

    generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None

    prompt, neg_prompt = auto_prompt_correction(prompt, neg_prompt, disable_auto_prompt_correction)

    height = 1024
    width = 1024

    print(prompt, neg_prompt)

    return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None

def auto_prompt_correction(prompt_ui, neg_prompt_ui, disable_auto_prompt_correction):
    # auto prompt correction
    prompt = str(prompt_ui).lower()
    neg_prompt = str(neg_prompt_ui).lower()

    if disable_auto_prompt_correction:
        return prompt, neg_prompt

    # Default showcase prompt when both fields are empty.
    if prompt == "" and neg_prompt == "":
        prompt = "1girl++, smile--, brown bob+++ hair, brown eyes, sunflowers, sky, transparent++"
        neg_prompt = f"({unaestheticXLv31})---, photo, deformed, realism, disfigured, low contrast, bad hand"
        return prompt, neg_prompt

    splited_prompt = prompt.replace(",", " ").replace("_", " ").replace("+", " ").split(" ")

    # Note: multi-word entries such as "a couple of girls" can never match a
    # whitespace-split token list; only the single-word entries take effect.
    human_words = ["1girl", "girl", "maid", "maids", "female", "1woman", "woman", "girls", "2girls", "3girls", "4girls", "5girls", "a couple of girls", "women", "1boy", "boy", "boys", "a couple of boys", "2boys", "male", "1man", "1handsome", "1bishounen", "man", "men", "guy", "guys"]
    for word in human_words:
        if word in splited_prompt:
            prompt = f"anime artwork, anime style, {prompt}"
            neg_prompt = f"({unaestheticXLv31})---,{neg_prompt}, photo, deformed, realism, disfigured, low contrast, bad hand"
            return prompt, neg_prompt

    animal_words = ["cat", "dog", "bird", "pigeon", "rabbit", "bunny", "horse"]
    for word in animal_words:
        if word in splited_prompt:
            prompt = f"anime style, a {prompt}, 4k, detailed"
            neg_prompt = f"{neg_prompt},({unaestheticXLv31})---"
            return prompt, neg_prompt

    background_words = ["mount fuji", "mt. fuji", "building", "buildings", "tokyo", "kyoto", "nara", "shibuya", "shinjuku"]
    for word in background_words:
        if word in splited_prompt:
            prompt = f"anime artwork, anime style, {prompt}, highly detailed"
            neg_prompt = f"girl, deformed+++, {neg_prompt}, girl, boy, photo, people, low quality, ui, error, lowres, jpeg artifacts, 2d, 3d, cg, text"
            return prompt, neg_prompt

    return prompt, neg_prompt
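# Not part of the commit: auto_prompt_correction() is a first-match-wins rewrite.
# For example, "1girl" is in human_words, so:
demo_prompt, demo_neg = auto_prompt_correction("1girl, red hair", "", False)
assert demo_prompt == "anime artwork, anime style, 1girl, red hair"
# demo_neg is "(<unaestheticXLv31 trigger tokens>)---,, photo, deformed, realism,
# disfigured, low contrast, bad hand" (the doubled comma comes from the empty negative).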
def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
    # Encode both prompts in one batch; row 0 is the positive prompt, row 1 the negative.
    conditioning, pooled = compel([prompt, neg_prompt])

    result = pipe(
        prompt_embeds=conditioning[0:1],
        pooled_prompt_embeds=pooled[0:1],
        negative_prompt_embeds=conditioning[1:2],
        negative_pooled_prompt_embeds=pooled[1:2],
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    return result.images[0]
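# Not part of the commit: a minimal direct use of txt_to_img() outside the Gradio UI
# (a sketch; assumes the module-level pipe and compel above are already initialized):
#
#   image = txt_to_img("anime artwork, anime style, 1girl++", "photo, deformed",
#                      guidance=7.5, steps=20, width=1024, height=1024,
#                      generator=torch.Generator('cuda').manual_seed(42))
#   image.save("sample.png")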
css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}

... (unchanged lines elided; the gr.HTML header below sits inside `with gr.Blocks(css=css) as demo:`) ...

        f"""
        <div class="main-div">
            <div>
                <h1>Emi Demo</h1>
            </div>
            <p>
                Demo for <a href="https://huggingface.co/aipicasso/emi">Emi</a><br>
            </p>
            <p>
                サンプル: そのままGenerateボタンを押してください。<br>
                sample : Click "Generate" button without any prompts.
            </p>
            <p>
                sample prompt1 : 1girl++, cool+, smile--, colorful long hair, colorful eyes, stars, night, pastel color, transparent+
            </p>
            <p>
                sample prompt2 : 1man+, focus, wavy short hair, blue eyes, black shirt, white background, simple background
            </p>
            <p>
                sample prompt3 : anime style, 1girl++
            </p>
            <p>
                共有ボタンを押してみんなに画像を共有しましょう。Please push share button to share your image.
            </p>
            <p>
                Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/akhaliq/cool-japan-diffusion-2-1-0/settings'>Settings</a></b>"} <br>
                <a style="display:inline-block" href="https://huggingface.co/spaces/aipicasso/emi-latest-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> to skip the queue and generate without waiting.
                <h2>
                    <a href="https://e5b26b1151bf4a7fe2.gradio.live"> | Emi Demo (Sub) | </a>
                    <a href="https://e4f3a9ae48c8c4921e.gradio.live"> Emi Stable Demo | </a>
                    <a href="https://07f0307923ad05d680.gradio.live"> Emix Demo 1 |</a>
                    <a href="https://471d8ebdaf5d5c5aa5.gradio.live/"> Emix Demo 2 |</a>
                </h2>
            </p>
        </div>
        """
    )
... (unchanged lines elided) ...

        with gr.Column(scale=55):
            with gr.Group():
                with gr.Row():
                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="[your prompt]")
                    generate = gr.Button(value="Generate")

                image_out = gr.Image(height=1024, width=1024)
            error_output = gr.Markdown()

        with gr.Column(scale=45):
            with gr.Group():
                neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                disable_auto_prompt_correction = gr.Checkbox(label="Disable auto prompt correction.")

                with gr.Row():
                    guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=25)
                    steps = gr.Slider(label="Steps", value=20, minimum=2, maximum=75, step=1)

                seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

    inputs = [prompt, guidance, steps, seed, neg_prompt, disable_auto_prompt_correction]

    outputs = [image_out, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)

demo.queue(concurrency_count=1)
demo.launch()
requirements.txt CHANGED
@@ -1,8 +1,10 @@
---extra-index-url https://download.pytorch.org/whl/cu117
 torch
+xformers
 diffusers
 transformers
 accelerate
 ftfy
 triton
-
+safetensors
+compel
+omegaconf
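The new xformers requirement only takes effect if the pipeline opts in; a sketch of the usual diffusers call (not made anywhere in this commit, and with PyTorch 2.x the default SDPA attention may make it unnecessary):

    # In app.py, after pipe = pipe.to("cuda"):
    pipe.enable_xformers_memory_efficient_attention()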