vittore committed
Commit: 58a057a
Parent: 55ec1d0

Add cuda check
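This commit replaces the hard-coded `"cuda"` device with a runtime check so the Space can still start on machines without a GPU; it also drops the startup `move_cache()` call and deletes `app2.py`, whose contents now live in `app.py`. As a hedged aside (not part of this commit), float16 weights are generally only practical on the GPU, so a CPU fallback would usually switch the dtype as well; a minimal sketch of that pattern:

```python
import torch

# Prefer CUDA when a GPU is present, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Assumption, not in this commit: fp16 is poorly supported on CPU, so pick the dtype per device.
dtype = torch.float16 if device == "cuda" else torch.float32
```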

Files changed (2):
  1. app.py +266 -2
  2. app2.py +0 -264
app.py CHANGED
@@ -1,4 +1,268 @@
- from transformers.utils.hub import move_cache
- move_cache()
+ import spaces
+ import torch
+ import gradio as gr
+ from gradio import processing_utils, utils
+ from PIL import Image
+ import random
+ from diffusers import (
+     DiffusionPipeline,
+     AutoencoderKL,
+     StableDiffusionControlNetPipeline,
+     ControlNetModel,
+     StableDiffusionLatentUpscalePipeline,
+     StableDiffusionImg2ImgPipeline,
+     StableDiffusionControlNetImg2ImgPipeline,
+     DPMSolverMultistepScheduler,  # <-- Added import
+     EulerDiscreteScheduler  # <-- Added import
+ )
+ import tempfile
+ import time
+ from share_btn import community_icon_html, loading_icon_html, share_js
+ import user_history
+ from illusion_style import css
+ BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
+
+
+ if torch.cuda.is_available():
+     device = 'cuda'
+ else:
+     device = 'cpu'
+
+ # Initialize both pipelines
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+ #init_pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", torch_dtype=torch.float16)
+ controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)
+ main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     BASE_MODEL,
+     controlnet=controlnet,
+     vae=vae,
+     safety_checker=None,
+     torch_dtype=torch.float16,
+ ).to(device)
+
+ #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #main_pipe.unet.to(memory_format=torch.channels_last)
+ #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #model_id = "stabilityai/sd-x2-latent-upscaler"
+ image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
+
+
+ #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+ #upscaler.to("cuda")
+
+
+ # Sampler map
+ SAMPLER_MAP = {
+     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"),
+     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
+ }
+
+ def center_crop_resize(img, output_size=(512, 512)):
+     width, height = img.size
+
+     # Calculate dimensions to crop to the center
+     new_dimension = min(width, height)
+     left = (width - new_dimension)/2
+     top = (height - new_dimension)/2
+     right = (width + new_dimension)/2
+     bottom = (height + new_dimension)/2
+
+     # Crop and resize
+     img = img.crop((left, top, right, bottom))
+     img = img.resize(output_size)
+
+     return img
+
+ def common_upscale(samples, width, height, upscale_method, crop=False):
+     if crop == "center":
+         old_width = samples.shape[3]
+         old_height = samples.shape[2]
+         old_aspect = old_width / old_height
+         new_aspect = width / height
+         x = 0
+         y = 0
+         if old_aspect > new_aspect:
+             x = round((old_width - old_width * (new_aspect / old_aspect)) / 2)
+         elif old_aspect < new_aspect:
+             y = round((old_height - old_height * (old_aspect / new_aspect)) / 2)
+         s = samples[:,:,y:old_height-y,x:old_width-x]
+     else:
+         s = samples
+
+     return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
+
+ def upscale(samples, upscale_method, scale_by):
+     #s = samples.copy()
+     width = round(samples["images"].shape[3] * scale_by)
+     height = round(samples["images"].shape[2] * scale_by)
+     s = common_upscale(samples["images"], width, height, upscale_method, "disabled")
+     return (s)
+
+ def check_inputs(prompt: str, control_image: Image.Image):
+     if control_image is None:
+         raise gr.Error("Please select or upload an Input Illusion")
+     if prompt is None or prompt == "":
+         raise gr.Error("Prompt is required")
+
+ def convert_to_pil(base64_image):
+     pil_image = Image.open(base64_image)
+     return pil_image
+
+ def convert_to_base64(pil_image):
+     with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
+         pil_image.save(temp_file.name)
+     return temp_file.name
+
+ # Inference function
+ @spaces.GPU
+ def inference(
+     control_image: Image.Image,
+     prompt: str,
+     negative_prompt: str,
+     guidance_scale: float = 8.0,
+     controlnet_conditioning_scale: float = 1,
+     control_guidance_start: float = 1,
+     control_guidance_end: float = 1,
+     upscaler_strength: float = 0.5,
+     seed: int = -1,
+     sampler = "DPM++ Karras SDE",
+     progress = gr.Progress(track_tqdm=True),
+     profile: gr.OAuthProfile | None = None,
+ ):
+     start_time = time.time()
+     start_time_struct = time.localtime(start_time)
+     start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
+     print(f"Inference started at {start_time_formatted}")
+
+     # Generate the initial image
+     #init_image = init_pipe(prompt).images[0]
+
+     # Rest of your existing code
+     control_image_small = center_crop_resize(control_image)
+     control_image_large = center_crop_resize(control_image, (1024, 1024))
+
+     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
+     my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
+     generator = torch.Generator(device=device).manual_seed(my_seed)
+
+     out = main_pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         image=control_image_small,
+         guidance_scale=float(guidance_scale),
+         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+         generator=generator,
+         control_guidance_start=float(control_guidance_start),
+         control_guidance_end=float(control_guidance_end),
+         num_inference_steps=15,
+         output_type="latent"
+     )
+     upscaled_latents = upscale(out, "nearest-exact", 2)
+     out_image = image_pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         control_image=control_image_large,
+         image=upscaled_latents,
+         guidance_scale=float(guidance_scale),
+         generator=generator,
+         num_inference_steps=20,
+         strength=upscaler_strength,
+         control_guidance_start=float(control_guidance_start),
+         control_guidance_end=float(control_guidance_end),
+         controlnet_conditioning_scale=float(controlnet_conditioning_scale)
+     )
+     end_time = time.time()
+     end_time_struct = time.localtime(end_time)
+     end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
+     print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
+
+     # Save image + metadata
+     user_history.save_image(
+         label=prompt,
+         image=out_image["images"][0],
+         profile=profile,
+         metadata={
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "guidance_scale": guidance_scale,
+             "controlnet_conditioning_scale": controlnet_conditioning_scale,
+             "control_guidance_start": control_guidance_start,
+             "control_guidance_end": control_guidance_end,
+             "upscaler_strength": upscaler_strength,
+             "seed": seed,
+             "sampler": sampler,
+         },
+     )
+
+     return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
+
+ with gr.Blocks() as app:
+     gr.Markdown(
+ '''
+ <center><h1>Illusion Diffusion HQ 🌀</h1>
+ <span style="font-size:16px;">Generate stunning high quality illusion artwork with Stable Diffusion</span>
+ </center>
+
+ This project works by using
+ [Monster Labs QR Control Net](https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster) and [multimodalart](https://twitter.com/multimodalart).
+ Given a prompt and your pattern, we use a QR code conditioned controlnet to create a stunning illusion! Credit to: [MrUgleh](https://twitter.com/MrUgleh) for discovering the workflow :)
+ '''
+     )
+     state_img_input = gr.State()
+     state_img_output = gr.State()
+     with gr.Row():
+         with gr.Column():
+             control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
+             controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
+             gr.Examples(examples=["checkers.png", "checkers_mid.jpg", "pattern.png", "ultra_checkers.png", "spiral.jpeg", "funky.jpeg"], inputs=control_image)
+             prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and castle in the distance")
+             negative_prompt = gr.Textbox(label="Negative Prompt", info="Type what you don't want to see", value="low quality", elem_id="negative_prompt")
+             with gr.Accordion(label="Advanced Options", open=False):
+                 guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
+                 sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler")
+                 control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
+                 control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
+                 strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
+                 seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
+                 used_seed = gr.Number(label="Last seed used", interactive=False)
+             run_btn = gr.Button("Run")
+         with gr.Column():
+             result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
+             with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+                 community_icon = gr.HTML(community_icon_html)
+                 loading_icon = gr.HTML(loading_icon_html)
+                 share_button = gr.Button("Share to community", elem_id="share-btn")
+
+     prompt.submit(
+         check_inputs,
+         inputs=[prompt, control_image],
+         queue=False
+     ).success(
+         inference,
+         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+         outputs=[result_image, result_image, share_group, used_seed])
+
+     run_btn.click(
+         check_inputs,
+         inputs=[prompt, control_image],
+         queue=False
+     ).success(
+         inference,
+         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+         outputs=[result_image, result_image, share_group, used_seed])
+
+     share_button.click(None, [], [], js=share_js)
+
+ with gr.Blocks(css=css) as app_with_history:
+     with gr.Tab("Demo"):
+         app.render()
+     with gr.Tab("Past generations"):
+         user_history.render()
+
+ app_with_history.queue(max_size=20, api_open=False)
+
+ if __name__ == "__main__":
+     app_with_history.launch(max_threads=400)
app2.py DELETED
@@ -1,264 +0,0 @@
- import spaces
- import torch
- import gradio as gr
- from gradio import processing_utils, utils
- from PIL import Image
- import random
- from diffusers import (
-     DiffusionPipeline,
-     AutoencoderKL,
-     StableDiffusionControlNetPipeline,
-     ControlNetModel,
-     StableDiffusionLatentUpscalePipeline,
-     StableDiffusionImg2ImgPipeline,
-     StableDiffusionControlNetImg2ImgPipeline,
-     DPMSolverMultistepScheduler, # <-- Added import
-     EulerDiscreteScheduler # <-- Added import
- )
- import tempfile
- import time
- from share_btn import community_icon_html, loading_icon_html, share_js
- import user_history
- from illusion_style import css
-
- from transformers.utils.hub import move_cache
- move_cache()
-
- BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
-
- # Initialize both pipelines
- vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
- #init_pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", torch_dtype=torch.float16)
- controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)#, torch_dtype=torch.float16)
- main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
-     BASE_MODEL,
-     controlnet=controlnet,
-     vae=vae,
-     safety_checker=None,
-     torch_dtype=torch.float16,
- ).to("cuda")
-
- #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
- #main_pipe.unet.to(memory_format=torch.channels_last)
- #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
- #model_id = "stabilityai/sd-x2-latent-upscaler"
- image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
-
-
- #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
- #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
- #upscaler.to("cuda")
-
-
- # Sampler map
- SAMPLER_MAP = {
-     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
-     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
- }
-
- def center_crop_resize(img, output_size=(512, 512)):
-     width, height = img.size
-
-     # Calculate dimensions to crop to the center
-     new_dimension = min(width, height)
-     left = (width - new_dimension)/2
-     top = (height - new_dimension)/2
-     right = (width + new_dimension)/2
-     bottom = (height + new_dimension)/2
-
-     # Crop and resize
-     img = img.crop((left, top, right, bottom))
-     img = img.resize(output_size)
-
-     return img
-
- def common_upscale(samples, width, height, upscale_method, crop=False):
-     if crop == "center":
-         old_width = samples.shape[3]
-         old_height = samples.shape[2]
-         old_aspect = old_width / old_height
-         new_aspect = width / height
-         x = 0
-         y = 0
-         if old_aspect > new_aspect:
-             x = round((old_width - old_width * (new_aspect / old_aspect)) / 2)
-         elif old_aspect < new_aspect:
-             y = round((old_height - old_height * (old_aspect / new_aspect)) / 2)
-         s = samples[:,:,y:old_height-y,x:old_width-x]
-     else:
-         s = samples
-
-     return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
-
- def upscale(samples, upscale_method, scale_by):
-     #s = samples.copy()
-     width = round(samples["images"].shape[3] * scale_by)
-     height = round(samples["images"].shape[2] * scale_by)
-     s = common_upscale(samples["images"], width, height, upscale_method, "disabled")
-     return (s)
-
- def check_inputs(prompt: str, control_image: Image.Image):
-     if control_image is None:
-         raise gr.Error("Please select or upload an Input Illusion")
-     if prompt is None or prompt == "":
-         raise gr.Error("Prompt is required")
-
- def convert_to_pil(base64_image):
-     pil_image = Image.open(base64_image)
-     return pil_image
-
- def convert_to_base64(pil_image):
-     with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
-         image.save(temp_file.name)
-     return temp_file.name
-
- # Inference function
- @spaces.GPU
- def inference(
-     control_image: Image.Image,
-     prompt: str,
-     negative_prompt: str,
-     guidance_scale: float = 8.0,
-     controlnet_conditioning_scale: float = 1,
-     control_guidance_start: float = 1,
-     control_guidance_end: float = 1,
-     upscaler_strength: float = 0.5,
-     seed: int = -1,
-     sampler = "DPM++ Karras SDE",
-     progress = gr.Progress(track_tqdm=True),
-     profile: gr.OAuthProfile | None = None,
- ):
-     start_time = time.time()
-     start_time_struct = time.localtime(start_time)
-     start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
-     print(f"Inference started at {start_time_formatted}")
-
-     # Generate the initial image
-     #init_image = init_pipe(prompt).images[0]
-
-     # Rest of your existing code
-     control_image_small = center_crop_resize(control_image)
-     control_image_large = center_crop_resize(control_image, (1024, 1024))
-
-     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
-     my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
-     generator = torch.Generator(device="cuda").manual_seed(my_seed)
-
-     out = main_pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         image=control_image_small,
-         guidance_scale=float(guidance_scale),
-         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-         generator=generator,
-         control_guidance_start=float(control_guidance_start),
-         control_guidance_end=float(control_guidance_end),
-         num_inference_steps=15,
-         output_type="latent"
-     )
-     upscaled_latents = upscale(out, "nearest-exact", 2)
-     out_image = image_pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         control_image=control_image_large,
-         image=upscaled_latents,
-         guidance_scale=float(guidance_scale),
-         generator=generator,
-         num_inference_steps=20,
-         strength=upscaler_strength,
-         control_guidance_start=float(control_guidance_start),
-         control_guidance_end=float(control_guidance_end),
-         controlnet_conditioning_scale=float(controlnet_conditioning_scale)
-     )
-     end_time = time.time()
-     end_time_struct = time.localtime(end_time)
-     end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
-     print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
-
-     # Save image + metadata
-     user_history.save_image(
-         label=prompt,
-         image=out_image["images"][0],
-         profile=profile,
-         metadata={
-             "prompt": prompt,
-             "negative_prompt": negative_prompt,
-             "guidance_scale": guidance_scale,
-             "controlnet_conditioning_scale": controlnet_conditioning_scale,
-             "control_guidance_start": control_guidance_start,
-             "control_guidance_end": control_guidance_end,
-             "upscaler_strength": upscaler_strength,
-             "seed": seed,
-             "sampler": sampler,
-         },
-     )
-
-     return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
-
- with gr.Blocks() as app:
-     gr.Markdown(
- '''
- <center><h1>Illusion Diffusion HQ 🌀</h1></span>
- <span font-size:16px;">Generate stunning high quality illusion artwork with Stable Diffusion</span>
- </center>
-
- This project works by using
- [Monster Labs QR Control Net](https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster) and [multimodalart](https://twitter.com/multimodalart)
- Given a prompt and your pattern, we use a QR code conditioned controlnet to create a stunning illusion! Credit to: [MrUgleh](https://twitter.com/MrUgleh) for discovering the workflow :)
- '''
-     )
-     state_img_input = gr.State()
-     state_img_output = gr.State()
-     with gr.Row():
-         with gr.Column():
-             control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
-             controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
-             gr.Examples(examples=["checkers.png", "checkers_mid.jpg", "pattern.png", "ultra_checkers.png", "spiral.jpeg", "funky.jpeg" ], inputs=control_image)
-             prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and castle in the distance")
-             negative_prompt = gr.Textbox(label="Negative Prompt", info="Type what you don't want to see", value="low quality", elem_id="negative_prompt")
-             with gr.Accordion(label="Advanced Options", open=False):
-                 guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
-                 sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler")
-                 control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
-                 control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
-                 strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
-                 seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
-                 used_seed = gr.Number(label="Last seed used", interactive=False)
-             run_btn = gr.Button("Run")
-         with gr.Column():
-             result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
-             with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
-                 community_icon = gr.HTML(community_icon_html)
-                 loading_icon = gr.HTML(loading_icon_html)
-                 share_button = gr.Button("Share to community", elem_id="share-btn")
-
-     prompt.submit(
-         check_inputs,
-         inputs=[prompt, control_image],
-         queue=False
-     ).success(
-         inference,
-         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-         outputs=[result_image, result_image, share_group, used_seed])
-
-     run_btn.click(
-         check_inputs,
-         inputs=[prompt, control_image],
-         queue=False
-     ).success(
-         inference,
-         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-         outputs=[result_image, result_image, share_group, used_seed])
-
-     share_button.click(None, [], [], js=share_js)
-
- with gr.Blocks(css=css) as app_with_history:
-     with gr.Tab("Demo"):
-         app.render()
-     with gr.Tab("Past generations"):
-         user_history.render()
-
- app_with_history.queue(max_size=20, api_open=False)
-
- if __name__ == "__main__":
-     app_with_history.launch(max_threads=400)
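For reference, the two-pass flow that app.py implements (and that app2.py previously implemented) can be exercised outside Gradio roughly as follows. This is a sketch, not part of the commit: it assumes `main_pipe`, `image_pipe`, and `device` are already set up as in app.py above, and the prompt, seed, and pattern file name are placeholders.

```python
import torch
from PIL import Image

pattern = Image.open("spiral.jpeg").convert("RGB")   # placeholder pattern; the app center-crops first
control_small = pattern.resize((512, 512))
control_large = pattern.resize((1024, 1024))
generator = torch.Generator(device=device).manual_seed(0)

prompt = "Medieval village scene with busy streets and castle in the distance"

# Pass 1: ControlNet text-to-image at 512x512, kept in latent space.
latents = main_pipe(
    prompt=prompt,
    negative_prompt="low quality",
    image=control_small,
    controlnet_conditioning_scale=0.8,
    num_inference_steps=15,
    generator=generator,
    output_type="latent",
).images

# 2x nearest-exact upscale of the latents (roughly what upscale()/common_upscale() do).
latents = torch.nn.functional.interpolate(latents, scale_factor=2, mode="nearest-exact")

# Pass 2: ControlNet img2img refinement against the 1024x1024 pattern.
result = image_pipe(
    prompt=prompt,
    negative_prompt="low quality",
    control_image=control_large,
    image=latents,
    strength=0.5,
    num_inference_steps=20,
    generator=generator,
).images[0]
result.save("illusion.png")
```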