vittore committed on
Commit
54577b9
1 Parent(s): 5837400

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -7
app.py CHANGED
@@ -37,7 +37,7 @@ main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
37
  vae=vae,
38
  safety_checker=None,
39
  torch_dtype=torch.float16,
40
- ).to(device)
41
 
42
  #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
43
  #main_pipe.unet.to(memory_format=torch.channels_last)
@@ -45,7 +45,6 @@ main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
45
  #model_id = "stabilityai/sd-x2-latent-upscaler"
46
  image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
47
 
48
-
49
  #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
50
  #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
51
  #upscaler.to("cuda")
@@ -105,13 +104,12 @@ def check_inputs(prompt: str, control_image: Image.Image):
105
  raise gr.Error("Prompt is required")
106
 
107
  def convert_to_pil(base64_image):
108
- pil_image = Image.open(base64_image)
109
  return pil_image
110
 
111
  def convert_to_base64(pil_image):
112
- with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
113
- image.save(temp_file.name)
114
- return temp_file.name
115
 
116
  # Inference function
117
  @spaces.GPU
@@ -143,7 +141,7 @@ def inference(
143
 
144
  main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
145
  my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
146
- generator = torch.Generator(device=device).manual_seed(my_seed)
147
 
148
  out = main_pipe(
149
  prompt=prompt,
@@ -195,6 +193,87 @@ def inference(
195
  )
196
 
197
  return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
  def greet(name):
200
  return "Hello " + name + "!!"
 
37
  vae=vae,
38
  safety_checker=None,
39
  torch_dtype=torch.float16,
40
+ ).to("cuda")
41
 
42
  #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
43
  #main_pipe.unet.to(memory_format=torch.channels_last)
 
45
  #model_id = "stabilityai/sd-x2-latent-upscaler"
46
  image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
47
 
 
48
  #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
49
  #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
50
  #upscaler.to("cuda")
 
104
  raise gr.Error("Prompt is required")
105
 
106
  def convert_to_pil(base64_image):
107
+ pil_image = processing_utils.decode_base64_to_image(base64_image)
108
  return pil_image
109
 
110
  def convert_to_base64(pil_image):
111
+ base64_image = processing_utils.encode_pil_to_base64(pil_image)
112
+ return base64_image
 
113
 
114
  # Inference function
115
  @spaces.GPU
 
141
 
142
  main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
143
  my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
144
+ generator = torch.Generator(device="cuda").manual_seed(my_seed)
145
 
146
  out = main_pipe(
147
  prompt=prompt,
 
193
  )
194
 
195
  return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
196
+
197
+ with gr.Blocks() as app:
198
+ gr.Markdown(
199
+ '''
200
+ <center><h1>Illusion Diffusion HQ 🌀</h1></span>
201
+ <span font-size:16px;">Generate stunning high quality illusion artwork with Stable Diffusion</span>
202
+ </center>
203
+
204
+ A space by AP [Follow me on Twitter](https://twitter.com/angrypenguinPNG) with big contributions from [multimodalart](https://twitter.com/multimodalart)
205
+ This project works by using [Monster Labs QR Control Net](https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster).
206
+ Given a prompt and your pattern, we use a QR code conditioned controlnet to create a stunning illusion! Credit to: [MrUgleh](https://twitter.com/MrUgleh) for discovering the workflow :)
207
+ '''
208
+ )
209
+ state_img_input = gr.State()
210
+ state_img_output = gr.State()
211
+ with gr.Row():
212
+ with gr.Column():
213
+ control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
214
+ controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
215
+ gr.Examples(examples=["checkers.png", "checkers_mid.jpg", "pattern.png", "ultra_checkers.png", "spiral.jpeg", "funky.jpeg" ], inputs=control_image)
216
+ prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and castle in the distance")
217
+ negative_prompt = gr.Textbox(label="Negative Prompt", info="Type what you don't want to see", value="low quality", elem_id="negative_prompt")
218
+ with gr.Accordion(label="Advanced Options", open=False):
219
+ guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
220
+ sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler")
221
+ control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
222
+ control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
223
+ strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
224
+ seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
225
+ used_seed = gr.Number(label="Last seed used",interactive=False)
226
+ run_btn = gr.Button("Run")
227
+ with gr.Column():
228
+ result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
229
+ with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
230
+ community_icon = gr.HTML(community_icon_html)
231
+ loading_icon = gr.HTML(loading_icon_html)
232
+ share_button = gr.Button("Share to community", elem_id="share-btn")
233
+
234
+ prompt.submit(
235
+ check_inputs,
236
+ inputs=[prompt, control_image],
237
+ queue=False
238
+ ).success(
239
+ convert_to_pil,
240
+ inputs=[control_image],
241
+ outputs=[state_img_input],
242
+ queue=False,
243
+ preprocess=False,
244
+ ).success(
245
+ inference,
246
+ inputs=[state_img_input, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
247
+ outputs=[state_img_output, result_image, share_group, used_seed]
248
+ ).success(
249
+ convert_to_base64,
250
+ inputs=[state_img_output],
251
+ outputs=[result_image],
252
+ queue=False,
253
+ postprocess=False
254
+ )
255
+ run_btn.click(
256
+ check_inputs,
257
+ inputs=[prompt, control_image],
258
+ queue=False
259
+ ).success(
260
+ convert_to_pil,
261
+ inputs=[control_image],
262
+ outputs=[state_img_input],
263
+ queue=False,
264
+ preprocess=False,
265
+ ).success(
266
+ inference,
267
+ inputs=[state_img_input, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
268
+ outputs=[state_img_output, result_image, share_group, used_seed]
269
+ ).success(
270
+ convert_to_base64,
271
+ inputs=[state_img_output],
272
+ outputs=[result_image],
273
+ queue=False,
274
+ postprocess=False
275
+ )
276
+ share_button.click(None, [], [], js=share_js)
277
 
278
  def greet(name):
279
  return "Hello " + name + "!!"