AP123 committed
Commit 0277b1d
1 Parent(s): 6ec4b8d

Update app.py

Files changed (1)
  1. app.py +7 -12
app.py CHANGED
@@ -13,12 +13,10 @@ from diffusers import (
     EulerDiscreteScheduler,
 )
 
-# Initialize ControlNet model
 controlnet = ControlNetModel.from_pretrained(
     "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
 )
 
-# Initialize pipeline
 pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
     "XpucT/Deliberate",
     controlnet=controlnet,
@@ -27,13 +25,11 @@ pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
 ).to("cuda")
 pipe.enable_xformers_memory_efficient_attention()
 
-# Sampler configurations
 SAMPLER_MAP = {
     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
 }
 
-# Inference function
 def inference(
     input_image: Image.Image,
     prompt: str,
@@ -47,6 +43,8 @@ def inference(
     if prompt is None or prompt == "":
         raise gr.Error("Prompt is required")
 
+    input_image = input_image.resize((512, 512))
+
     pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)
     generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
 
@@ -54,18 +52,15 @@ def inference(
         prompt=prompt,
         negative_prompt=negative_prompt,
         image=input_image,
-        control_image=input_image,  # type: ignore
-        width=512,  # type: ignore
-        height=512,  # type: ignore
+        control_image=input_image,
         guidance_scale=float(guidance_scale),
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale),  # type: ignore
+        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
         generator=generator,
         strength=float(strength),
         num_inference_steps=40,
     )
-    return out.images[0]  # type: ignore
+    return out.images[0]
 
-# Gradio UI
 with gr.Blocks() as app:
     gr.Markdown(
         '''
@@ -78,7 +73,7 @@ with gr.Blocks() as app:
     with gr.Row():
         with gr.Column():
             input_image = gr.Image(label="Input Illusion", type="pil")
-            prompt = gr.Textbox(label="Prompt", info="Prompt that guides the generation towards")
+            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt", value="ugly, disfigured, low quality, blurry, nsfw")
            with gr.Accordion(label="Advanced Options", open=False):
                controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=1.1, label="Controlnet Conditioning Scale")
@@ -99,4 +94,4 @@ with gr.Blocks() as app:
 app.queue(concurrency_count=4, max_size=20)
 
 if __name__ == "__main__":
-    app.launch(debug=True)
+    app.launch(debug=True)
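
For context, below is a minimal standalone sketch of the inference path as it stands after this commit: the input image is resized to 512x512 up front and passed as both the init image and the control image, rather than passing width/height to the pipeline call. The model IDs match the diff; the input file name, prompt, seed, and guidance/strength values are illustrative assumptions, not part of the commit.

import torch
from PIL import Image
from diffusers import (
    ControlNetModel,
    DPMSolverMultistepScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
)

# Load the same ControlNet and base model referenced in the diff.
controlnet = ControlNetModel.from_pretrained(
    "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "XpucT/Deliberate", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# DPM++ SDE sampler with Karras sigmas, built from the pipeline's default scheduler config.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
)

# Resize once up front, as the commit now does, and reuse the image for conditioning.
source = Image.open("illusion.png").convert("RGB").resize((512, 512))  # hypothetical input file

out = pipe(
    prompt="a medieval village seen from above",  # example prompt
    negative_prompt="ugly, disfigured, low quality, blurry, nsfw",
    image=source,
    control_image=source,
    guidance_scale=7.5,                 # example value
    controlnet_conditioning_scale=1.1,  # UI default from the diff
    generator=torch.manual_seed(0),     # example fixed seed
    strength=0.8,                       # example value
    num_inference_steps=40,
)
out.images[0].save("result.png")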