ciaochaos committed on
Commit 9f4b472
1 Parent(s): cbdf446

performance

Files changed (2)
  1. app.py +20 -13
  2. requirements.txt +1 -0
app.py CHANGED
@@ -2,27 +2,33 @@ from PIL import Image
 import gradio as gr
 from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
 import torch
+torch.backends.cuda.matmul.allow_tf32 = True
 
-controlnet = ControlNetModel.from_pretrained("ioclab/control_v1p_sd15_brightness", torch_dtype=torch.float32, use_safetensors=True)
+controlnet = ControlNetModel.from_pretrained("ioclab/control_v1p_sd15_brightness", torch_dtype=torch.float16, use_safetensors=True)
 
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float32,
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16,
 )
 
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
-# pipe.enable_xformers_memory_efficient_attention()
+pipe.enable_xformers_memory_efficient_attention()
 pipe.enable_model_cpu_offload()
-
+pipe.enable_attention_slicing()
 
 def infer(prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed):
 
     conditioning_image = Image.fromarray(conditioning_image)
     conditioning_image = conditioning_image.convert('L')
 
-    generator = torch.Generator(device="cpu").manual_seed(seed)
+    g_cpu = torch.Generator()
+
+    if seed == -1:
+        generator = g_cpu.manual_seed(g_cpu.seed())
+    else:
+        generator = g_cpu.manual_seed(seed)
 
-    output_image = pipe(
+    output_images = pipe(
         prompt,
         conditioning_image,
         height=size,
@@ -32,9 +38,9 @@ def infer(prompt, negative_prompt, conditioning_image, num_inference_steps, size
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         controlnet_conditioning_scale=1.0,
-    ).images[0]
+    ).images
 
-    return output_image
+    return output_images
 
 with gr.Blocks() as demo:
     gr.Markdown(
@@ -72,22 +78,23 @@ with gr.Blocks() as demo:
                 label='Guidance Scale',
                 minimum=0.1,
                 maximum=30.0,
-                value=9.0,
+                value=7.0,
                 step=0.1
             )
             seed = gr.Slider(
                 label='Seed',
+                value=-1,
                 minimum=-1,
                 maximum=2147483647,
                 step=1,
-                randomize=True
+                # randomize=True
             )
             submit_btn = gr.Button(
                 value="Submit",
                 variant="primary"
             )
         with gr.Column(min_width=300):
-            output = gr.Image(
+            output = gr.Gallery(
                 label="Result",
             )
 
@@ -102,8 +109,8 @@ with gr.Blocks() as demo:
        examples=[
            ["a painting of a village in the mountains", "monochrome", "./conditioning_images/conditioning_image_1.jpg"],
            ["three people walking in an alleyway with hats and pants", "monochrome", "./conditioning_images/conditioning_image_2.jpg"],
-           ["an anime character with blue hair", "monochrome", "./conditioning_images/conditioning_image_3.jpg"],
-           ["white object standing on colored ground", "monochrome", "./conditioning_images/conditioning_image_4.jpg"],
+           ["an anime character, natural skin", "monochrome", "./conditioning_images/conditioning_image_3.jpg"],
+           ["white object standing on colorful ground", "monochrome", "./conditioning_images/conditioning_image_4.jpg"],
        ],
        inputs=[
            prompt, negative_prompt, conditioning_image
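
Note on the new seed handling above (a minimal standalone sketch, not part of the commit; the helper name is illustrative): seed = -1 now means "draw a fresh random seed", while any other value is used verbatim so a run can be reproduced.

import torch

def make_generator(seed: int) -> torch.Generator:
    g_cpu = torch.Generator()  # CPU generator, matching the updated app.py
    if seed == -1:
        # Generator.seed() re-seeds from a nondeterministic source and returns that seed
        return g_cpu.manual_seed(g_cpu.seed())
    return g_cpu.manual_seed(seed)  # fixed seed -> reproducible result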
requirements.txt CHANGED
@@ -3,4 +3,5 @@ accelerate
 diffusers
 transformers
 torch
+xformers
 safetensors
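
With xformers now a hard requirement, one optional hardening (an assumption, not something this commit does) is to guard the attention call so the Space still starts if the xformers wheel is unavailable; all other calls below are taken from the updated app.py:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained(
    "ioclab/control_v1p_sd15_brightness", torch_dtype=torch.float16, use_safetensors=True
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
try:
    pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package
except Exception:
    pass  # fall back to the default attention implementation
pipe.enable_model_cpu_offload()
pipe.enable_attention_slicing()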