kadirnar committed on
Commit
1e1a292
1 Parent(s): 5fef1f4
app.py CHANGED
@@ -1,19 +1,19 @@
1
- from diffusion_webui.controlnet.controlnet_canny import stable_diffusion_controlnet_canny_app, stable_diffusion_controlnet_canny
2
- from diffusion_webui.controlnet.controlnet_depth import stable_diffusion_controlnet_depth_app, stable_diffusion_controlnet_depth
3
- from diffusion_webui.controlnet.controlnet_hed import stable_diffusion_controlnet_hed_app, stable_diffusion_controlnet_hed
4
- from diffusion_webui.controlnet.controlnet_mlsd import stable_diffusion_controlnet_mlsd_app, stable_diffusion_controlnet_mlsd
5
- from diffusion_webui.controlnet.controlnet_pose import stable_diffusion_controlnet_pose_app, stable_diffusion_controlnet_pose
6
- from diffusion_webui.controlnet.controlnet_scribble import stable_diffusion_controlnet_scribble_app, stable_diffusion_controlnet_scribble
7
- from diffusion_webui.controlnet.controlnet_seg import stable_diffusion_controlnet_seg_app, stable_diffusion_controlnet_seg
8
-
9
- from diffusion_webui.stable_diffusion.text2img_app import stable_diffusion_text2img_app, stable_diffusion_text2img
10
- from diffusion_webui.stable_diffusion.img2img_app import stable_diffusion_img2img_app, stable_diffusion_img2img
11
- from diffusion_webui.stable_diffusion.inpaint_app import stable_diffusion_inpaint_app, stable_diffusion_inpaint
12
- from diffusion_webui.stable_diffusion.keras_txt2img import keras_stable_diffusion, keras_stable_diffusion_app
13
-
14
-
15
  import gradio as gr
16
17
  app = gr.Blocks()
18
  with app:
19
  gr.HTML(
@@ -33,30 +33,30 @@ with app:
33
  )
34
  with gr.Row():
35
  with gr.Column():
36
- with gr.Tab('Text2Img'):
37
  stable_diffusion_text2img_app()
38
- with gr.Tab('Img2Img'):
39
  stable_diffusion_img2img_app()
40
- with gr.Tab('Inpaint'):
41
  stable_diffusion_inpaint_app()
42
-
43
- with gr.Tab('ControlNet'):
44
- with gr.Tab('Canny'):
45
  stable_diffusion_controlnet_canny_app()
46
- with gr.Tab('Depth'):
47
  stable_diffusion_controlnet_depth_app()
48
- with gr.Tab('HED'):
49
  stable_diffusion_controlnet_hed_app()
50
- with gr.Tab('MLSD'):
51
  stable_diffusion_controlnet_mlsd_app()
52
- with gr.Tab('Pose'):
53
  stable_diffusion_controlnet_pose_app()
54
- with gr.Tab('Seg'):
55
  stable_diffusion_controlnet_seg_app()
56
- with gr.Tab('Scribble'):
57
  stable_diffusion_controlnet_scribble_app()
58
-
59
- with gr.Tab('Keras Diffusion'):
60
  keras_diffusion_app = keras_stable_diffusion_app()
61
 
62
- app.launch(debug=True)
1
  import gradio as gr
2
 
3
+ from diffusion_webui.helpers import (
4
+ keras_stable_diffusion_app,
5
+ stable_diffusion_controlnet_canny_app,
6
+ stable_diffusion_controlnet_depth_app,
7
+ stable_diffusion_controlnet_hed_app,
8
+ stable_diffusion_controlnet_mlsd_app,
9
+ stable_diffusion_controlnet_pose_app,
10
+ stable_diffusion_controlnet_scribble_app,
11
+ stable_diffusion_controlnet_seg_app,
12
+ stable_diffusion_img2img_app,
13
+ stable_diffusion_inpaint_app,
14
+ stable_diffusion_text2img_app,
15
+ )
16
+
17
  app = gr.Blocks()
18
  with app:
19
  gr.HTML(
 
33
  )
34
  with gr.Row():
35
  with gr.Column():
36
+ with gr.Tab("Text2Img"):
37
  stable_diffusion_text2img_app()
38
+ with gr.Tab("Img2Img"):
39
  stable_diffusion_img2img_app()
40
+ with gr.Tab("Inpaint"):
41
  stable_diffusion_inpaint_app()
42
+
43
+ with gr.Tab("ControlNet"):
44
+ with gr.Tab("Canny"):
45
  stable_diffusion_controlnet_canny_app()
46
+ with gr.Tab("Depth"):
47
  stable_diffusion_controlnet_depth_app()
48
+ with gr.Tab("HED"):
49
  stable_diffusion_controlnet_hed_app()
50
+ with gr.Tab("MLSD"):
51
  stable_diffusion_controlnet_mlsd_app()
52
+ with gr.Tab("Pose"):
53
  stable_diffusion_controlnet_pose_app()
54
+ with gr.Tab("Seg"):
55
  stable_diffusion_controlnet_seg_app()
56
+ with gr.Tab("Scribble"):
57
  stable_diffusion_controlnet_scribble_app()
58
+
59
+ with gr.Tab("Keras Diffusion"):
60
  keras_diffusion_app = keras_stable_diffusion_app()
61
 
62
+ app.launch(debug=True)
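Note: the rewritten app.py imports every UI builder from a single diffusion_webui.helpers module instead of the individual controlnet and stable_diffusion modules. That helpers module is not part of this diff; a minimal sketch of what it presumably re-exports, assuming the old module paths listed above are unchanged, would be:

# diffusion_webui/helpers.py -- hypothetical aggregator; only the import
# location changes in this commit, the underlying modules stay where they were.
from diffusion_webui.controlnet.controlnet_canny import (
    stable_diffusion_controlnet_canny_app,
)
from diffusion_webui.controlnet.controlnet_depth import (
    stable_diffusion_controlnet_depth_app,
)
from diffusion_webui.controlnet.controlnet_hed import (
    stable_diffusion_controlnet_hed_app,
)
from diffusion_webui.controlnet.controlnet_mlsd import (
    stable_diffusion_controlnet_mlsd_app,
)
from diffusion_webui.controlnet.controlnet_pose import (
    stable_diffusion_controlnet_pose_app,
)
from diffusion_webui.controlnet.controlnet_scribble import (
    stable_diffusion_controlnet_scribble_app,
)
from diffusion_webui.controlnet.controlnet_seg import (
    stable_diffusion_controlnet_seg_app,
)
from diffusion_webui.stable_diffusion.text2img_app import stable_diffusion_text2img_app
from diffusion_webui.stable_diffusion.img2img_app import stable_diffusion_img2img_app
from diffusion_webui.stable_diffusion.inpaint_app import stable_diffusion_inpaint_app
from diffusion_webui.stable_diffusion.keras_txt2img import keras_stable_diffusion_app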
data/bash.sh ADDED
@@ -0,0 +1,2 @@
1
+ black . --config pyproject.toml
2
+ isort .
data/test.png ADDED
diffusion_webui/controlnet/controlnet_canny.py CHANGED
@@ -1,12 +1,13 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler)
3
-
4
- from PIL import Image
5
  import gradio as gr
6
  import numpy as np
7
  import torch
8
- import cv2
9
-
 
 
 
 
10
 
11
  stable_model_list = [
12
  "runwayml/stable-diffusion-v1-5",
@@ -15,23 +16,22 @@ stable_model_list = [
15
 
16
  controlnet_canny_model_list = [
17
  "lllyasviel/sd-controlnet-canny",
18
- "thibaud/controlnet-sd21-canny-diffusers"
19
  ]
20
 
21
 
22
- stable_prompt_list = [
23
- "a photo of a man.",
24
- "a photo of a girl."
25
- ]
 
 
 
26
 
27
- stable_negative_prompt_list = [
28
- "bad, ugly",
29
- "deformed"
30
- ]
31
 
32
  def controlnet_canny(
33
- image_path:str,
34
- controlnet_model_path:str,
35
  ):
36
  image = Image.open(image_path)
37
  image = np.array(image)
@@ -42,28 +42,29 @@ def controlnet_canny(
42
  image = Image.fromarray(image)
43
 
44
  controlnet = ControlNetModel.from_pretrained(
45
- controlnet_model_path,
46
- torch_dtype=torch.float16
47
  )
48
  return controlnet, image
49
 
50
 
51
  def stable_diffusion_controlnet_canny(
52
- image_path:str,
53
- stable_model_path:str,
54
- controlnet_model_path:str,
55
- prompt:str,
56
- negative_prompt:str,
57
- guidance_scale:int,
58
- num_inference_step:int,
59
- ):
60
-
61
- controlnet, image = controlnet_canny(image_path, controlnet_model_path)
62
-
 
 
63
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
64
- pretrained_model_name_or_path=stable_model_path,
65
- controlnet=controlnet,
66
- safety_checker=None,
67
  torch_dtype=torch.float16,
68
  )
69
  pipe.to("cuda")
@@ -71,11 +72,11 @@ def stable_diffusion_controlnet_canny(
71
  pipe.enable_xformers_memory_efficient_attention()
72
 
73
  output = pipe(
74
- prompt = prompt,
75
- image = image,
76
- negative_prompt = negative_prompt,
77
- num_inference_steps = num_inference_step,
78
- guidance_scale = guidance_scale,
79
  ).images
80
 
81
  return output[0]
@@ -86,56 +87,80 @@ def stable_diffusion_controlnet_canny_app():
86
  with gr.Row():
87
  with gr.Column():
88
  controlnet_canny_image_file = gr.Image(
89
- type='filepath',
90
- label='Image'
91
  )
92
 
93
  controlnet_canny_stable_model_id = gr.Dropdown(
94
- choices=stable_model_list,
95
- value=stable_model_list[0],
96
- label='Stable Model Id'
97
  )
98
-
99
  controlnet_canny_model_id = gr.Dropdown(
100
  choices=controlnet_canny_model_list,
101
  value=controlnet_canny_model_list[0],
102
- label='Controlnet Model Id'
103
  )
104
-
105
  controlnet_canny_prompt = gr.Textbox(
106
- lines=1,
107
- value=stable_prompt_list[0],
108
- label='Prompt'
109
  )
110
 
111
  controlnet_canny_negative_prompt = gr.Textbox(
112
- lines=1,
113
- value=stable_negative_prompt_list[0],
114
- label='Negative Prompt'
115
  )
116
 
117
  with gr.Accordion("Advanced Options", open=False):
118
  controlnet_canny_guidance_scale = gr.Slider(
119
- minimum=0.1,
120
- maximum=15,
121
- step=0.1,
122
- value=7.5,
123
- label='Guidance Scale'
124
  )
125
 
126
  controlnet_canny_num_inference_step = gr.Slider(
127
- minimum=1,
128
- maximum=100,
129
- step=1,
130
- value=50,
131
- label='Num Inference Step'
132
  )
133
 
134
- controlnet_canny_predict = gr.Button(value='Generator')
135
-
136
  with gr.Column():
137
- output_image = gr.Image(label='Output')
138
-
 
 
 
 
139
  controlnet_canny_predict.click(
140
  fn=stable_diffusion_controlnet_canny,
141
  inputs=[
 
1
+ import cv2
 
 
 
2
  import gradio as gr
3
  import numpy as np
4
  import torch
5
+ from diffusers import (
6
+ ControlNetModel,
7
+ StableDiffusionControlNetPipeline,
8
+ UniPCMultistepScheduler,
9
+ )
10
+ from PIL import Image
11
 
12
  stable_model_list = [
13
  "runwayml/stable-diffusion-v1-5",
 
16
 
17
  controlnet_canny_model_list = [
18
  "lllyasviel/sd-controlnet-canny",
19
+ "thibaud/controlnet-sd21-canny-diffusers",
20
  ]
21
 
22
 
23
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
24
+
25
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
26
+
27
+ data_list = [
28
+ "data/test.png",
29
+ ]
30
 
 
 
 
 
31
 
32
  def controlnet_canny(
33
+ image_path: str,
34
+ controlnet_model_path: str,
35
  ):
36
  image = Image.open(image_path)
37
  image = np.array(image)
 
42
  image = Image.fromarray(image)
43
 
44
  controlnet = ControlNetModel.from_pretrained(
45
+ controlnet_model_path, torch_dtype=torch.float16
 
46
  )
47
  return controlnet, image
48
 
49
 
50
  def stable_diffusion_controlnet_canny(
51
+ image_path: str,
52
+ stable_model_path: str,
53
+ controlnet_model_path: str,
54
+ prompt: str,
55
+ negative_prompt: str,
56
+ guidance_scale: int,
57
+ num_inference_step: int,
58
+ ):
59
+
60
+ controlnet, image = controlnet_canny(
61
+ image_path=image_path, controlnet_model_path=controlnet_model_path
62
+ )
63
+
64
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
65
+ pretrained_model_name_or_path=stable_model_path,
66
+ controlnet=controlnet,
67
+ safety_checker=None,
68
  torch_dtype=torch.float16,
69
  )
70
  pipe.to("cuda")
 
72
  pipe.enable_xformers_memory_efficient_attention()
73
 
74
  output = pipe(
75
+ prompt=prompt,
76
+ image=image,
77
+ negative_prompt=negative_prompt,
78
+ num_inference_steps=num_inference_step,
79
+ guidance_scale=guidance_scale,
80
  ).images
81
 
82
  return output[0]
 
87
  with gr.Row():
88
  with gr.Column():
89
  controlnet_canny_image_file = gr.Image(
90
+ type="filepath", label="Image"
 
91
  )
92
 
93
  controlnet_canny_stable_model_id = gr.Dropdown(
94
+ choices=stable_model_list,
95
+ value=stable_model_list[0],
96
+ label="Stable Model Id",
97
  )
98
+
99
  controlnet_canny_model_id = gr.Dropdown(
100
  choices=controlnet_canny_model_list,
101
  value=controlnet_canny_model_list[0],
102
+ label="Controlnet Model Id",
103
  )
104
+
105
  controlnet_canny_prompt = gr.Textbox(
106
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
107
  )
108
 
109
  controlnet_canny_negative_prompt = gr.Textbox(
110
+ lines=1,
111
+ value=stable_negative_prompt_list[0],
112
+ label="Negative Prompt",
113
  )
114
 
115
  with gr.Accordion("Advanced Options", open=False):
116
  controlnet_canny_guidance_scale = gr.Slider(
117
+ minimum=0.1,
118
+ maximum=15,
119
+ step=0.1,
120
+ value=7.5,
121
+ label="Guidance Scale",
122
  )
123
 
124
  controlnet_canny_num_inference_step = gr.Slider(
125
+ minimum=1,
126
+ maximum=100,
127
+ step=1,
128
+ value=50,
129
+ label="Num Inference Step",
130
  )
131
 
132
+ controlnet_canny_predict = gr.Button(value="Generator")
133
+
134
  with gr.Column():
135
+ output_image = gr.Image(label="Output")
136
+
137
+ gr.Examples(
138
+ fn=stable_diffusion_controlnet_canny,
139
+ examples=[
140
+ [
141
+ data_list[0],
142
+ stable_model_list[0],
143
+ controlnet_canny_model_list[0],
144
+ stable_prompt_list[0],
145
+ stable_negative_prompt_list[0],
146
+ 7.5,
147
+ 50,
148
+ ]
149
+ ],
150
+ inputs=[
151
+ controlnet_canny_image_file,
152
+ controlnet_canny_stable_model_id,
153
+ controlnet_canny_model_id,
154
+ controlnet_canny_prompt,
155
+ controlnet_canny_negative_prompt,
156
+ controlnet_canny_guidance_scale,
157
+ controlnet_canny_num_inference_step,
158
+ ],
159
+ outputs=[output_image],
160
+ cache_examples=False,
161
+ label="Controlnet Canny Example",
162
+ )
163
+
164
  controlnet_canny_predict.click(
165
  fn=stable_diffusion_controlnet_canny,
166
  inputs=[
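Note: the hunks above elide the edge-detection step that turns the input photo into a ControlNet conditioning image. A minimal sketch of the usual Canny preparation, with assumed thresholds of 100/200 (the actual values are not visible in this diff):

import cv2
import numpy as np
from PIL import Image

def canny_control_image(image_path: str, low_threshold: int = 100, high_threshold: int = 200):
    # Load the source image and run Canny edge detection on it.
    image = np.array(Image.open(image_path))
    edges = cv2.Canny(image, low_threshold, high_threshold)
    # Stack the single-channel edge map to three channels, since the
    # ControlNet pipeline expects an RGB conditioning image.
    edges = np.concatenate([edges[:, :, None]] * 3, axis=2)
    return Image.fromarray(edges)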
diffusion_webui/controlnet/controlnet_depth.py CHANGED
@@ -1,11 +1,13 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler )
3
-
4
- from transformers import pipeline
5
- from PIL import Image
6
  import gradio as gr
7
  import numpy as np
8
  import torch
 
 
9
 
10
  stable_model_list = [
11
  "runwayml/stable-diffusion-v1-5",
@@ -13,27 +15,25 @@ stable_model_list = [
13
  ]
14
 
15
  controlnet_depth_model_list = [
16
- "fusing/stable-diffusion-v1-5-controlnet-depth",
17
- "thibaud/controlnet-sd21-depth-diffusers"
18
  ]
19
 
20
 
21
- stable_prompt_list = [
22
- "a photo of a man.",
23
- "a photo of a girl."
24
- ]
25
 
26
- stable_negative_prompt_list = [
27
- "bad, ugly",
28
- "deformed"
29
- ]
30
 
31
 
32
- def controlnet_depth(image_path:str, depth_model_path:str):
33
- depth_estimator = pipeline('depth-estimation')
34
 
35
  image = Image.open(image_path)
36
- image = depth_estimator(image)['depth']
37
  image = np.array(image)
38
  image = image[:, :, None]
39
  image = np.concatenate([image, image, image], axis=2)
@@ -45,23 +45,26 @@ def controlnet_depth(image_path:str, depth_model_path:str):
45
 
46
  return controlnet, image
47
 
48
- def stable_diffusion_controlnet_depth(
49
- image_path:str,
50
- stable_model_path:str,
51
- depth_model_path:str,
52
- prompt:str,
53
- negative_prompt:str,
54
- guidance_scale:int,
55
- num_inference_step:int,
56
- ):
57
 
58
- controlnet, image = controlnet_depth(image_path=image_path, depth_model_path=depth_model_path)
 
 
59
 
60
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
61
- pretrained_model_name_or_path=stable_model_path,
62
- controlnet=controlnet,
63
- safety_checker=None,
64
- torch_dtype=torch.float16
65
  )
66
 
67
  pipe.to("cuda")
@@ -69,11 +72,11 @@ def stable_diffusion_controlnet_depth(
69
  pipe.enable_xformers_memory_efficient_attention()
70
 
71
  output = pipe(
72
- prompt = prompt,
73
- image = image,
74
- negative_prompt = negative_prompt,
75
- num_inference_steps = num_inference_step,
76
- guidance_scale = guidance_scale,
77
  ).images
78
 
79
  return output[0]
@@ -84,56 +87,80 @@ def stable_diffusion_controlnet_depth_app():
84
  with gr.Row():
85
  with gr.Column():
86
  controlnet_depth_image_file = gr.Image(
87
- type='filepath',
88
- label='Image'
89
  )
90
 
91
  controlnet_depth_stable_model_id = gr.Dropdown(
92
- choices=stable_model_list,
93
- value=stable_model_list[0],
94
- label='Stable Model Id'
95
  )
96
 
97
  controlnet_depth_model_id = gr.Dropdown(
98
  choices=controlnet_depth_model_list,
99
  value=controlnet_depth_model_list[0],
100
- label='ControlNet Model Id'
101
  )
102
-
103
  controlnet_depth_prompt = gr.Textbox(
104
- lines=1,
105
- value=stable_prompt_list[0],
106
- label='Prompt'
107
  )
108
 
109
  controlnet_depth_negative_prompt = gr.Textbox(
110
- lines=1,
111
- value=stable_negative_prompt_list[0],
112
- label='Negative Prompt'
113
  )
114
 
115
  with gr.Accordion("Advanced Options", open=False):
116
  controlnet_depth_guidance_scale = gr.Slider(
117
- minimum=0.1,
118
- maximum=15,
119
- step=0.1,
120
- value=7.5,
121
- label='Guidance Scale'
122
  )
123
 
124
  controlnet_depth_num_inference_step = gr.Slider(
125
- minimum=1,
126
- maximum=100,
127
- step=1,
128
- value=50,
129
- label='Num Inference Step'
130
  )
131
 
132
- controlnet_depth_predict = gr.Button(value='Generator')
133
-
134
  with gr.Column():
135
- output_image = gr.Image(label='Output')
136
-
 
 
 
 
 
 
 
137
  controlnet_depth_predict.click(
138
  fn=stable_diffusion_controlnet_depth,
139
  inputs=[
@@ -145,5 +172,5 @@ def stable_diffusion_controlnet_depth_app():
145
  controlnet_depth_guidance_scale,
146
  controlnet_depth_num_inference_step,
147
  ],
148
- outputs=output_image
149
  )
 
 
 
 
 
 
1
  import gradio as gr
2
  import numpy as np
3
  import torch
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
+ from transformers import pipeline
11
 
12
  stable_model_list = [
13
  "runwayml/stable-diffusion-v1-5",
 
15
  ]
16
 
17
  controlnet_depth_model_list = [
18
+ "lllyasviel/sd-controlnet-depth",
19
+ "thibaud/controlnet-sd21-depth-diffusers",
20
  ]
21
 
22
 
23
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
24
+
25
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
26
 
27
+ data_list = [
28
+ "data/test.png",
29
+ ]
 
30
 
31
 
32
+ def controlnet_depth(image_path: str, depth_model_path: str):
33
+ depth_estimator = pipeline("depth-estimation")
34
 
35
  image = Image.open(image_path)
36
+ image = depth_estimator(image)["depth"]
37
  image = np.array(image)
38
  image = image[:, :, None]
39
  image = np.concatenate([image, image, image], axis=2)
 
45
 
46
  return controlnet, image
47
 
 
 
 
 
 
 
 
 
 
48
 
49
+ def stable_diffusion_controlnet_depth(
50
+ image_path: str,
51
+ stable_model_path: str,
52
+ depth_model_path: str,
53
+ prompt: str,
54
+ negative_prompt: str,
55
+ guidance_scale: int,
56
+ num_inference_step: int,
57
+ ):
58
+
59
+ controlnet, image = controlnet_depth(
60
+ image_path=image_path, depth_model_path=depth_model_path
61
+ )
62
 
63
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
64
+ pretrained_model_name_or_path=stable_model_path,
65
+ controlnet=controlnet,
66
+ safety_checker=None,
67
+ torch_dtype=torch.float16,
68
  )
69
 
70
  pipe.to("cuda")
 
72
  pipe.enable_xformers_memory_efficient_attention()
73
 
74
  output = pipe(
75
+ prompt=prompt,
76
+ image=image,
77
+ negative_prompt=negative_prompt,
78
+ num_inference_steps=num_inference_step,
79
+ guidance_scale=guidance_scale,
80
  ).images
81
 
82
  return output[0]
 
87
  with gr.Row():
88
  with gr.Column():
89
  controlnet_depth_image_file = gr.Image(
90
+ type="filepath", label="Image"
 
91
  )
92
 
93
  controlnet_depth_stable_model_id = gr.Dropdown(
94
+ choices=stable_model_list,
95
+ value=stable_model_list[0],
96
+ label="Stable Model Id",
97
  )
98
 
99
  controlnet_depth_model_id = gr.Dropdown(
100
  choices=controlnet_depth_model_list,
101
  value=controlnet_depth_model_list[0],
102
+ label="ControlNet Model Id",
103
  )
104
+
105
  controlnet_depth_prompt = gr.Textbox(
106
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
107
  )
108
 
109
  controlnet_depth_negative_prompt = gr.Textbox(
110
+ lines=1,
111
+ value=stable_negative_prompt_list[0],
112
+ label="Negative Prompt",
113
  )
114
 
115
  with gr.Accordion("Advanced Options", open=False):
116
  controlnet_depth_guidance_scale = gr.Slider(
117
+ minimum=0.1,
118
+ maximum=15,
119
+ step=0.1,
120
+ value=7.5,
121
+ label="Guidance Scale",
122
  )
123
 
124
  controlnet_depth_num_inference_step = gr.Slider(
125
+ minimum=1,
126
+ maximum=100,
127
+ step=1,
128
+ value=50,
129
+ label="Num Inference Step",
130
  )
131
 
132
+ controlnet_depth_predict = gr.Button(value="Generator")
133
+
134
  with gr.Column():
135
+ output_image = gr.Image(label="Output")
136
+
137
+ gr.Examples(
138
+ fn=stable_diffusion_controlnet_depth,
139
+ examples=[
140
+ [
141
+ data_list[0],
142
+ stable_model_list[0],
143
+ controlnet_depth_model_list[0],
144
+ stable_prompt_list[0],
145
+ stable_negative_prompt_list[0],
146
+ 7.5,
147
+ 50,
148
+ ]
149
+ ],
150
+ inputs=[
151
+ controlnet_depth_image_file,
152
+ controlnet_depth_stable_model_id,
153
+ controlnet_depth_model_id,
154
+ controlnet_depth_prompt,
155
+ controlnet_depth_negative_prompt,
156
+ controlnet_depth_guidance_scale,
157
+ controlnet_depth_num_inference_step,
158
+ ],
159
+ outputs=[output_image],
160
+ cache_examples=False,
161
+ label="ControlNet Depth Example",
162
+ )
163
+
164
  controlnet_depth_predict.click(
165
  fn=stable_diffusion_controlnet_depth,
166
  inputs=[
 
172
  controlnet_depth_guidance_scale,
173
  controlnet_depth_num_inference_step,
174
  ],
175
+ outputs=output_image,
176
  )
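Note: the gr.Examples block added to each of these modules pre-fills the same components that the predict button reads. A stripped-down sketch of the pattern, with illustrative component names rather than the repo's:

import gradio as gr

def fake_generate(prompt: str, steps: int) -> str:
    # Stand-in for the real diffusion pipeline call.
    return f"{prompt} ({steps} steps)"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    steps = gr.Slider(minimum=1, maximum=100, value=50, label="Num Inference Step")
    output = gr.Textbox(label="Output")
    button = gr.Button(value="Generate")

    # Clicking an example row copies its values into `inputs`; with
    # cache_examples=False the function is not pre-run at startup.
    gr.Examples(
        examples=[["a photo of a man.", 50]],
        inputs=[prompt, steps],
        outputs=[output],
        fn=fake_generate,
        cache_examples=False,
        label="Example",
    )
    button.click(fn=fake_generate, inputs=[prompt, steps], outputs=[output])

demo.launch()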
diffusion_webui/controlnet/controlnet_hed.py CHANGED
@@ -1,10 +1,12 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler)
3
-
4
- from controlnet_aux import HEDdetector
5
- from PIL import Image
6
  import gradio as gr
7
  import torch
 
 
 
 
 
 
 
8
 
9
  stable_model_list = [
10
  "runwayml/stable-diffusion-v1-5",
@@ -12,51 +14,51 @@ stable_model_list = [
12
  ]
13
 
14
  controlnet_hed_model_list = [
15
- "fusing/stable-diffusion-v1-5-controlnet-hed",
16
- "thibaud/controlnet-sd21-hed-diffusers"
17
  ]
18
 
19
- stable_prompt_list = [
20
- "a photo of a man.",
21
- "a photo of a girl."
22
- ]
23
 
24
- stable_negative_prompt_list = [
25
- "bad, ugly",
26
- "deformed"
27
- ]
28
 
29
 
30
- def controlnet_hed(image_path:str, controlnet_hed_model_path:str):
31
- hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
32
 
33
  image = Image.open(image_path)
34
  image = hed(image)
35
 
36
  controlnet = ControlNetModel.from_pretrained(
37
- controlnet_hed_model_path,
38
- torch_dtype=torch.float16
39
  )
40
  return controlnet, image
41
 
42
 
43
  def stable_diffusion_controlnet_hed(
44
- image_path:str,
45
- stable_model_path:str,
46
- controlnet_hed_model_path:str,
47
- prompt:str,
48
- negative_prompt:str,
49
- guidance_scale:int,
50
- num_inference_step:int,
51
- ):
52
-
53
- controlnet, image = controlnet_hed(image_path=image_path, controlnet_hed_model_path=controlnet_hed_model_path)
 
 
 
54
 
55
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
56
- pretrained_model_name_or_path=stable_model_path,
57
- controlnet=controlnet,
58
- safety_checker=None,
59
- torch_dtype=torch.float16
60
  )
61
 
62
  pipe.to("cuda")
@@ -64,71 +66,95 @@ def stable_diffusion_controlnet_hed(
64
  pipe.enable_xformers_memory_efficient_attention()
65
 
66
  output = pipe(
67
- prompt = prompt,
68
- image = image,
69
- negative_prompt = negative_prompt,
70
- num_inference_steps = num_inference_step,
71
- guidance_scale = guidance_scale,
72
  ).images
73
 
74
  return output[0]
75
 
 
76
  def stable_diffusion_controlnet_hed_app():
77
  with gr.Blocks():
78
  with gr.Row():
79
  with gr.Column():
80
  controlnet_hed_image_file = gr.Image(
81
- type='filepath',
82
- label='Image'
83
  )
84
 
85
  controlnet_hed_stable_model_id = gr.Dropdown(
86
- choices=stable_model_list,
87
- value=stable_model_list[0],
88
- label='Stable Model Id'
89
  )
90
-
91
  controlnet_hed_model_id = gr.Dropdown(
92
  choices=stable_model_list,
93
  value=stable_model_list[1],
94
- label='ControlNet Model Id'
95
  )
96
 
97
  controlnet_hed_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_prompt_list[0],
100
- label='Prompt'
101
  )
102
 
103
  controlnet_hed_negative_prompt = gr.Textbox(
104
- lines=1,
105
- value=stable_negative_prompt_list[0],
106
- label='Negative Prompt'
107
  )
108
 
109
  with gr.Accordion("Advanced Options", open=False):
110
  controlnet_hed_guidance_scale = gr.Slider(
111
- minimum=0.1,
112
- maximum=15,
113
- step=0.1,
114
- value=7.5,
115
- label='Guidance Scale'
116
  )
117
 
118
  controlnet_hed_num_inference_step = gr.Slider(
119
- minimum=1,
120
- maximum=100,
121
- step=1,
122
- value=50,
123
- label='Num Inference Step'
124
  )
125
 
126
- controlnet_hed_predict = gr.Button(value='Generator')
127
-
128
-
129
  with gr.Column():
130
- output_image = gr.Image(label='Output')
131
-
 
 
 
 
 
 
132
  controlnet_hed_predict.click(
133
  fn=stable_diffusion_controlnet_hed,
134
  inputs=[
@@ -140,6 +166,5 @@ def stable_diffusion_controlnet_hed_app():
140
  controlnet_hed_guidance_scale,
141
  controlnet_hed_num_inference_step,
142
  ],
143
- outputs=[output_image]
144
  )
145
-
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from controlnet_aux import HEDdetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
 
11
  stable_model_list = [
12
  "runwayml/stable-diffusion-v1-5",
 
14
  ]
15
 
16
  controlnet_hed_model_list = [
17
+ "lllyasviel/sd-controlnet-hed",
18
+ "thibaud/controlnet-sd21-hed-diffusers",
19
  ]
20
 
21
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
22
+
23
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
24
 
25
+ data_list = [
26
+ "data/test.png",
27
+ ]
 
28
 
29
 
30
+ def controlnet_hed(image_path: str, controlnet_hed_model_path: str):
31
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
32
 
33
  image = Image.open(image_path)
34
  image = hed(image)
35
 
36
  controlnet = ControlNetModel.from_pretrained(
37
+ controlnet_hed_model_path, torch_dtype=torch.float16
 
38
  )
39
  return controlnet, image
40
 
41
 
42
  def stable_diffusion_controlnet_hed(
43
+ image_path: str,
44
+ stable_model_path: str,
45
+ controlnet_hed_model_path: str,
46
+ prompt: str,
47
+ negative_prompt: str,
48
+ guidance_scale: int,
49
+ num_inference_step: int,
50
+ ):
51
+
52
+ controlnet, image = controlnet_hed(
53
+ image_path=image_path,
54
+ controlnet_hed_model_path=controlnet_hed_model_path,
55
+ )
56
 
57
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
58
+ pretrained_model_name_or_path=stable_model_path,
59
+ controlnet=controlnet,
60
+ safety_checker=None,
61
+ torch_dtype=torch.float16,
62
  )
63
 
64
  pipe.to("cuda")
 
66
  pipe.enable_xformers_memory_efficient_attention()
67
 
68
  output = pipe(
69
+ prompt=prompt,
70
+ image=image,
71
+ negative_prompt=negative_prompt,
72
+ num_inference_steps=num_inference_step,
73
+ guidance_scale=guidance_scale,
74
  ).images
75
 
76
  return output[0]
77
 
78
+
79
  def stable_diffusion_controlnet_hed_app():
80
  with gr.Blocks():
81
  with gr.Row():
82
  with gr.Column():
83
  controlnet_hed_image_file = gr.Image(
84
+ type="filepath", label="Image"
 
85
  )
86
 
87
  controlnet_hed_stable_model_id = gr.Dropdown(
88
+ choices=stable_model_list,
89
+ value=stable_model_list[0],
90
+ label="Stable Model Id",
91
  )
92
+
93
  controlnet_hed_model_id = gr.Dropdown(
94
  choices=stable_model_list,
95
  value=stable_model_list[1],
96
+ label="ControlNet Model Id",
97
  )
98
 
99
  controlnet_hed_prompt = gr.Textbox(
100
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
101
  )
102
 
103
  controlnet_hed_negative_prompt = gr.Textbox(
104
+ lines=1,
105
+ value=stable_negative_prompt_list[0],
106
+ label="Negative Prompt",
107
  )
108
 
109
  with gr.Accordion("Advanced Options", open=False):
110
  controlnet_hed_guidance_scale = gr.Slider(
111
+ minimum=0.1,
112
+ maximum=15,
113
+ step=0.1,
114
+ value=7.5,
115
+ label="Guidance Scale",
116
  )
117
 
118
  controlnet_hed_num_inference_step = gr.Slider(
119
+ minimum=1,
120
+ maximum=100,
121
+ step=1,
122
+ value=50,
123
+ label="Num Inference Step",
124
  )
125
 
126
+ controlnet_hed_predict = gr.Button(value="Generator")
127
+
 
128
  with gr.Column():
129
+ output_image = gr.Image(label="Output")
130
+
131
+ gr.Examples(
132
+ fn=stable_diffusion_controlnet_hed,
133
+ examples=[
134
+ [
135
+ data_list[0],
136
+ stable_model_list[0],
137
+ controlnet_hed_model_list[0],
138
+ stable_prompt_list[0],
139
+ stable_negative_prompt_list[0],
140
+ 7.5,
141
+ 50,
142
+ ]
143
+ ],
144
+ inputs=[
145
+ controlnet_hed_image_file,
146
+ controlnet_hed_stable_model_id,
147
+ controlnet_hed_model_id,
148
+ controlnet_hed_prompt,
149
+ controlnet_hed_negative_prompt,
150
+ controlnet_hed_guidance_scale,
151
+ controlnet_hed_num_inference_step,
152
+ ],
153
+ outputs=[output_image],
154
+ cache_examples=False,
155
+ label="ControlNet HED Example",
156
+ )
157
+
158
  controlnet_hed_predict.click(
159
  fn=stable_diffusion_controlnet_hed,
160
  inputs=[
 
166
  controlnet_hed_guidance_scale,
167
  controlnet_hed_num_inference_step,
168
  ],
169
+ outputs=[output_image],
170
  )
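Note: every module imports UniPCMultistepScheduler, but none of the hunks shown here attach it to the pipeline. If it is wired up elsewhere in these files, the standard diffusers pattern looks like this (model ids taken from the lists above, device handling assumed):

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-hed", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)
# Swap the default scheduler for UniPC, reusing the existing scheduler config.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")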
 
diffusion_webui/controlnet/controlnet_mlsd.py CHANGED
@@ -1,59 +1,56 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler)
3
-
4
- from controlnet_aux import MLSDdetector
5
- from PIL import Image
6
  import gradio as gr
7
  import torch
 
 
 
 
 
 
 
8
 
9
  stable_model_list = [
10
  "runwayml/stable-diffusion-v1-5",
11
- "stabilityai/stable-diffusion-2",
12
- "stabilityai/stable-diffusion-2-base",
13
- "stabilityai/stable-diffusion-2-1",
14
- "stabilityai/stable-diffusion-2-1-base"
15
  ]
16
 
17
- stable_prompt_list = [
18
- "a photo of a man.",
19
- "a photo of a girl."
20
- ]
21
 
22
- stable_negative_prompt_list = [
23
- "bad, ugly",
24
- "deformed"
25
- ]
26
 
 
 
 
27
 
28
- def controlnet_mlsd(image_path:str):
29
- mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
 
30
 
31
  image = Image.open(image_path)
32
  image = mlsd(image)
33
 
34
  controlnet = ControlNetModel.from_pretrained(
35
- "fusing/stable-diffusion-v1-5-controlnet-mlsd",
36
- torch_dtype=torch.float16
37
  )
38
 
39
  return controlnet, image
40
 
 
41
  def stable_diffusion_controlnet_mlsd(
42
- image_path:str,
43
- model_path:str,
44
- prompt:str,
45
- negative_prompt:str,
46
- guidance_scale:int,
47
- num_inference_step:int,
48
- ):
49
 
50
  controlnet, image = controlnet_mlsd(image_path=image_path)
51
 
52
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
53
- pretrained_model_name_or_path=model_path,
54
- controlnet=controlnet,
55
- safety_checker=None,
56
- torch_dtype=torch.float16
57
  )
58
 
59
  pipe.to("cuda")
@@ -61,64 +58,87 @@ def stable_diffusion_controlnet_mlsd(
61
  pipe.enable_xformers_memory_efficient_attention()
62
 
63
  output = pipe(
64
- prompt = prompt,
65
- image = image,
66
- negative_prompt = negative_prompt,
67
- num_inference_steps = num_inference_step,
68
- guidance_scale = guidance_scale,
69
  ).images
70
 
71
  return output[0]
72
 
 
73
  def stable_diffusion_controlnet_mlsd_app():
74
  with gr.Blocks():
75
  with gr.Row():
76
  with gr.Column():
77
  controlnet_mlsd_image_file = gr.Image(
78
- type='filepath',
79
- label='Image'
80
  )
81
 
82
  controlnet_mlsd_model_id = gr.Dropdown(
83
- choices=stable_model_list,
84
- value=stable_model_list[0],
85
- label='Stable Model Id'
86
  )
87
 
88
  controlnet_mlsd_prompt = gr.Textbox(
89
- lines=1,
90
- value=stable_prompt_list[0],
91
- label='Prompt'
92
  )
93
 
94
  controlnet_mlsd_negative_prompt = gr.Textbox(
95
- lines=1,
96
- value=stable_negative_prompt_list[0],
97
- label='Negative Prompt'
98
  )
99
 
100
  with gr.Accordion("Advanced Options", open=False):
101
  controlnet_mlsd_guidance_scale = gr.Slider(
102
- minimum=0.1,
103
- maximum=15,
104
- step=0.1,
105
- value=7.5,
106
- label='Guidance Scale'
107
  )
108
 
109
  controlnet_mlsd_num_inference_step = gr.Slider(
110
- minimum=1,
111
- maximum=100,
112
- step=1,
113
- value=50,
114
- label='Num Inference Step'
115
  )
116
 
117
- controlnet_mlsd_predict = gr.Button(value='Generator')
118
 
119
  with gr.Column():
120
- output_image = gr.Image(label='Output')
121
-
 
 
 
 
 
 
122
  controlnet_mlsd_predict.click(
123
  fn=stable_diffusion_controlnet_mlsd,
124
  inputs=[
@@ -127,7 +147,7 @@ def stable_diffusion_controlnet_mlsd_app():
127
  controlnet_mlsd_prompt,
128
  controlnet_mlsd_negative_prompt,
129
  controlnet_mlsd_guidance_scale,
130
- controlnet_mlsd_num_inference_step
131
  ],
132
- outputs=output_image
133
  )
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from controlnet_aux import MLSDdetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
 
11
  stable_model_list = [
12
  "runwayml/stable-diffusion-v1-5",
 
 
 
 
13
  ]
14
 
15
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
 
 
 
16
 
17
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
 
 
18
 
19
+ data_list = [
20
+ "data/test.png",
21
+ ]
22
 
23
+
24
+ def controlnet_mlsd(image_path: str):
25
+ mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
26
 
27
  image = Image.open(image_path)
28
  image = mlsd(image)
29
 
30
  controlnet = ControlNetModel.from_pretrained(
31
+ "lllyasviel/sd-controlnet-mlsd",
32
+ torch_dtype=torch.float16,
33
  )
34
 
35
  return controlnet, image
36
 
37
+
38
  def stable_diffusion_controlnet_mlsd(
39
+ image_path: str,
40
+ model_path: str,
41
+ prompt: str,
42
+ negative_prompt: str,
43
+ guidance_scale: int,
44
+ num_inference_step: int,
45
+ ):
46
 
47
  controlnet, image = controlnet_mlsd(image_path=image_path)
48
 
49
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
50
+ pretrained_model_name_or_path=model_path,
51
+ controlnet=controlnet,
52
+ safety_checker=None,
53
+ torch_dtype=torch.float16,
54
  )
55
 
56
  pipe.to("cuda")
 
58
  pipe.enable_xformers_memory_efficient_attention()
59
 
60
  output = pipe(
61
+ prompt=prompt,
62
+ image=image,
63
+ negative_prompt=negative_prompt,
64
+ num_inference_steps=num_inference_step,
65
+ guidance_scale=guidance_scale,
66
  ).images
67
 
68
  return output[0]
69
 
70
+
71
  def stable_diffusion_controlnet_mlsd_app():
72
  with gr.Blocks():
73
  with gr.Row():
74
  with gr.Column():
75
  controlnet_mlsd_image_file = gr.Image(
76
+ type="filepath", label="Image"
 
77
  )
78
 
79
  controlnet_mlsd_model_id = gr.Dropdown(
80
+ choices=stable_model_list,
81
+ value=stable_model_list[0],
82
+ label="Stable Model Id",
83
  )
84
 
85
  controlnet_mlsd_prompt = gr.Textbox(
86
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
87
  )
88
 
89
  controlnet_mlsd_negative_prompt = gr.Textbox(
90
+ lines=1,
91
+ value=stable_negative_prompt_list[0],
92
+ label="Negative Prompt",
93
  )
94
 
95
  with gr.Accordion("Advanced Options", open=False):
96
  controlnet_mlsd_guidance_scale = gr.Slider(
97
+ minimum=0.1,
98
+ maximum=15,
99
+ step=0.1,
100
+ value=7.5,
101
+ label="Guidance Scale",
102
  )
103
 
104
  controlnet_mlsd_num_inference_step = gr.Slider(
105
+ minimum=1,
106
+ maximum=100,
107
+ step=1,
108
+ value=50,
109
+ label="Num Inference Step",
110
  )
111
 
112
+ controlnet_mlsd_predict = gr.Button(value="Generator")
113
 
114
  with gr.Column():
115
+ output_image = gr.Image(label="Output")
116
+
117
+ gr.Examples(
118
+ fn=stable_diffusion_controlnet_mlsd,
119
+ examples=[
120
+ [
121
+ data_list[0],
122
+ stable_model_list[0],
123
+ stable_prompt_list[0],
124
+ stable_negative_prompt_list[0],
125
+ 7.5,
126
+ 50,
127
+ ]
128
+ ],
129
+ inputs=[
130
+ controlnet_mlsd_image_file,
131
+ controlnet_mlsd_model_id,
132
+ controlnet_mlsd_prompt,
133
+ controlnet_mlsd_negative_prompt,
134
+ controlnet_mlsd_guidance_scale,
135
+ controlnet_mlsd_num_inference_step,
136
+ ],
137
+ outputs=[output_image],
138
+ label="ControlNet-MLSD Example",
139
+ cache_examples=False,
140
+ )
141
+
142
  controlnet_mlsd_predict.click(
143
  fn=stable_diffusion_controlnet_mlsd,
144
  inputs=[
 
147
  controlnet_mlsd_prompt,
148
  controlnet_mlsd_negative_prompt,
149
  controlnet_mlsd_guidance_scale,
150
+ controlnet_mlsd_num_inference_step,
151
  ],
152
+ outputs=output_image,
153
  )
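Note: each pipeline calls pipe.enable_xformers_memory_efficient_attention() unconditionally, which raises if xformers is not installed. A hedged sketch of a more defensive variant, not part of this commit:

def enable_memory_efficient_attention(pipe):
    # Prefer xformers when it is available; otherwise fall back to attention slicing.
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pipe.enable_attention_slicing()
    return pipe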
diffusion_webui/controlnet/controlnet_pose.py CHANGED
@@ -1,11 +1,12 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler)
3
-
4
- from controlnet_aux import OpenposeDetector
5
-
6
- from PIL import Image
7
  import gradio as gr
8
  import torch
 
 
 
 
 
 
 
9
 
10
  stable_model_list = [
11
  "runwayml/stable-diffusion-v1-5",
@@ -13,51 +14,52 @@ stable_model_list = [
13
  ]
14
 
15
  controlnet_pose_model_list = [
16
- "fusing/stable-diffusion-v1-5-controlnet-openpose",
17
- "thibaud/controlnet-sd21-openpose-diffusers"
18
  ]
19
 
20
- stable_prompt_list = [
21
- "a photo of a man.",
22
- "a photo of a girl."
23
- ]
24
 
25
- stable_negative_prompt_list = [
26
- "bad, ugly",
27
- "deformed"
28
- ]
29
 
 
 
 
30
 
31
- def controlnet_pose(image_path:str, controlnet_pose_model_path:str):
32
- openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
 
33
 
34
  image = Image.open(image_path)
35
  image = openpose(image)
36
 
37
  controlnet = ControlNetModel.from_pretrained(
38
- controlnet_pose_model_path,
39
- torch_dtype=torch.float16
40
  )
41
 
42
  return controlnet, image
43
 
44
- def stable_diffusion_controlnet_pose(
45
- image_path:str,
46
- stable_model_path:str,
47
- controlnet_pose_model_path:str,
48
- prompt:str,
49
- negative_prompt:str,
50
- guidance_scale:int,
51
- num_inference_step:int,
52
- ):
53
 
54
- controlnet, image = controlnet_pose(image_path=image_path, controlnet_pose_model_path=controlnet_pose_model_path)
 
 
 
 
 
 
55
 
56
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
57
- pretrained_model_name_or_path=-stable_model_path,
58
- controlnet=controlnet,
59
- safety_checker=None,
60
- torch_dtype=torch.float16
61
  )
62
 
63
  pipe.to("cuda")
@@ -65,11 +67,11 @@ def stable_diffusion_controlnet_pose(
65
  pipe.enable_xformers_memory_efficient_attention()
66
 
67
  output = pipe(
68
- prompt = prompt,
69
- image = image,
70
- negative_prompt = negative_prompt,
71
- num_inference_steps = num_inference_step,
72
- guidance_scale = guidance_scale,
73
  ).images
74
 
75
  return output[0]
@@ -80,57 +82,79 @@ def stable_diffusion_controlnet_pose_app():
80
  with gr.Row():
81
  with gr.Column():
82
  controlnet_pose_image_file = gr.Image(
83
- type='filepath',
84
- label='Image'
85
  )
86
 
87
  controlnet_pose_stable_model_id = gr.Dropdown(
88
- choices=stable_model_list,
89
- value=stable_model_list[0],
90
- label='Stable Model Id'
91
  )
92
-
93
  controlnet_pose_model_id = gr.Dropdown(
94
  choices=stable_model_list,
95
  value=stable_model_list[1],
96
- label='ControlNet Model Id'
97
  )
98
-
99
 
100
  controlnet_pose_prompt = gr.Textbox(
101
- lines=1,
102
- value=stable_prompt_list[0],
103
- label='Prompt'
104
  )
105
 
106
  controlnet_pose_negative_prompt = gr.Textbox(
107
- lines=1,
108
- value=stable_negative_prompt_list[0],
109
- label='Negative Prompt'
110
  )
111
 
112
  with gr.Accordion("Advanced Options", open=False):
113
  controlnet_pose_guidance_scale = gr.Slider(
114
- minimum=0.1,
115
- maximum=15,
116
- step=0.1,
117
- value=7.5,
118
- label='Guidance Scale'
119
  )
120
 
121
  controlnet_pose_num_inference_step = gr.Slider(
122
- minimum=1,
123
- maximum=100,
124
- step=1,
125
- value=50,
126
- label='Num Inference Step'
127
  )
128
 
129
- controlnet_pose_predict = gr.Button(value='Generator')
130
 
131
  with gr.Column():
132
- output_image = gr.Image(label='Output')
133
-
 
 
 
 
134
  controlnet_pose_predict.click(
135
  fn=stable_diffusion_controlnet_pose,
136
  inputs=[
@@ -142,5 +166,5 @@ def stable_diffusion_controlnet_pose_app():
142
  controlnet_pose_guidance_scale,
143
  controlnet_pose_num_inference_step,
144
  ],
145
- outputs=output_image
146
  )
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from controlnet_aux import OpenposeDetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
 
11
  stable_model_list = [
12
  "runwayml/stable-diffusion-v1-5",
 
14
  ]
15
 
16
  controlnet_pose_model_list = [
17
+ "lllyasviel/sd-controlnet-openpose",
18
+ "thibaud/controlnet-sd21-openpose-diffusers",
19
  ]
20
 
21
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
 
 
 
22
 
23
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
 
 
24
 
25
+ data_list = [
26
+ "data/test.png",
27
+ ]
28
 
29
+
30
+ def controlnet_pose(image_path: str, controlnet_pose_model_path: str):
31
+ openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
32
 
33
  image = Image.open(image_path)
34
  image = openpose(image)
35
 
36
  controlnet = ControlNetModel.from_pretrained(
37
+ controlnet_pose_model_path, torch_dtype=torch.float16
 
38
  )
39
 
40
  return controlnet, image
41
 
 
 
 
 
42
 
43
+ def stable_diffusion_controlnet_pose(
44
+ image_path: str,
45
+ stable_model_path: str,
46
+ controlnet_pose_model_path: str,
47
+ prompt: str,
48
+ negative_prompt: str,
49
+ guidance_scale: int,
50
+ num_inference_step: int,
51
+ ):
52
+
53
+ controlnet, image = controlnet_pose(
54
+ image_path=image_path,
55
+ controlnet_pose_model_path=controlnet_pose_model_path,
56
+ )
57
 
58
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
59
+ pretrained_model_name_or_path=stable_model_path,
60
+ controlnet=controlnet,
61
+ safety_checker=None,
62
+ torch_dtype=torch.float16,
63
  )
64
 
65
  pipe.to("cuda")
 
67
  pipe.enable_xformers_memory_efficient_attention()
68
 
69
  output = pipe(
70
+ prompt=prompt,
71
+ image=image,
72
+ negative_prompt=negative_prompt,
73
+ num_inference_steps=num_inference_step,
74
+ guidance_scale=guidance_scale,
75
  ).images
76
 
77
  return output[0]
 
82
  with gr.Row():
83
  with gr.Column():
84
  controlnet_pose_image_file = gr.Image(
85
+ type="filepath", label="Image"
 
86
  )
87
 
88
  controlnet_pose_stable_model_id = gr.Dropdown(
89
+ choices=stable_model_list,
90
+ value=stable_model_list[0],
91
+ label="Stable Model Id",
92
  )
93
+
94
  controlnet_pose_model_id = gr.Dropdown(
95
  choices=stable_model_list,
96
  value=stable_model_list[1],
97
+ label="ControlNet Model Id",
98
  )
 
99
 
100
  controlnet_pose_prompt = gr.Textbox(
101
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
102
  )
103
 
104
  controlnet_pose_negative_prompt = gr.Textbox(
105
+ lines=1,
106
+ value=stable_negative_prompt_list[0],
107
+ label="Negative Prompt",
108
  )
109
 
110
  with gr.Accordion("Advanced Options", open=False):
111
  controlnet_pose_guidance_scale = gr.Slider(
112
+ minimum=0.1,
113
+ maximum=15,
114
+ step=0.1,
115
+ value=7.5,
116
+ label="Guidance Scale",
117
  )
118
 
119
  controlnet_pose_num_inference_step = gr.Slider(
120
+ minimum=1,
121
+ maximum=100,
122
+ step=1,
123
+ value=50,
124
+ label="Num Inference Step",
125
  )
126
 
127
+ controlnet_pose_predict = gr.Button(value="Generator")
128
 
129
  with gr.Column():
130
+ output_image = gr.Image(label="Output")
131
+
132
+ gr.Examples(
133
+ fn=stable_diffusion_controlnet_pose,
134
+ examples=[
135
+ [
136
+ data_list[0],
137
+ stable_model_list[0],
138
+ controlnet_pose_model_list[0],
139
+ stable_prompt_list[0],
140
+ stable_negative_prompt_list[0],
141
+ 7.5,
142
+ 50,
143
+ ]
144
+ ],
145
+ inputs=[
146
+ controlnet_pose_image_file,
147
+ controlnet_pose_stable_model_id,
148
+ controlnet_pose_model_id,
149
+ controlnet_pose_prompt,
150
+ controlnet_pose_negative_prompt,
151
+ controlnet_pose_guidance_scale,
152
+ controlnet_pose_num_inference_step,
153
+ ],
154
+ outputs=[output_image],
155
+ label="ControlNet Pose Example",
156
+ cache_examples=False,
157
+ )
158
  controlnet_pose_predict.click(
159
  fn=stable_diffusion_controlnet_pose,
160
  inputs=[
 
166
  controlnet_pose_guidance_scale,
167
  controlnet_pose_num_inference_step,
168
  ],
169
+ outputs=output_image,
170
  )
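Note: all of these functions hard-code torch_dtype=torch.float16 and pipe.to("cuda"), so they require a CUDA GPU. A small device-aware sketch, shown only as an assumption about how this could be relaxed, not as repo code:

import torch

def pick_device_and_dtype():
    # float16 is only worthwhile on GPU; fall back to float32 on CPU.
    if torch.cuda.is_available():
        return "cuda", torch.float16
    return "cpu", torch.float32

device, dtype = pick_device_and_dtype()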
diffusion_webui/controlnet/controlnet_scribble.py CHANGED
@@ -1,11 +1,12 @@
1
- from diffusers import ( StableDiffusionControlNetPipeline,
2
- ControlNetModel, UniPCMultistepScheduler)
3
-
4
- from controlnet_aux import HEDdetector
5
-
6
- from PIL import Image
7
  import gradio as gr
8
  import torch
 
 
 
 
 
 
 
9
 
10
  stable_model_list = [
11
  "runwayml/stable-diffusion-v1-5",
@@ -13,23 +14,21 @@ stable_model_list = [
13
  ]
14
 
15
  controlnet_hed_model_list = [
16
- "fusing/stable-diffusion-v1-5-controlnet-hed",
17
- "thibaud/controlnet-sd21-scribble-diffusers"
18
  ]
19
 
20
- stable_prompt_list = [
21
- "a photo of a man.",
22
- "a photo of a girl."
23
- ]
24
 
25
- stable_negative_prompt_list = [
26
- "bad, ugly",
27
- "deformed"
28
- ]
29
 
30
 
31
- def controlnet_scribble(image_path:str, controlnet_hed_model_path:str):
32
- hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
33
 
34
  image = Image.open(image_path)
35
  image = hed(image, scribble=True)
@@ -40,23 +39,27 @@ def controlnet_scribble(image_path:str, controlnet_hed_model_path:str):
40
 
41
  return controlnet, image
42
 
43
- def stable_diffusion_controlnet_scribble(
44
- image_path:str,
45
- stable_model_path:str,
46
- controlnet_hed_model_path:str,
47
- prompt:str,
48
- negative_prompt:str,
49
- guidance_scale:int,
50
- num_inference_step:int,
51
- ):
52
 
53
- controlnet, image = controlnet_scribble(image_path=image_path, controlnet_hed_model_path=controlnet_hed_model_path)
 
 
 
 
 
 
54
 
55
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
56
- pretrained_model_name_or_path=stable_model_path,
57
- controlnet=controlnet,
58
- safety_checker=None,
59
- torch_dtype=torch.float16
60
  )
61
 
62
  pipe.to("cuda")
@@ -64,70 +67,94 @@ def stable_diffusion_controlnet_scribble(
64
  pipe.enable_xformers_memory_efficient_attention()
65
 
66
  output = pipe(
67
- prompt = prompt,
68
- image = image,
69
- negative_prompt = negative_prompt,
70
- num_inference_steps = num_inference_step,
71
- guidance_scale = guidance_scale,
72
  ).images
73
 
74
  return output[0]
75
 
 
76
  def stable_diffusion_controlnet_scribble_app():
77
  with gr.Blocks():
78
  with gr.Row():
79
  with gr.Column():
80
  controlnet_scribble_image_file = gr.Image(
81
- type='filepath',
82
- label='Image'
83
  )
84
 
85
  controlnet_scribble_stablev1_model_id = gr.Dropdown(
86
- choices=stable_model_list,
87
- value=stable_model_list[0],
88
- label='Stable v1.5 Model Id'
89
  )
90
-
91
  controlnet_scribble_stablev2_model_id = gr.Dropdown(
92
  choices=stable_model_list,
93
  value=stable_model_list[1],
94
- label='Stable v2.1 Model Id'
95
  )
96
 
97
  controlnet_scribble_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_prompt_list[0],
100
- label='Prompt'
101
  )
102
 
103
  controlnet_scribble_negative_prompt = gr.Textbox(
104
- lines=1,
105
- value=stable_negative_prompt_list[0],
106
- label='Negative Prompt'
107
  )
108
 
109
  with gr.Accordion("Advanced Options", open=False):
110
  controlnet_scribble_guidance_scale = gr.Slider(
111
- minimum=0.1,
112
- maximum=15,
113
- step=0.1,
114
- value=7.5,
115
- label='Guidance Scale'
116
  )
117
 
118
  controlnet_scribble_num_inference_step = gr.Slider(
119
- minimum=1,
120
- maximum=100,
121
- step=1,
122
- value=50,
123
- label='Num Inference Step'
124
  )
125
 
126
- controlnet_scribble_predict = gr.Button(value='Generator')
127
 
128
  with gr.Column():
129
- output_image = gr.Image(label='Output')
130
-
 
 
 
 
 
 
131
  controlnet_scribble_predict.click(
132
  fn=stable_diffusion_controlnet_scribble,
133
  inputs=[
@@ -139,5 +166,5 @@ def stable_diffusion_controlnet_scribble_app():
139
  controlnet_scribble_guidance_scale,
140
  controlnet_scribble_num_inference_step,
141
  ],
142
- outputs=output_image
143
  )
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from controlnet_aux import HEDdetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
 
11
  stable_model_list = [
12
  "runwayml/stable-diffusion-v1-5",
 
14
  ]
15
 
16
  controlnet_hed_model_list = [
17
+ "lllyasviel/sd-controlnet-scribble",
18
+ "thibaud/controlnet-sd21-scribble-diffusers",
19
  ]
20
 
21
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
22
+
23
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
24
 
25
+ data_list = [
26
+ "data/test.png",
27
+ ]
 
28
 
29
 
30
+ def controlnet_scribble(image_path: str, controlnet_hed_model_path: str):
31
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
32
 
33
  image = Image.open(image_path)
34
  image = hed(image, scribble=True)
 
39
 
40
  return controlnet, image
41
 
 
 
 
 
 
 
 
 
 
42
 
43
+ def stable_diffusion_controlnet_scribble(
44
+ image_path: str,
45
+ stable_model_path: str,
46
+ controlnet_hed_model_path: str,
47
+ prompt: str,
48
+ negative_prompt: str,
49
+ guidance_scale: int,
50
+ num_inference_step: int,
51
+ ):
52
+
53
+ controlnet, image = controlnet_scribble(
54
+ image_path=image_path,
55
+ controlnet_hed_model_path=controlnet_hed_model_path,
56
+ )
57
 
58
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
59
+ pretrained_model_name_or_path=stable_model_path,
60
+ controlnet=controlnet,
61
+ safety_checker=None,
62
+ torch_dtype=torch.float16,
63
  )
64
 
65
  pipe.to("cuda")
 
67
  pipe.enable_xformers_memory_efficient_attention()
68
 
69
  output = pipe(
70
+ prompt=prompt,
71
+ image=image,
72
+ negative_prompt=negative_prompt,
73
+ num_inference_steps=num_inference_step,
74
+ guidance_scale=guidance_scale,
75
  ).images
76
 
77
  return output[0]
78
 
79
+
80
  def stable_diffusion_controlnet_scribble_app():
81
  with gr.Blocks():
82
  with gr.Row():
83
  with gr.Column():
84
  controlnet_scribble_image_file = gr.Image(
85
+ type="filepath", label="Image"
 
86
  )
87
 
88
  controlnet_scribble_stablev1_model_id = gr.Dropdown(
89
+ choices=stable_model_list,
90
+ value=stable_model_list[0],
91
+ label="Stable v1.5 Model Id",
92
  )
93
+
94
  controlnet_scribble_stablev2_model_id = gr.Dropdown(
95
  choices=stable_model_list,
96
  value=stable_model_list[1],
97
+ label="Stable v2.1 Model Id",
98
  )
99
 
100
  controlnet_scribble_prompt = gr.Textbox(
101
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
102
  )
103
 
104
  controlnet_scribble_negative_prompt = gr.Textbox(
105
+ lines=1,
106
+ value=stable_negative_prompt_list[0],
107
+ label="Negative Prompt",
108
  )
109
 
110
  with gr.Accordion("Advanced Options", open=False):
111
  controlnet_scribble_guidance_scale = gr.Slider(
112
+ minimum=0.1,
113
+ maximum=15,
114
+ step=0.1,
115
+ value=7.5,
116
+ label="Guidance Scale",
117
  )
118
 
119
  controlnet_scribble_num_inference_step = gr.Slider(
120
+ minimum=1,
121
+ maximum=100,
122
+ step=1,
123
+ value=50,
124
+ label="Num Inference Step",
125
  )
126
 
127
+ controlnet_scribble_predict = gr.Button(value="Generator")
128
 
129
  with gr.Column():
130
+ output_image = gr.Image(label="Output")
131
+
132
+ gr.Examples(
133
+ fn=stable_diffusion_controlnet_scribble,
134
+ examples=[
135
+ [
136
+ data_list[0],
137
+ stable_model_list[0],
138
+ controlnet_hed_model_list[0],
139
+ stable_prompt_list[0],
140
+ stable_negative_prompt_list[0],
141
+ 7.5,
142
+ 50,
143
+ ],
144
+ ],
145
+ inputs=[
146
+ controlnet_scribble_image_file,
147
+ controlnet_scribble_stablev1_model_id,
148
+ controlnet_scribble_stablev2_model_id,
149
+ controlnet_scribble_prompt,
150
+ controlnet_scribble_negative_prompt,
151
+ controlnet_scribble_guidance_scale,
152
+ controlnet_scribble_num_inference_step,
153
+ ],
154
+ outputs=[output_image],
155
+ label="ControlNet Scribble Example",
156
+ cache_examples=False,
157
+ )
158
  controlnet_scribble_predict.click(
159
  fn=stable_diffusion_controlnet_scribble,
160
  inputs=[
 
166
  controlnet_scribble_guidance_scale,
167
  controlnet_scribble_num_inference_step,
168
  ],
169
+ outputs=output_image,
170
  )
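Note: the example row registered above corresponds to a direct call like the following usage sketch (it assumes data/test.png exists, the listed models can be downloaded, and a CUDA GPU is available):

result = stable_diffusion_controlnet_scribble(
    image_path="data/test.png",
    stable_model_path="runwayml/stable-diffusion-v1-5",
    controlnet_hed_model_path="lllyasviel/sd-controlnet-scribble",
    prompt="a photo of a man.",
    negative_prompt="bad, ugly",
    guidance_scale=7.5,
    num_inference_step=50,
)
result.save("scribble_output.png")  # the function returns a PIL.Image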
diffusion_webui/controlnet/controlnet_seg.py CHANGED
@@ -1,86 +1,200 @@
1
- from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
2
- import torch
3
- from diffusers import (StableDiffusionControlNetPipeline,
4
- ControlNetModel, UniPCMultistepScheduler)
5
-
6
-
7
- from PIL import Image
8
  import gradio as gr
9
  import numpy as np
10
  import torch
 
 
 
 
 
 
 
11
 
12
  stable_model_list = [
13
  "runwayml/stable-diffusion-v1-5",
14
- "stabilityai/stable-diffusion-2",
15
- "stabilityai/stable-diffusion-2-base",
16
- "stabilityai/stable-diffusion-2-1",
17
- "stabilityai/stable-diffusion-2-1-base"
18
  ]
19
 
20
- stable_prompt_list = [
21
- "a photo of a man.",
22
- "a photo of a girl."
23
- ]
24
 
25
- stable_negative_prompt_list = [
26
- "bad, ugly",
27
- "deformed"
28
- ]
 
29
 
30
 
31
  def ade_palette():
32
  """ADE20K palette that maps each class to RGB values."""
33
- return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
34
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
35
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
36
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
37
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
38
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
39
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
40
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
41
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
42
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
43
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
44
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
45
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
46
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
47
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
48
- [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
49
- [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
50
- [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
51
- [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
52
- [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
53
- [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
54
- [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
55
- [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
56
- [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
57
- [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
58
- [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
59
- [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
60
- [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
61
- [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
62
- [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
63
- [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
64
- [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
65
- [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
66
- [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
67
- [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
68
- [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
69
- [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
70
- [102, 255, 0], [92, 0, 255]]
71
-
72
-
73
- def controlnet_mlsd(image_path:str):
74
- image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
75
- image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
76
-
77
- image = Image.open(image_path).convert('RGB')
 
 
 
 
78
  pixel_values = image_processor(image, return_tensors="pt").pixel_values
79
 
80
  with torch.no_grad():
81
  outputs = image_segmentor(pixel_values)
82
 
83
- seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
 
 
84
 
85
  color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
86
  palette = np.array(ade_palette())
@@ -91,28 +205,28 @@ def controlnet_mlsd(image_path:str):
91
  color_seg = color_seg.astype(np.uint8)
92
  image = Image.fromarray(color_seg)
93
  controlnet = ControlNetModel.from_pretrained(
94
- "fusing/stable-diffusion-v1-5-controlnet-seg", torch_dtype=torch.float16
95
  )
96
 
97
  return controlnet, image
98
 
99
 
100
  def stable_diffusion_controlnet_seg(
101
- image_path:str,
102
- model_path:str,
103
- prompt:str,
104
- negative_prompt:str,
105
- guidance_scale:int,
106
- num_inference_step:int,
107
- ):
108
 
109
  controlnet, image = controlnet_mlsd(image_path=image_path)
110
 
111
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
112
- pretrained_model_name_or_path=model_path,
113
- controlnet=controlnet,
114
- safety_checker=None,
115
- torch_dtype=torch.float16
116
  )
117
 
118
  pipe.to("cuda")
@@ -120,64 +234,87 @@ def stable_diffusion_controlnet_seg(
120
  pipe.enable_xformers_memory_efficient_attention()
121
 
122
  output = pipe(
123
- prompt = prompt,
124
- image = image,
125
- negative_prompt = negative_prompt,
126
- num_inference_steps = num_inference_step,
127
- guidance_scale = guidance_scale,
128
  ).images
129
 
130
  return output[0]
131
 
 
132
  def stable_diffusion_controlnet_seg_app():
133
  with gr.Blocks():
134
  with gr.Row():
135
  with gr.Column():
136
  controlnet_seg_image_file = gr.Image(
137
- type='filepath',
138
- label='Image'
139
  )
140
 
141
  controlnet_seg_model_id = gr.Dropdown(
142
- choices=stable_model_list,
143
- value=stable_model_list[0],
144
- label='Stable Model Id'
145
  )
146
 
147
  controlnet_seg_prompt = gr.Textbox(
148
- lines=1,
149
- value=stable_prompt_list[0],
150
- label='Prompt'
151
  )
152
 
153
  controlnet_seg_negative_prompt = gr.Textbox(
154
- lines=1,
155
- value=stable_negative_prompt_list[0],
156
- label='Negative Prompt'
157
  )
158
 
159
  with gr.Accordion("Advanced Options", open=False):
160
  controlnet_seg_guidance_scale = gr.Slider(
161
- minimum=0.1,
162
- maximum=15,
163
- step=0.1,
164
- value=7.5,
165
- label='Guidance Scale'
166
  )
167
 
168
  controlnet_seg_num_inference_step = gr.Slider(
169
- minimum=1,
170
- maximum=100,
171
- step=1,
172
- value=50,
173
- label='Num Inference Step'
174
  )
175
 
176
- controlnet_seg_predict = gr.Button(value='Generator')
177
 
178
  with gr.Column():
179
- output_image = gr.Image(label='Output')
180
-
 
 
 
 
 
 
181
  controlnet_seg_predict.click(
182
  fn=stable_diffusion_controlnet_seg,
183
  inputs=[
@@ -190,4 +327,3 @@ def stable_diffusion_controlnet_seg_app():
190
  ],
191
  outputs=[output_image],
192
  )
193
-
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import numpy as np
3
  import torch
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
11
 
12
  stable_model_list = [
13
  "runwayml/stable-diffusion-v1-5",
 
 
 
 
14
  ]
15
 
16
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
 
 
 
17
 
18
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
19
+
20
+ data_list = [
21
+ "data/test.png",
22
+ ]
23
 
24
 
25
  def ade_palette():
26
  """ADE20K palette that maps each class to RGB values."""
27
+ return [
28
+ [120, 120, 120],
29
+ [180, 120, 120],
30
+ [6, 230, 230],
31
+ [80, 50, 50],
32
+ [4, 200, 3],
33
+ [120, 120, 80],
34
+ [140, 140, 140],
35
+ [204, 5, 255],
36
+ [230, 230, 230],
37
+ [4, 250, 7],
38
+ [224, 5, 255],
39
+ [235, 255, 7],
40
+ [150, 5, 61],
41
+ [120, 120, 70],
42
+ [8, 255, 51],
43
+ [255, 6, 82],
44
+ [143, 255, 140],
45
+ [204, 255, 4],
46
+ [255, 51, 7],
47
+ [204, 70, 3],
48
+ [0, 102, 200],
49
+ [61, 230, 250],
50
+ [255, 6, 51],
51
+ [11, 102, 255],
52
+ [255, 7, 71],
53
+ [255, 9, 224],
54
+ [9, 7, 230],
55
+ [220, 220, 220],
56
+ [255, 9, 92],
57
+ [112, 9, 255],
58
+ [8, 255, 214],
59
+ [7, 255, 224],
60
+ [255, 184, 6],
61
+ [10, 255, 71],
62
+ [255, 41, 10],
63
+ [7, 255, 255],
64
+ [224, 255, 8],
65
+ [102, 8, 255],
66
+ [255, 61, 6],
67
+ [255, 194, 7],
68
+ [255, 122, 8],
69
+ [0, 255, 20],
70
+ [255, 8, 41],
71
+ [255, 5, 153],
72
+ [6, 51, 255],
73
+ [235, 12, 255],
74
+ [160, 150, 20],
75
+ [0, 163, 255],
76
+ [140, 140, 140],
77
+ [250, 10, 15],
78
+ [20, 255, 0],
79
+ [31, 255, 0],
80
+ [255, 31, 0],
81
+ [255, 224, 0],
82
+ [153, 255, 0],
83
+ [0, 0, 255],
84
+ [255, 71, 0],
85
+ [0, 235, 255],
86
+ [0, 173, 255],
87
+ [31, 0, 255],
88
+ [11, 200, 200],
89
+ [255, 82, 0],
90
+ [0, 255, 245],
91
+ [0, 61, 255],
92
+ [0, 255, 112],
93
+ [0, 255, 133],
94
+ [255, 0, 0],
95
+ [255, 163, 0],
96
+ [255, 102, 0],
97
+ [194, 255, 0],
98
+ [0, 143, 255],
99
+ [51, 255, 0],
100
+ [0, 82, 255],
101
+ [0, 255, 41],
102
+ [0, 255, 173],
103
+ [10, 0, 255],
104
+ [173, 255, 0],
105
+ [0, 255, 153],
106
+ [255, 92, 0],
107
+ [255, 0, 255],
108
+ [255, 0, 245],
109
+ [255, 0, 102],
110
+ [255, 173, 0],
111
+ [255, 0, 20],
112
+ [255, 184, 184],
113
+ [0, 31, 255],
114
+ [0, 255, 61],
115
+ [0, 71, 255],
116
+ [255, 0, 204],
117
+ [0, 255, 194],
118
+ [0, 255, 82],
119
+ [0, 10, 255],
120
+ [0, 112, 255],
121
+ [51, 0, 255],
122
+ [0, 194, 255],
123
+ [0, 122, 255],
124
+ [0, 255, 163],
125
+ [255, 153, 0],
126
+ [0, 255, 10],
127
+ [255, 112, 0],
128
+ [143, 255, 0],
129
+ [82, 0, 255],
130
+ [163, 255, 0],
131
+ [255, 235, 0],
132
+ [8, 184, 170],
133
+ [133, 0, 255],
134
+ [0, 255, 92],
135
+ [184, 0, 255],
136
+ [255, 0, 31],
137
+ [0, 184, 255],
138
+ [0, 214, 255],
139
+ [255, 0, 112],
140
+ [92, 255, 0],
141
+ [0, 224, 255],
142
+ [112, 224, 255],
143
+ [70, 184, 160],
144
+ [163, 0, 255],
145
+ [153, 0, 255],
146
+ [71, 255, 0],
147
+ [255, 0, 163],
148
+ [255, 204, 0],
149
+ [255, 0, 143],
150
+ [0, 255, 235],
151
+ [133, 255, 0],
152
+ [255, 0, 235],
153
+ [245, 0, 255],
154
+ [255, 0, 122],
155
+ [255, 245, 0],
156
+ [10, 190, 212],
157
+ [214, 255, 0],
158
+ [0, 204, 255],
159
+ [20, 0, 255],
160
+ [255, 255, 0],
161
+ [0, 153, 255],
162
+ [0, 41, 255],
163
+ [0, 255, 204],
164
+ [41, 0, 255],
165
+ [41, 255, 0],
166
+ [173, 0, 255],
167
+ [0, 245, 255],
168
+ [71, 0, 255],
169
+ [122, 0, 255],
170
+ [0, 255, 184],
171
+ [0, 92, 255],
172
+ [184, 255, 0],
173
+ [0, 133, 255],
174
+ [255, 214, 0],
175
+ [25, 194, 194],
176
+ [102, 255, 0],
177
+ [92, 0, 255],
178
+ ]
179
+
180
+
181
+ def controlnet_mlsd(image_path: str):
182
+ image_processor = AutoImageProcessor.from_pretrained(
183
+ "openmmlab/upernet-convnext-small"
184
+ )
185
+ image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
186
+ "openmmlab/upernet-convnext-small"
187
+ )
188
+
189
+ image = Image.open(image_path).convert("RGB")
190
  pixel_values = image_processor(image, return_tensors="pt").pixel_values
191
 
192
  with torch.no_grad():
193
  outputs = image_segmentor(pixel_values)
194
 
195
+ seg = image_processor.post_process_semantic_segmentation(
196
+ outputs, target_sizes=[image.size[::-1]]
197
+ )[0]
198
 
199
  color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
200
  palette = np.array(ade_palette())
 
205
  color_seg = color_seg.astype(np.uint8)
206
  image = Image.fromarray(color_seg)
207
  controlnet = ControlNetModel.from_pretrained(
208
+ "lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16
209
  )
210
 
211
  return controlnet, image
212
 
213
 
214
  def stable_diffusion_controlnet_seg(
215
+ image_path: str,
216
+ model_path: str,
217
+ prompt: str,
218
+ negative_prompt: str,
219
+ guidance_scale: int,
220
+ num_inference_step: int,
221
+ ):
222
 
223
  controlnet, image = controlnet_mlsd(image_path=image_path)
224
 
225
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
226
+ pretrained_model_name_or_path=model_path,
227
+ controlnet=controlnet,
228
+ safety_checker=None,
229
+ torch_dtype=torch.float16,
230
  )
231
 
232
  pipe.to("cuda")
 
234
  pipe.enable_xformers_memory_efficient_attention()
235
 
236
  output = pipe(
237
+ prompt=prompt,
238
+ image=image,
239
+ negative_prompt=negative_prompt,
240
+ num_inference_steps=num_inference_step,
241
+ guidance_scale=guidance_scale,
242
  ).images
243
 
244
  return output[0]
245
 
246
+
247
  def stable_diffusion_controlnet_seg_app():
248
  with gr.Blocks():
249
  with gr.Row():
250
  with gr.Column():
251
  controlnet_seg_image_file = gr.Image(
252
+ type="filepath", label="Image"
 
253
  )
254
 
255
  controlnet_seg_model_id = gr.Dropdown(
256
+ choices=stable_model_list,
257
+ value=stable_model_list[0],
258
+ label="Stable Model Id",
259
  )
260
 
261
  controlnet_seg_prompt = gr.Textbox(
262
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
263
  )
264
 
265
  controlnet_seg_negative_prompt = gr.Textbox(
266
+ lines=1,
267
+ value=stable_negative_prompt_list[0],
268
+ label="Negative Prompt",
269
  )
270
 
271
  with gr.Accordion("Advanced Options", open=False):
272
  controlnet_seg_guidance_scale = gr.Slider(
273
+ minimum=0.1,
274
+ maximum=15,
275
+ step=0.1,
276
+ value=7.5,
277
+ label="Guidance Scale",
278
  )
279
 
280
  controlnet_seg_num_inference_step = gr.Slider(
281
+ minimum=1,
282
+ maximum=100,
283
+ step=1,
284
+ value=50,
285
+ label="Num Inference Step",
286
  )
287
 
288
+ controlnet_seg_predict = gr.Button(value="Generator")
289
 
290
  with gr.Column():
291
+ output_image = gr.Image(label="Output")
292
+
293
+ gr.Examples(
294
+ fn=stable_diffusion_controlnet_seg,
295
+ examples=[
296
+ [
297
+ data_list[0],
298
+ stable_model_list[0],
299
+ stable_prompt_list[0],
300
+ stable_negative_prompt_list[0],
301
+ 7.5,
302
+ 50,
303
+ ],
304
+ ],
305
+ inputs=[
306
+ controlnet_seg_image_file,
307
+ controlnet_seg_model_id,
308
+ controlnet_seg_prompt,
309
+ controlnet_seg_negative_prompt,
310
+ controlnet_seg_guidance_scale,
311
+ controlnet_seg_num_inference_step,
312
+ ],
313
+ outputs=[output_image],
314
+ cache_examples=False,
315
+ label="ControlNet Segmentation Example",
316
+ )
317
+
318
  controlnet_seg_predict.click(
319
  fn=stable_diffusion_controlnet_seg,
320
  inputs=[
 
327
  ],
328
  outputs=[output_image],
329
  )
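
Note: the two hunks above skip the unchanged colouring loop that sits between palette = np.array(ade_palette()) and color_seg = color_seg.astype(np.uint8). For readers tracing how the UperNet output becomes a ControlNet conditioning image, that elided step is, in the standard diffusers segmentation recipe (a sketch for orientation, not quoted from this file):

    # paint every ADE20K class id with its palette colour
    for label, color in enumerate(palette):
        color_seg[seg == label, :] = color

The resulting RGB map is then wrapped with Image.fromarray(color_seg) and handed to the seg ControlNet as the conditioning image.
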
 
diffusion_webui/helpers.py ADDED
@@ -0,0 +1,33 @@
 
 
1
+ from diffusion_webui.controlnet.controlnet_canny import (
2
+ stable_diffusion_controlnet_canny_app,
3
+ )
4
+ from diffusion_webui.controlnet.controlnet_depth import (
5
+ stable_diffusion_controlnet_depth_app,
6
+ )
7
+ from diffusion_webui.controlnet.controlnet_hed import (
8
+ stable_diffusion_controlnet_hed_app,
9
+ )
10
+ from diffusion_webui.controlnet.controlnet_mlsd import (
11
+ stable_diffusion_controlnet_mlsd_app,
12
+ )
13
+ from diffusion_webui.controlnet.controlnet_pose import (
14
+ stable_diffusion_controlnet_pose_app,
15
+ )
16
+ from diffusion_webui.controlnet.controlnet_scribble import (
17
+ stable_diffusion_controlnet_scribble_app,
18
+ )
19
+ from diffusion_webui.controlnet.controlnet_seg import (
20
+ stable_diffusion_controlnet_seg_app,
21
+ )
22
+ from diffusion_webui.stable_diffusion.img2img_app import (
23
+ stable_diffusion_img2img_app,
24
+ )
25
+ from diffusion_webui.stable_diffusion.inpaint_app import (
26
+ stable_diffusion_inpaint_app,
27
+ )
28
+ from diffusion_webui.stable_diffusion.keras_txt2img import (
29
+ keras_stable_diffusion_app,
30
+ )
31
+ from diffusion_webui.stable_diffusion.text2img_app import (
32
+ stable_diffusion_text2img_app,
33
+ )
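
Note: this new helpers module re-exports every per-tab constructor from a single flat namespace, so a caller needs only one import line. A minimal usage sketch (hypothetical caller code, not part of the commit):

    from diffusion_webui.helpers import stable_diffusion_controlnet_canny_app

    # builds the Canny tab's Gradio components inside the caller's Blocks/Tab context
    stable_diffusion_controlnet_canny_app()
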
diffusion_webui/stable_diffusion/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (191 Bytes).
 
diffusion_webui/stable_diffusion/__pycache__/img2img_app.cpython-38.pyc ADDED
Binary file (2.55 kB).
 
diffusion_webui/stable_diffusion/__pycache__/inpaint_app.cpython-38.pyc ADDED
Binary file (2.42 kB).
 
diffusion_webui/stable_diffusion/__pycache__/keras_txt2img.cpython-38.pyc ADDED
Binary file (2.79 kB).
 
diffusion_webui/stable_diffusion/__pycache__/text2img_app.cpython-38.pyc ADDED
Binary file (2.74 kB).
 
diffusion_webui/stable_diffusion/img2img_app.py CHANGED
@@ -1,54 +1,46 @@
1
- from diffusers import StableDiffusionImg2ImgPipeline, DDIMScheduler
2
-
3
- from PIL import Image
4
  import gradio as gr
5
  import torch
 
 
6
 
7
  stable_model_list = [
8
  "runwayml/stable-diffusion-v1-5",
9
- "stabilityai/stable-diffusion-2",
10
- "stabilityai/stable-diffusion-2-base",
11
  "stabilityai/stable-diffusion-2-1",
12
- "stabilityai/stable-diffusion-2-1-base"
13
  ]
14
 
15
- stable_prompt_list = [
16
- "a photo of a man.",
17
- "a photo of a girl."
18
- ]
19
 
20
- stable_negative_prompt_list = [
21
- "bad, ugly",
22
- "deformed"
23
- ]
 
24
 
25
 
26
  def stable_diffusion_img2img(
27
- image_path:str,
28
- model_path:str,
29
- prompt:str,
30
- negative_prompt:str,
31
- guidance_scale:int,
32
- num_inference_step:int,
33
- ):
34
 
35
  image = Image.open(image_path)
36
 
37
  pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
38
- model_path,
39
- safety_checker=None,
40
- torch_dtype=torch.float16
41
  )
42
  pipe.to("cuda")
43
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
44
  pipe.enable_xformers_memory_efficient_attention()
45
 
46
  output = pipe(
47
- prompt = prompt,
48
- image = image,
49
- negative_prompt = negative_prompt,
50
- num_inference_steps = num_inference_step,
51
- guidance_scale = guidance_scale,
52
  ).images
53
 
54
  return output[0]
@@ -59,50 +51,72 @@ def stable_diffusion_img2img_app():
59
  with gr.Row():
60
  with gr.Column():
61
  image2image2_image_file = gr.Image(
62
- type='filepath',
63
- label='Image'
64
  )
65
 
66
  image2image_model_path = gr.Dropdown(
67
- choices=stable_model_list,
68
- value=stable_model_list[0],
69
- label='Image-Image Model Id'
70
  )
71
 
72
  image2image_prompt = gr.Textbox(
73
- lines=1,
74
- value=stable_prompt_list[0],
75
- label='Prompt'
76
  )
77
 
78
  image2image_negative_prompt = gr.Textbox(
79
- lines=1,
80
- value=stable_negative_prompt_list[0],
81
- label='Negative Prompt'
82
  )
83
 
84
  with gr.Accordion("Advanced Options", open=False):
85
  image2image_guidance_scale = gr.Slider(
86
- minimum=0.1,
87
- maximum=15,
88
- step=0.1,
89
- value=7.5,
90
- label='Guidance Scale'
91
  )
92
 
93
  image2image_num_inference_step = gr.Slider(
94
- minimum=1,
95
- maximum=100,
96
- step=1,
97
- value=50,
98
- label='Num Inference Step'
99
  )
100
 
101
- image2image_predict = gr.Button(value='Generator')
102
 
103
  with gr.Column():
104
- output_image = gr.Image(label='Output')
105
-
 
 
 
 
 
 
106
  image2image_predict.click(
107
  fn=stable_diffusion_img2img,
108
  inputs=[
@@ -112,6 +126,6 @@ def stable_diffusion_img2img_app():
112
  image2image_negative_prompt,
113
  image2image_guidance_scale,
114
  image2image_num_inference_step,
115
- ],
116
  outputs=[output_image],
117
  )
 
 
 
 
1
  import gradio as gr
2
  import torch
3
+ from diffusers import DDIMScheduler, StableDiffusionImg2ImgPipeline
4
+ from PIL import Image
5
 
6
  stable_model_list = [
7
  "runwayml/stable-diffusion-v1-5",
 
 
8
  "stabilityai/stable-diffusion-2-1",
 
9
  ]
10
 
11
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
 
 
 
12
 
13
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
14
+
15
+ data_list = [
16
+ "data/test.png",
17
+ ]
18
 
19
 
20
  def stable_diffusion_img2img(
21
+ image_path: str,
22
+ model_path: str,
23
+ prompt: str,
24
+ negative_prompt: str,
25
+ guidance_scale: int,
26
+ num_inference_step: int,
27
+ ):
28
 
29
  image = Image.open(image_path)
30
 
31
  pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
32
+ model_path, safety_checker=None, torch_dtype=torch.float16
 
 
33
  )
34
  pipe.to("cuda")
35
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
36
  pipe.enable_xformers_memory_efficient_attention()
37
 
38
  output = pipe(
39
+ prompt=prompt,
40
+ image=image,
41
+ negative_prompt=negative_prompt,
42
+ num_inference_steps=num_inference_step,
43
+ guidance_scale=guidance_scale,
44
  ).images
45
 
46
  return output[0]
 
51
  with gr.Row():
52
  with gr.Column():
53
  image2image2_image_file = gr.Image(
54
+ type="filepath", label="Image"
 
55
  )
56
 
57
  image2image_model_path = gr.Dropdown(
58
+ choices=stable_model_list,
59
+ value=stable_model_list[0],
60
+ label="Image-Image Model Id",
61
  )
62
 
63
  image2image_prompt = gr.Textbox(
64
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
65
  )
66
 
67
  image2image_negative_prompt = gr.Textbox(
68
+ lines=1,
69
+ value=stable_negative_prompt_list[0],
70
+ label="Negative Prompt",
71
  )
72
 
73
  with gr.Accordion("Advanced Options", open=False):
74
  image2image_guidance_scale = gr.Slider(
75
+ minimum=0.1,
76
+ maximum=15,
77
+ step=0.1,
78
+ value=7.5,
79
+ label="Guidance Scale",
80
  )
81
 
82
  image2image_num_inference_step = gr.Slider(
83
+ minimum=1,
84
+ maximum=100,
85
+ step=1,
86
+ value=50,
87
+ label="Num Inference Step",
88
  )
89
 
90
+ image2image_predict = gr.Button(value="Generator")
91
 
92
  with gr.Column():
93
+ output_image = gr.Image(label="Output")
94
+
95
+ gr.Examples(
96
+ fn=stable_diffusion_img2img,
97
+ examples=[
98
+ [
99
+ data_list[0],
100
+ stable_model_list[0],
101
+ stable_prompt_list[0],
102
+ stable_negative_prompt_list[0],
103
+ 7.5,
104
+ 50,
105
+ ],
106
+ ],
107
+ inputs=[
108
+ image2image2_image_file,
109
+ image2image_model_path,
110
+ image2image_prompt,
111
+ image2image_negative_prompt,
112
+ image2image_guidance_scale,
113
+ image2image_num_inference_step,
114
+ ],
115
+ outputs=[output_image],
116
+ cache_examples=False,
117
+ label="Image-Image Generator",
118
+ )
119
+
120
  image2image_predict.click(
121
  fn=stable_diffusion_img2img,
122
  inputs=[
 
126
  image2image_negative_prompt,
127
  image2image_guidance_scale,
128
  image2image_num_inference_step,
129
+ ],
130
  outputs=[output_image],
131
  )
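
Note: the gr.Examples block added above is wired to the same fn, inputs and outputs as the Generate button; with cache_examples=False, Gradio does not pre-run the example at startup (which would load the full pipeline onto the GPU at build time) and instead only pre-fills the input widgets. The example row assumes the bundled sample image at data/test.png exists in the Space; the same pattern is repeated in each of the other tab modules.
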
diffusion_webui/stable_diffusion/inpaint_app.py CHANGED
@@ -1,32 +1,25 @@
1
- from diffusers import DiffusionPipeline, DDIMScheduler
2
- import torch
3
-
4
  import gradio as gr
 
 
5
 
6
  stable_inpiant_model_list = [
7
  "stabilityai/stable-diffusion-2-inpainting",
8
- "runwayml/stable-diffusion-inpainting"
9
  ]
10
 
11
- stable_prompt_list = [
12
- "a photo of a man.",
13
- "a photo of a girl."
14
- ]
15
 
16
- stable_negative_prompt_list = [
17
- "bad, ugly",
18
- "deformed"
19
- ]
20
 
21
 
22
  def stable_diffusion_inpaint(
23
- dict:str,
24
- model_path:str,
25
- prompt:str,
26
- negative_prompt:str,
27
- guidance_scale:int,
28
- num_inference_step:int,
29
- ):
30
 
31
  image = dict["image"].convert("RGB").resize((512, 512))
32
  mask_image = dict["mask"].convert("RGB").resize((512, 512))
@@ -35,17 +28,17 @@ def stable_diffusion_inpaint(
35
  revision="fp16",
36
  torch_dtype=torch.float16,
37
  )
38
- pipe.to('cuda')
39
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
40
  pipe.enable_xformers_memory_efficient_attention()
41
 
42
  output = pipe(
43
- prompt = prompt,
44
- image = image,
45
  mask_image=mask_image,
46
- negative_prompt = negative_prompt,
47
- num_inference_steps = num_inference_step,
48
- guidance_scale = guidance_scale,
49
  ).images
50
 
51
  return output[0]
@@ -56,54 +49,51 @@ def stable_diffusion_inpaint_app():
56
  with gr.Row():
57
  with gr.Column():
58
  inpaint_image_file = gr.Image(
59
- source='upload',
60
- tool='sketch',
61
- elem_id="image_upload",
62
- type="pil",
63
- label="Upload"
64
  )
65
 
66
  inpaint_model_id = gr.Dropdown(
67
- choices=stable_inpiant_model_list,
68
- value=stable_inpiant_model_list[0],
69
- label='Inpaint Model Id'
70
  )
71
 
72
  inpaint_prompt = gr.Textbox(
73
- lines=1,
74
- value=stable_prompt_list[0],
75
- label='Prompt'
76
  )
77
 
78
  inpaint_negative_prompt = gr.Textbox(
79
- lines=1,
80
- value=stable_negative_prompt_list[0],
81
- label='Negative Prompt'
82
  )
83
 
84
  with gr.Accordion("Advanced Options", open=False):
85
  inpaint_guidance_scale = gr.Slider(
86
- minimum=0.1,
87
- maximum=15,
88
- step=0.1,
89
- value=7.5,
90
- label='Guidance Scale'
91
  )
92
 
93
  inpaint_num_inference_step = gr.Slider(
94
- minimum=1,
95
- maximum=100,
96
- step=1,
97
- value=50,
98
- label='Num Inference Step'
99
  )
100
 
101
- inpaint_predict = gr.Button(value='Generator')
102
 
103
-
104
  with gr.Column():
105
  output_image = gr.Gallery(label="Outputs")
106
-
107
  inpaint_predict.click(
108
  fn=stable_diffusion_inpaint,
109
  inputs=[
@@ -114,6 +104,5 @@ def stable_diffusion_inpaint_app():
114
  inpaint_guidance_scale,
115
  inpaint_num_inference_step,
116
  ],
117
- outputs=output_image
118
  )
119
-
 
 
 
 
1
  import gradio as gr
2
+ import torch
3
+ from diffusers import DDIMScheduler, DiffusionPipeline
4
 
5
  stable_inpiant_model_list = [
6
  "stabilityai/stable-diffusion-2-inpainting",
7
+ "runwayml/stable-diffusion-inpainting",
8
  ]
9
 
10
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
 
 
 
11
 
12
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
 
 
13
 
14
 
15
  def stable_diffusion_inpaint(
16
+ dict: str,
17
+ model_path: str,
18
+ prompt: str,
19
+ negative_prompt: str,
20
+ guidance_scale: int,
21
+ num_inference_step: int,
22
+ ):
23
 
24
  image = dict["image"].convert("RGB").resize((512, 512))
25
  mask_image = dict["mask"].convert("RGB").resize((512, 512))
 
28
  revision="fp16",
29
  torch_dtype=torch.float16,
30
  )
31
+ pipe.to("cuda")
32
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
33
  pipe.enable_xformers_memory_efficient_attention()
34
 
35
  output = pipe(
36
+ prompt=prompt,
37
+ image=image,
38
  mask_image=mask_image,
39
+ negative_prompt=negative_prompt,
40
+ num_inference_steps=num_inference_step,
41
+ guidance_scale=guidance_scale,
42
  ).images
43
 
44
  return output[0]
 
49
  with gr.Row():
50
  with gr.Column():
51
  inpaint_image_file = gr.Image(
52
+ source="upload",
53
+ tool="sketch",
54
+ elem_id="image_upload",
55
+ type="pil",
56
+ label="Upload",
57
  )
58
 
59
  inpaint_model_id = gr.Dropdown(
60
+ choices=stable_inpiant_model_list,
61
+ value=stable_inpiant_model_list[0],
62
+ label="Inpaint Model Id",
63
  )
64
 
65
  inpaint_prompt = gr.Textbox(
66
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
67
  )
68
 
69
  inpaint_negative_prompt = gr.Textbox(
70
+ lines=1,
71
+ value=stable_negative_prompt_list[0],
72
+ label="Negative Prompt",
73
  )
74
 
75
  with gr.Accordion("Advanced Options", open=False):
76
  inpaint_guidance_scale = gr.Slider(
77
+ minimum=0.1,
78
+ maximum=15,
79
+ step=0.1,
80
+ value=7.5,
81
+ label="Guidance Scale",
82
  )
83
 
84
  inpaint_num_inference_step = gr.Slider(
85
+ minimum=1,
86
+ maximum=100,
87
+ step=1,
88
+ value=50,
89
+ label="Num Inference Step",
90
  )
91
 
92
+ inpaint_predict = gr.Button(value="Generator")
93
 
 
94
  with gr.Column():
95
  output_image = gr.Gallery(label="Outputs")
96
+
97
  inpaint_predict.click(
98
  fn=stable_diffusion_inpaint,
99
  inputs=[
 
104
  inpaint_guidance_scale,
105
  inpaint_num_inference_step,
106
  ],
107
+ outputs=output_image,
108
  )
 
diffusion_webui/stable_diffusion/keras_txt2img.py CHANGED
@@ -1,8 +1,8 @@
 
 
1
  from huggingface_hub import from_pretrained_keras
2
  from keras_cv import models
3
  from tensorflow import keras
4
- import tensorflow as tf
5
- import gradio as gr
6
 
7
  keras_model_list = [
8
  "keras-dreambooth/keras_diffusion_lowpoly_world",
@@ -11,105 +11,128 @@ keras_model_list = [
11
  ]
12
 
13
  stable_prompt_list = [
14
- "a photo of lowpoly_world",
15
- "Flower vase inspired by pink floyd division bell"
16
- ]
 
 
17
 
18
- stable_negative_prompt_list = [
19
- "bad, ugly",
20
- "deformed"
21
- ]
22
 
23
  def keras_stable_diffusion(
24
- model_path:str,
25
- prompt:str,
26
- negative_prompt:str,
27
- guidance_scale:int,
28
- num_inference_step:int,
29
- height:int,
30
- width:int,
31
- ):
32
-
33
- with tf.device('/GPU:0'):
34
  keras.mixed_precision.set_global_policy("mixed_float16")
35
-
36
  sd_dreambooth_model = models.StableDiffusion(
37
- img_width=height,
38
- img_height=width
39
- )
40
-
41
  db_diffusion_model = from_pretrained_keras(model_path)
42
  sd_dreambooth_model._diffusion_model = db_diffusion_model
43
-
44
  generated_images = sd_dreambooth_model.text_to_image(
45
  prompt=prompt,
46
  negative_prompt=negative_prompt,
47
  num_steps=num_inference_step,
48
- unconditional_guidance_scale=guidance_scale
49
  )
50
 
51
  return generated_images
52
 
 
53
  def keras_stable_diffusion_app():
54
  with gr.Blocks():
55
  with gr.Row():
56
  with gr.Column():
57
  keras_text2image_model_path = gr.Dropdown(
58
- choices=keras_model_list,
59
- value=keras_model_list[0],
60
- label='Text-Image Model Id'
61
  )
62
 
63
  keras_text2image_prompt = gr.Textbox(
64
- lines=1,
65
- value=stable_prompt_list[0],
66
- label='Prompt'
67
  )
68
 
69
  keras_text2image_negative_prompt = gr.Textbox(
70
- lines=1,
71
- value=stable_negative_prompt_list[0],
72
- label='Negative Prompt'
73
  )
74
 
75
  with gr.Accordion("Advanced Options", open=False):
76
  keras_text2image_guidance_scale = gr.Slider(
77
- minimum=0.1,
78
- maximum=15,
79
- step=0.1,
80
- value=7.5,
81
- label='Guidance Scale'
82
  )
83
 
84
  keras_text2image_num_inference_step = gr.Slider(
85
- minimum=1,
86
- maximum=100,
87
- step=1,
88
- value=50,
89
- label='Num Inference Step'
90
  )
91
 
92
  keras_text2image_height = gr.Slider(
93
- minimum=128,
94
- maximum=1280,
95
- step=32,
96
- value=512,
97
- label='Image Height'
98
  )
99
 
100
  keras_text2image_width = gr.Slider(
101
- minimum=128,
102
- maximum=1280,
103
- step=32,
104
- value=512,
105
- label='Image Height'
106
  )
107
 
108
- keras_text2image_predict = gr.Button(value='Generator')
109
-
110
  with gr.Column():
111
- output_image = gr.Gallery(label='Output')
112
-
 
 
 
 
 
 
 
113
  keras_text2image_predict.click(
114
  fn=keras_stable_diffusion,
115
  inputs=[
@@ -119,7 +142,7 @@ def keras_stable_diffusion_app():
119
  keras_text2image_guidance_scale,
120
  keras_text2image_num_inference_step,
121
  keras_text2image_height,
122
- keras_text2image_width
123
  ],
124
- outputs=output_image
125
  )
 
1
+ import gradio as gr
2
+ import tensorflow as tf
3
  from huggingface_hub import from_pretrained_keras
4
  from keras_cv import models
5
  from tensorflow import keras
 
 
6
 
7
  keras_model_list = [
8
  "keras-dreambooth/keras_diffusion_lowpoly_world",
 
11
  ]
12
 
13
  stable_prompt_list = [
14
+ "a photo of lowpoly_world",
15
+ "Flower vase inspired by pink floyd division bell",
16
+ ]
17
+
18
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
19
 
 
 
 
 
20
 
21
  def keras_stable_diffusion(
22
+ model_path: str,
23
+ prompt: str,
24
+ negative_prompt: str,
25
+ guidance_scale: int,
26
+ num_inference_step: int,
27
+ height: int,
28
+ width: int,
29
+ ):
30
+
31
+ with tf.device("/GPU:0"):
32
  keras.mixed_precision.set_global_policy("mixed_float16")
33
+
34
  sd_dreambooth_model = models.StableDiffusion(
35
+ img_width=height, img_height=width
36
+ )
37
+
 
38
  db_diffusion_model = from_pretrained_keras(model_path)
39
  sd_dreambooth_model._diffusion_model = db_diffusion_model
40
+
41
  generated_images = sd_dreambooth_model.text_to_image(
42
  prompt=prompt,
43
  negative_prompt=negative_prompt,
44
  num_steps=num_inference_step,
45
+ unconditional_guidance_scale=guidance_scale,
46
  )
47
 
48
  return generated_images
49
 
50
+
51
  def keras_stable_diffusion_app():
52
  with gr.Blocks():
53
  with gr.Row():
54
  with gr.Column():
55
  keras_text2image_model_path = gr.Dropdown(
56
+ choices=keras_model_list,
57
+ value=keras_model_list[0],
58
+ label="Text-Image Model Id",
59
  )
60
 
61
  keras_text2image_prompt = gr.Textbox(
62
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
63
  )
64
 
65
  keras_text2image_negative_prompt = gr.Textbox(
66
+ lines=1,
67
+ value=stable_negative_prompt_list[0],
68
+ label="Negative Prompt",
69
  )
70
 
71
  with gr.Accordion("Advanced Options", open=False):
72
  keras_text2image_guidance_scale = gr.Slider(
73
+ minimum=0.1,
74
+ maximum=15,
75
+ step=0.1,
76
+ value=7.5,
77
+ label="Guidance Scale",
78
  )
79
 
80
  keras_text2image_num_inference_step = gr.Slider(
81
+ minimum=1,
82
+ maximum=100,
83
+ step=1,
84
+ value=50,
85
+ label="Num Inference Step",
86
  )
87
 
88
  keras_text2image_height = gr.Slider(
89
+ minimum=128,
90
+ maximum=1280,
91
+ step=32,
92
+ value=512,
93
+ label="Image Height",
94
  )
95
 
96
  keras_text2image_width = gr.Slider(
97
+ minimum=128,
98
+ maximum=1280,
99
+ step=32,
100
+ value=512,
101
+ label="Image Height",
102
  )
103
 
104
+ keras_text2image_predict = gr.Button(value="Generator")
105
+
106
  with gr.Column():
107
+ output_image = gr.Gallery(label="Output")
108
+
109
+ gr.Examples(
110
+ fn=keras_stable_diffusion,
111
+ inputs=[
112
+ keras_text2image_model_path,
113
+ keras_text2image_prompt,
114
+ keras_text2image_negative_prompt,
115
+ keras_text2image_guidance_scale,
116
+ keras_text2image_num_inference_step,
117
+ keras_text2image_height,
118
+ keras_text2image_width,
119
+ ],
120
+ outputs=[output_image],
121
+ examples=[
122
+ [
123
+ keras_model_list[0],
124
+ stable_prompt_list[0],
125
+ stable_negative_prompt_list[0],
126
+ 7.5,
127
+ 50,
128
+ 512,
129
+ 512,
130
+ ],
131
+ ],
132
+ label="Keras Stable Diffusion Example",
133
+ cache_examples=False,
134
+ )
135
+
136
  keras_text2image_predict.click(
137
  fn=keras_stable_diffusion,
138
  inputs=[
 
142
  keras_text2image_guidance_scale,
143
  keras_text2image_num_inference_step,
144
  keras_text2image_height,
145
+ keras_text2image_width,
146
  ],
147
+ outputs=output_image,
148
  )
diffusion_webui/stable_diffusion/text2img_app.py CHANGED
@@ -1,6 +1,6 @@
1
- from diffusers import StableDiffusionPipeline, DDIMScheduler
2
  import gradio as gr
3
  import torch
 
4
 
5
  stable_model_list = [
6
  "runwayml/stable-diffusion-v1-5",
@@ -10,34 +10,26 @@ stable_model_list = [
10
  "andite/anything-v4.0",
11
  "Lykon/DreamShaper",
12
  "nitrosocke/Nitro-Diffusion",
13
- "dreamlike-art/dreamlike-diffusion-1.0"
14
-
15
  ]
16
 
17
- stable_prompt_list = [
18
- "a photo of a man.",
19
- "a photo of a girl."
20
- ]
21
 
22
- stable_negative_prompt_list = [
23
- "bad, ugly",
24
- "deformed"
25
- ]
26
 
27
  def stable_diffusion_text2img(
28
- model_path:str,
29
- prompt:str,
30
- negative_prompt:str,
31
- guidance_scale:int,
32
- num_inference_step:int,
33
- height:int,
34
- width:int,
35
- ):
36
 
37
  pipe = StableDiffusionPipeline.from_pretrained(
38
- model_path,
39
- safety_checker=None,
40
- torch_dtype=torch.float16
41
  ).to("cuda")
42
 
43
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
@@ -54,67 +46,91 @@ def stable_diffusion_text2img(
54
 
55
  return images[0]
56
 
 
57
  def stable_diffusion_text2img_app():
58
  with gr.Blocks():
59
  with gr.Row():
60
  with gr.Column():
61
  text2image_model_path = gr.Dropdown(
62
- choices=stable_model_list,
63
- value=stable_model_list[0],
64
- label='Text-Image Model Id'
65
  )
66
 
67
  text2image_prompt = gr.Textbox(
68
- lines=1,
69
- value=stable_prompt_list[0],
70
- label='Prompt'
71
  )
72
 
73
  text2image_negative_prompt = gr.Textbox(
74
- lines=1,
75
- value=stable_negative_prompt_list[0],
76
- label='Negative Prompt'
77
  )
78
 
79
  with gr.Accordion("Advanced Options", open=False):
80
  text2image_guidance_scale = gr.Slider(
81
- minimum=0.1,
82
- maximum=15,
83
- step=0.1,
84
- value=7.5,
85
- label='Guidance Scale'
86
  )
87
 
88
  text2image_num_inference_step = gr.Slider(
89
- minimum=1,
90
- maximum=100,
91
- step=1,
92
- value=50,
93
- label='Num Inference Step'
94
  )
95
 
96
  text2image_height = gr.Slider(
97
- minimum=128,
98
- maximum=1280,
99
- step=32,
100
- value=512,
101
- label='Image Height'
102
  )
103
 
104
  text2image_width = gr.Slider(
105
- minimum=128,
106
- maximum=1280,
107
- step=32,
108
- value=768,
109
- label='Image Height'
110
  )
111
 
112
- text2image_predict = gr.Button(value='Generator')
113
-
114
  with gr.Column():
115
- output_image = gr.Image(label='Output')
116
-
117
-
 
 
 
 
 
 
 
118
  text2image_predict.click(
119
  fn=stable_diffusion_text2img,
120
  inputs=[
@@ -126,5 +142,5 @@ def stable_diffusion_text2img_app():
126
  text2image_height,
127
  text2image_width,
128
  ],
129
- outputs=output_image
130
  )
 
 
1
  import gradio as gr
2
  import torch
3
+ from diffusers import DDIMScheduler, StableDiffusionPipeline
4
 
5
  stable_model_list = [
6
  "runwayml/stable-diffusion-v1-5",
 
10
  "andite/anything-v4.0",
11
  "Lykon/DreamShaper",
12
  "nitrosocke/Nitro-Diffusion",
13
+ "dreamlike-art/dreamlike-diffusion-1.0",
 
14
  ]
15
 
16
+ stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
17
+
18
+ stable_negative_prompt_list = ["bad, ugly", "deformed"]
 
19
 
 
 
 
 
20
 
21
  def stable_diffusion_text2img(
22
+ model_path: str,
23
+ prompt: str,
24
+ negative_prompt: str,
25
+ guidance_scale: int,
26
+ num_inference_step: int,
27
+ height: int,
28
+ width: int,
29
+ ):
30
 
31
  pipe = StableDiffusionPipeline.from_pretrained(
32
+ model_path, safety_checker=None, torch_dtype=torch.float16
 
 
33
  ).to("cuda")
34
 
35
  pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
 
46
 
47
  return images[0]
48
 
49
+
50
  def stable_diffusion_text2img_app():
51
  with gr.Blocks():
52
  with gr.Row():
53
  with gr.Column():
54
  text2image_model_path = gr.Dropdown(
55
+ choices=stable_model_list,
56
+ value=stable_model_list[0],
57
+ label="Text-Image Model Id",
58
  )
59
 
60
  text2image_prompt = gr.Textbox(
61
+ lines=1, value=stable_prompt_list[0], label="Prompt"
 
 
62
  )
63
 
64
  text2image_negative_prompt = gr.Textbox(
65
+ lines=1,
66
+ value=stable_negative_prompt_list[0],
67
+ label="Negative Prompt",
68
  )
69
 
70
  with gr.Accordion("Advanced Options", open=False):
71
  text2image_guidance_scale = gr.Slider(
72
+ minimum=0.1,
73
+ maximum=15,
74
+ step=0.1,
75
+ value=7.5,
76
+ label="Guidance Scale",
77
  )
78
 
79
  text2image_num_inference_step = gr.Slider(
80
+ minimum=1,
81
+ maximum=100,
82
+ step=1,
83
+ value=50,
84
+ label="Num Inference Step",
85
  )
86
 
87
  text2image_height = gr.Slider(
88
+ minimum=128,
89
+ maximum=1280,
90
+ step=32,
91
+ value=512,
92
+ label="Image Height",
93
  )
94
 
95
  text2image_width = gr.Slider(
96
+ minimum=128,
97
+ maximum=1280,
98
+ step=32,
99
+ value=768,
100
+ label="Image Width",
101
  )
102
 
103
+ text2image_predict = gr.Button(value="Generator")
104
+
105
  with gr.Column():
106
+ output_image = gr.Image(label="Output")
107
+
108
+ gr.Examples(
109
+ examples=[
110
+ [
111
+ stable_model_list[0],
112
+ stable_prompt_list[0],
113
+ stable_negative_prompt_list[0],
114
+ 7.5,
115
+ 50,
116
+ 512,
117
+ 768,
118
+ ]
119
+ ],
120
+ inputs=[
121
+ text2image_model_path,
122
+ text2image_prompt,
123
+ text2image_negative_prompt,
124
+ text2image_guidance_scale,
125
+ text2image_num_inference_step,
126
+ text2image_height,
127
+ text2image_width,
128
+ ],
129
+ outputs=[output_image],
130
+ cache_examples=False,
131
+ fn=stable_diffusion_text2img,
132
+ label="Text2Image Example",
133
+ )
134
  text2image_predict.click(
135
  fn=stable_diffusion_text2img,
136
  inputs=[
 
142
  text2image_height,
143
  text2image_width,
144
  ],
145
+ outputs=output_image,
146
  )
pyproject.toml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [tool.black]
2
+ line-length = 80
3
+
4
+ [tool.isort]
5
+ line_length = 80
6
+ profile = "black"
script/code_formatter.sh ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ black . --config pyproject.toml
2
+ isort .
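
Note: taken together, the new pyproject.toml and script/code_formatter.sh record how the reformatting in this commit was presumably produced: black is pinned to an 80-column line length and isort to the matching "black" profile, which is what yields the parenthesised, trailing-comma import blocks and reflowed call signatures seen throughout the diff, while the shell script simply runs the two formatters over the whole repository.
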