kadirnar committed
Commit 216e3b6
Parent(s): 83a77b5

Upload 27 files
diffusion_webui/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "1.4.1"
+__version__ = "1.6.0"
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py ADDED
@@ -0,0 +1,214 @@
+import cv2
+import gradio as gr
+import numpy as np
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from PIL import Image
+
+from diffusion_webui.utils.model_list import (
+    controlnet_canny_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+class StableDiffusionControlNetInpaintCannyGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_canny_inpaint(
+        self,
+        image_path: dict,  # Gradio sketch payload: {"image": ..., "mask": ...}
+    ):
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+
+        image = cv2.Canny(image, 100, 200)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        image = Image.fromarray(image)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_canny_inpaint(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_canny_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_canny_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_canny_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_canny_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_canny_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_canny_model_list,
+                                    value=controlnet_canny_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_canny_inpaint_scheduler = (
+                                    gr.Dropdown(
+                                        choices=SCHEDULER_LIST,
+                                        value=SCHEDULER_LIST[0],
+                                        label="Scheduler",
+                                    )
+                                )
+                                controlnet_canny_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_canny_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_canny_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintCannyGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_inpaint_image_file,
+                    controlnet_canny_inpaint_stable_model_id,
+                    controlnet_canny_inpaint_model_id,
+                    controlnet_canny_inpaint_prompt,
+                    controlnet_canny_inpaint_negative_prompt,
+                    controlnet_canny_inpaint_num_images_per_prompt,
+                    controlnet_canny_inpaint_guidance_scale,
+                    controlnet_canny_inpaint_num_inference_step,
+                    controlnet_canny_inpaint_controlnet_conditioning_scale,
+                    controlnet_canny_inpaint_scheduler,
+                    controlnet_canny_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
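Note: the Canny preprocessing above can be exercised on its own, outside the Gradio sketch tool. A minimal standalone sketch (the input.png path is illustrative; the 100/200 thresholds match the values hard-coded in controlnet_canny_inpaint):

import cv2
import numpy as np
from PIL import Image

# Normalize the source image exactly as the generator does.
source = Image.open("input.png").convert("RGB").resize((512, 512))
edges = cv2.Canny(np.array(source), 100, 200)  # single-channel edge map

# Stack the edge map into three channels so it matches the RGB
# control-image format the ControlNet pipeline expects.
control_image = Image.fromarray(np.stack([edges] * 3, axis=-1))
control_image.save("canny_control.png")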
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py ADDED
@@ -0,0 +1,211 @@
+import gradio as gr
+import numpy as np
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from PIL import Image
+from transformers import pipeline
+
+from diffusion_webui.utils.model_list import (
+    controlnet_depth_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+class StableDiffusionControlInpaintNetDepthGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_inpaint_depth(self, image_path: dict):  # Gradio sketch payload
+        depth_estimator = pipeline("depth-estimation")
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = depth_estimator(image)["depth"]
+        image = np.array(image)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        image = Image.fromarray(image)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_inpaint_depth(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_depth_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_depth_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_depth_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_depth_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_depth_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_depth_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_depth_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_depth_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_depth_model_list,
+                                    value=controlnet_depth_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_depth_inpaint_scheduler = (
+                                    gr.Dropdown(
+                                        choices=SCHEDULER_LIST,
+                                        value=SCHEDULER_LIST[0],
+                                        label="Scheduler",
+                                    )
+                                )
+                                controlnet_depth_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_depth_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_depth_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_depth_inpaint_predict.click(
+                fn=StableDiffusionControlInpaintNetDepthGenerator().generate_image,
+                inputs=[
+                    controlnet_depth_inpaint_image_file,
+                    controlnet_depth_inpaint_stable_model_id,
+                    controlnet_depth_inpaint_model_id,
+                    controlnet_depth_inpaint_prompt,
+                    controlnet_depth_inpaint_negative_prompt,
+                    controlnet_depth_inpaint_num_images_per_prompt,
+                    controlnet_depth_inpaint_guidance_scale,
+                    controlnet_depth_inpaint_num_inference_step,
+                    controlnet_depth_inpaint_controlnet_conditioning_scale,
+                    controlnet_depth_inpaint_scheduler,
+                    controlnet_depth_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py ADDED
@@ -0,0 +1,205 @@
+import gradio as gr
+import numpy as np
+import torch
+from controlnet_aux import HEDdetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+from diffusion_webui.utils.model_list import (
+    controlnet_hed_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+class StableDiffusionControlNetInpaintHedGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_inpaint_hed(self, image_path: dict):  # Gradio sketch payload
+        hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+        image = hed(image)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_inpaint_hed(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_hed_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_hed_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_hed_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_hed_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_hed_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_hed_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_hed_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_hed_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_hed_model_list,
+                                    value=controlnet_hed_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_hed_inpaint_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+                                controlnet_hed_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_hed_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_hed_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_hed_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintHedGenerator().generate_image,
+                inputs=[
+                    controlnet_hed_inpaint_image_file,
+                    controlnet_hed_inpaint_stable_model_id,
+                    controlnet_hed_inpaint_model_id,
+                    controlnet_hed_inpaint_prompt,
+                    controlnet_hed_inpaint_negative_prompt,
+                    controlnet_hed_inpaint_num_images_per_prompt,
+                    controlnet_hed_inpaint_guidance_scale,
+                    controlnet_hed_inpaint_num_inference_step,
+                    controlnet_hed_inpaint_controlnet_conditioning_scale,
+                    controlnet_hed_inpaint_scheduler,
+                    controlnet_hed_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py ADDED
@@ -0,0 +1,205 @@
+import gradio as gr
+import numpy as np
+import torch
+from controlnet_aux import MLSDdetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+from diffusion_webui.utils.model_list import (
+    controlnet_mlsd_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+class StableDiffusionControlNetInpaintMlsdGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_inpaint_mlsd(self, image_path: dict):  # Gradio sketch payload
+        mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+        image = mlsd(image)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_inpaint_mlsd(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_mlsd_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_mlsd_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_mlsd_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_mlsd_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_mlsd_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_mlsd_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_mlsd_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_mlsd_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_mlsd_model_list,
+                                    value=controlnet_mlsd_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_mlsd_inpaint_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+                                controlnet_mlsd_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_mlsd_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_mlsd_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_mlsd_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintMlsdGenerator().generate_image,
+                inputs=[
+                    controlnet_mlsd_inpaint_image_file,
+                    controlnet_mlsd_inpaint_stable_model_id,
+                    controlnet_mlsd_inpaint_model_id,
+                    controlnet_mlsd_inpaint_prompt,
+                    controlnet_mlsd_inpaint_negative_prompt,
+                    controlnet_mlsd_inpaint_num_images_per_prompt,
+                    controlnet_mlsd_inpaint_guidance_scale,
+                    controlnet_mlsd_inpaint_num_inference_step,
+                    controlnet_mlsd_inpaint_controlnet_conditioning_scale,
+                    controlnet_mlsd_inpaint_scheduler,
+                    controlnet_mlsd_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py ADDED
@@ -0,0 +1,207 @@
+import gradio as gr
+import numpy as np
+import torch
+from controlnet_aux import OpenposeDetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+from diffusion_webui.utils.model_list import (
+    controlnet_pose_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+class StableDiffusionControlNetInpaintPoseGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_pose_inpaint(self, image_path: dict):  # Gradio sketch payload
+        openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+        image = openpose(image)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_pose_inpaint(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_pose_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_pose_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_pose_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_pose_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_pose_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_pose_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_pose_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_pose_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_pose_model_list,
+                                    value=controlnet_pose_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_pose_inpaint_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+                                controlnet_pose_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_pose_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_pose_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_pose_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintPoseGenerator().generate_image,
+                inputs=[
+                    controlnet_pose_inpaint_image_file,
+                    controlnet_pose_inpaint_stable_model_id,
+                    controlnet_pose_inpaint_model_id,
+                    controlnet_pose_inpaint_prompt,
+                    controlnet_pose_inpaint_negative_prompt,
+                    controlnet_pose_inpaint_num_images_per_prompt,
+                    controlnet_pose_inpaint_guidance_scale,
+                    controlnet_pose_inpaint_num_inference_step,
+                    controlnet_pose_inpaint_controlnet_conditioning_scale,
+                    controlnet_pose_inpaint_scheduler,
+                    controlnet_pose_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py ADDED
@@ -0,0 +1,210 @@
+import gradio as gr
+import numpy as np
+import torch
+from controlnet_aux import HEDdetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+from diffusion_webui.utils.model_list import (
+    controlnet_scribble_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+class StableDiffusionControlNetInpaintScribbleGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_inpaint_scribble(self, image_path: dict):  # Gradio sketch payload
+        hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
+
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+        image = hed(image, scribble=True)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_inpaint_scribble(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_scribble_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_scribble_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_scribble_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_scribble_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_scribble_inpaint_guidance_scale = (
+                                gr.Slider(
+                                    minimum=0.1,
+                                    maximum=15,
+                                    step=0.1,
+                                    value=7.5,
+                                    label="Guidance Scale",
+                                )
+                            )
+
+                            controlnet_scribble_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_scribble_inpaint_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_scribble_inpaint_model_id = (
+                                    gr.Dropdown(
+                                        choices=controlnet_scribble_model_list,
+                                        value=controlnet_scribble_model_list[0],
+                                        label="Controlnet Model Id",
+                                    )
+                                )
+                                controlnet_scribble_inpaint_scheduler = (
+                                    gr.Dropdown(
+                                        choices=SCHEDULER_LIST,
+                                        value=SCHEDULER_LIST[0],
+                                        label="Scheduler",
+                                    )
+                                )
+                                controlnet_scribble_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_scribble_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_scribble_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_scribble_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintScribbleGenerator().generate_image,
+                inputs=[
+                    controlnet_scribble_inpaint_image_file,
+                    controlnet_scribble_inpaint_stable_model_id,
+                    controlnet_scribble_inpaint_model_id,
+                    controlnet_scribble_inpaint_prompt,
+                    controlnet_scribble_inpaint_negative_prompt,
+                    controlnet_scribble_inpaint_num_images_per_prompt,
+                    controlnet_scribble_inpaint_guidance_scale,
+                    controlnet_scribble_inpaint_num_inference_step,
+                    controlnet_scribble_inpaint_controlnet_conditioning_scale,
+                    controlnet_scribble_inpaint_scheduler,
+                    controlnet_scribble_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py ADDED
@@ -0,0 +1,390 @@
+import gradio as gr
+import numpy as np
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from PIL import Image
+from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
+
+from diffusion_webui.utils.model_list import (
+    controlnet_seg_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+# https://github.com/mikonvergence/ControlNetInpaint
+
+
+def ade_palette():
+    """ADE20K palette that maps each class to RGB values."""
+    return [
+        [120, 120, 120],
+        [180, 120, 120],
+        [6, 230, 230],
+        [80, 50, 50],
+        [4, 200, 3],
+        [120, 120, 80],
+        [140, 140, 140],
+        [204, 5, 255],
+        [230, 230, 230],
+        [4, 250, 7],
+        [224, 5, 255],
+        [235, 255, 7],
+        [150, 5, 61],
+        [120, 120, 70],
+        [8, 255, 51],
+        [255, 6, 82],
+        [143, 255, 140],
+        [204, 255, 4],
+        [255, 51, 7],
+        [204, 70, 3],
+        [0, 102, 200],
+        [61, 230, 250],
+        [255, 6, 51],
+        [11, 102, 255],
+        [255, 7, 71],
+        [255, 9, 224],
+        [9, 7, 230],
+        [220, 220, 220],
+        [255, 9, 92],
+        [112, 9, 255],
+        [8, 255, 214],
+        [7, 255, 224],
+        [255, 184, 6],
+        [10, 255, 71],
+        [255, 41, 10],
+        [7, 255, 255],
+        [224, 255, 8],
+        [102, 8, 255],
+        [255, 61, 6],
+        [255, 194, 7],
+        [255, 122, 8],
+        [0, 255, 20],
+        [255, 8, 41],
+        [255, 5, 153],
+        [6, 51, 255],
+        [235, 12, 255],
+        [160, 150, 20],
+        [0, 163, 255],
+        [140, 140, 140],
+        [250, 10, 15],
+        [20, 255, 0],
+        [31, 255, 0],
+        [255, 31, 0],
+        [255, 224, 0],
+        [153, 255, 0],
+        [0, 0, 255],
+        [255, 71, 0],
+        [0, 235, 255],
+        [0, 173, 255],
+        [31, 0, 255],
+        [11, 200, 200],
+        [255, 82, 0],
+        [0, 255, 245],
+        [0, 61, 255],
+        [0, 255, 112],
+        [0, 255, 133],
+        [255, 0, 0],
+        [255, 163, 0],
+        [255, 102, 0],
+        [194, 255, 0],
+        [0, 143, 255],
+        [51, 255, 0],
+        [0, 82, 255],
+        [0, 255, 41],
+        [0, 255, 173],
+        [10, 0, 255],
+        [173, 255, 0],
+        [0, 255, 153],
+        [255, 92, 0],
+        [255, 0, 255],
+        [255, 0, 245],
+        [255, 0, 102],
+        [255, 173, 0],
+        [255, 0, 20],
+        [255, 184, 184],
+        [0, 31, 255],
+        [0, 255, 61],
+        [0, 71, 255],
+        [255, 0, 204],
+        [0, 255, 194],
+        [0, 255, 82],
+        [0, 10, 255],
+        [0, 112, 255],
+        [51, 0, 255],
+        [0, 194, 255],
+        [0, 122, 255],
+        [0, 255, 163],
+        [255, 153, 0],
+        [0, 255, 10],
+        [255, 112, 0],
+        [143, 255, 0],
+        [82, 0, 255],
+        [163, 255, 0],
+        [255, 235, 0],
+        [8, 184, 170],
+        [133, 0, 255],
+        [0, 255, 92],
+        [184, 0, 255],
+        [255, 0, 31],
+        [0, 184, 255],
+        [0, 214, 255],
+        [255, 0, 112],
+        [92, 255, 0],
+        [0, 224, 255],
+        [112, 224, 255],
+        [70, 184, 160],
+        [163, 0, 255],
+        [153, 0, 255],
+        [71, 255, 0],
+        [255, 0, 163],
+        [255, 204, 0],
+        [255, 0, 143],
+        [0, 255, 235],
+        [133, 255, 0],
+        [255, 0, 235],
+        [245, 0, 255],
+        [255, 0, 122],
+        [255, 245, 0],
+        [10, 190, 212],
+        [214, 255, 0],
+        [0, 204, 255],
+        [20, 0, 255],
+        [255, 255, 0],
+        [0, 153, 255],
+        [0, 41, 255],
+        [0, 255, 204],
+        [41, 0, 255],
+        [41, 255, 0],
+        [173, 0, 255],
+        [0, 245, 255],
+        [71, 0, 255],
+        [122, 0, 255],
+        [0, 255, 184],
+        [0, 92, 255],
+        [184, 255, 0],
+        [0, 133, 255],
+        [255, 214, 0],
+        [25, 194, 194],
+        [102, 255, 0],
+        [92, 0, 255],
+    ]
+
+
+class StableDiffusionControlNetInpaintSegGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(
+        self,
+        stable_model_path,
+        controlnet_model_path,
+        scheduler,
+    ):
+
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_seg_inpaint(self, image_path: dict):  # Gradio sketch payload
+        image_processor = AutoImageProcessor.from_pretrained(
+            "openmmlab/upernet-convnext-small"
+        )
+        image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
+            "openmmlab/upernet-convnext-small"
+        )
+
+        image = image_path["image"].convert("RGB").resize((512, 512))
+        image = np.array(image)
+        pixel_values = image_processor(image, return_tensors="pt").pixel_values
+
+        with torch.no_grad():
+            outputs = image_segmentor(pixel_values)
+
+        seg = image_processor.post_process_semantic_segmentation(
+            outputs, target_sizes=[image.shape[:2]]  # (height, width); `image` is a numpy array here
+        )[0]
+
+        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
+        palette = np.array(ade_palette())
+
+        for label, color in enumerate(palette):
+            color_seg[seg == label, :] = color
+
+        color_seg = color_seg.astype(np.uint8)
+        image = Image.fromarray(color_seg)
+
+        return image
+
+    def generate_image(
+        self,
+        image_path: dict,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: float,
+        num_inference_step: int,
+        controlnet_conditioning_scale: float,
+        scheduler: str,
+        seed_generator: int,
+    ):
+
+        image = self.controlnet_seg_inpaint(image_path=image_path)
+
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_seg_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    )
+
+                    controlnet_seg_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+
+                    controlnet_seg_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        show_label=False,
+                        placeholder="Negative Prompt",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_seg_inpaint_stable_model_id = (
+                                gr.Dropdown(
+                                    choices=stable_model_list,
+                                    value=stable_model_list[0],
+                                    label="Stable Model Id",
+                                )
+                            )
+
+                            controlnet_seg_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            controlnet_seg_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+                            controlnet_seg_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=10,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_seg_inpaint_model_id = gr.Dropdown(
+                                    choices=controlnet_seg_model_list,
+                                    value=controlnet_seg_model_list[0],
+                                    label="Controlnet Model Id",
+                                )
+                                controlnet_seg_inpaint_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+                                controlnet_seg_inpaint_controlnet_conditioning_scale = gr.Slider(
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                    label="Controlnet Conditioning Scale",
+                                )
+
+                                controlnet_seg_inpaint_seed_generator = (
+                                    gr.Slider(
+                                        minimum=0,
+                                        maximum=1000000,
+                                        step=1,
+                                        value=0,
+                                        label="Seed Generator",
+                                    )
+                                )
+
+                    controlnet_seg_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_seg_inpaint_predict.click(
+                fn=StableDiffusionControlNetInpaintSegGenerator().generate_image,
+                inputs=[
+                    controlnet_seg_inpaint_image_file,
+                    controlnet_seg_inpaint_stable_model_id,
+                    controlnet_seg_inpaint_model_id,
+                    controlnet_seg_inpaint_prompt,
+                    controlnet_seg_inpaint_negative_prompt,
+                    controlnet_seg_inpaint_num_images_per_prompt,
+                    controlnet_seg_inpaint_guidance_scale,
+                    controlnet_seg_inpaint_num_inference_step,
+                    controlnet_seg_inpaint_controlnet_conditioning_scale,
+                    controlnet_seg_inpaint_scheduler,
+                    controlnet_seg_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
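Note: the colorization loop in controlnet_seg_inpaint paints every pixel of a predicted class with that class's palette color. A minimal sketch of the same mapping on a toy label map (the toy palette and labels are illustrative, not model output):

import numpy as np

palette = np.array([[120, 120, 120], [180, 120, 120], [6, 230, 230]])
seg = np.array([[0, 1], [2, 0]])  # toy 2x2 map of class ids

color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
for label, color in enumerate(palette):
    color_seg[seg == label] = color  # boolean mask selects this class's pixels

print(color_seg[0, 1])  # [180 120 120], the color of class 1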
diffusion_webui/diffusion_models/controlnet/controlnet_seg.py CHANGED
@@ -203,7 +203,7 @@ class StableDiffusionControlNetSegGenerator:
203
  "openmmlab/upernet-convnext-small"
204
  )
205
 
206
- image = Image.open(image_path).convert("RGB")
207
  pixel_values = image_processor(image, return_tensors="pt").pixel_values
208
 
209
  with torch.no_grad():
 
203
  "openmmlab/upernet-convnext-small"
204
  )
205
 
206
+ image = image_path["image"].convert("RGB").resize((512, 512))
207
  pixel_values = image_processor(image, return_tensors="pt").pixel_values
208
 
209
  with torch.no_grad():
diffusion_webui/helpers.py CHANGED
@@ -7,8 +7,26 @@ from diffusion_webui.diffusion_models.controlnet.controlnet_depth import (
 from diffusion_webui.diffusion_models.controlnet.controlnet_hed import (
     StableDiffusionControlNetHEDGenerator,
 )
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_app import (
-    StableDiffusionControlInpaintNetCannyGenerator,
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_canny import (
+    StableDiffusionControlNetInpaintCannyGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_depth import (
+    StableDiffusionControlInpaintNetDepthGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_hed import (
+    StableDiffusionControlNetInpaintHedGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_mlsd import (
+    StableDiffusionControlNetInpaintMlsdGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_pose import (
+    StableDiffusionControlNetInpaintPoseGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_scribble import (
+    StableDiffusionControlNetInpaintScribbleGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_seg import (
+    StableDiffusionControlNetInpaintSegGenerator,
 )
 from diffusion_webui.diffusion_models.controlnet.controlnet_mlsd import (
     StableDiffusionControlNetMLSDGenerator,
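Note: with these imports in place, each generator's no-argument app() builds its controls into whatever Blocks context is active. A minimal sketch of how a tabbed demo might compose them (the tab label and launch call are illustrative, not part of this commit):

import gradio as gr

from diffusion_webui.helpers import (
    StableDiffusionControlNetInpaintCannyGenerator,
)

with gr.Blocks() as demo:
    with gr.Tab("ControlNet Inpaint Canny"):
        # app() is defined without `self`, so it is called on the class.
        StableDiffusionControlNetInpaintCannyGenerator.app()

demo.launch()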
diffusion_webui/utils/model_list.py CHANGED
@@ -1,7 +1,7 @@
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
     "stabilityai/stable-diffusion-2-1",
-    "prompthero/openjourney"
+    "prompthero/openjourney-v4",
 ]
 
 controlnet_canny_model_list = [
@@ -32,3 +32,11 @@ stable_inpiant_model_list = [
     "stabilityai/stable-diffusion-2-inpainting",
     "runwayml/stable-diffusion-inpainting",
 ]
+
+controlnet_mlsd_model_list = [
+    "lllyasviel/sd-controlnet-mlsd",
+]
+
+controlnet_seg_model_list = [
+    "lllyasviel/sd-controlnet-seg",
+]