kadirnar committed
Commit: cab0202
Parent: 8dce1f7

Upload 15 files

app.py CHANGED
@@ -1,27 +1,8 @@
 import gradio as gr
 
 from diffusion_webui import (
-    CodeformerUpscalerGenerator,
-    StableDiffusionControlInpaintNetDepthGenerator,
-    StableDiffusionControlNetCannyGenerator,
-    StableDiffusionControlNetDepthGenerator,
-    StableDiffusionControlNetHEDGenerator,
-    StableDiffusionControlNetInpaintCannyGenerator,
-    StableDiffusionControlNetInpaintHedGenerator,
-    StableDiffusionControlNetInpaintMlsdGenerator,
-    StableDiffusionControlNetInpaintPoseGenerator,
-    StableDiffusionControlNetInpaintScribbleGenerator,
-    StableDiffusionControlNetInpaintSegGenerator,
-    StableDiffusionControlNetLineArtAnimeGenerator,
-    StableDiffusionControlNetLineArtGenerator,
-    StableDiffusionControlNetMLSDGenerator,
-    StableDiffusionControlNetNormalGenerator,
-    StableDiffusionControlNetPix2PixGenerator,
-    StableDiffusionControlNetPoseGenerator,
-    StableDiffusionControlNetScribbleGenerator,
-    StableDiffusionControlNetSegGenerator,
-    StableDiffusionControlNetShuffleGenerator,
-    StableDiffusionControlNetSoftEdgeGenerator,
+    StableDiffusionControlNetGenerator,
+    StableDiffusionControlNetInpaintGenerator,
     StableDiffusionImage2ImageGenerator,
     StableDiffusionInpaintGenerator,
     StableDiffusionText2ImageGenerator,
@@ -33,56 +14,16 @@ def diffusion_app():
     with app:
         with gr.Row():
             with gr.Column():
-                with gr.Tab("Text2Img"):
+                with gr.Tab(label="Text2Image"):
                     StableDiffusionText2ImageGenerator.app()
-                with gr.Tab("Img2Img"):
+                with gr.Tab(label="Image2Image"):
                     StableDiffusionImage2ImageGenerator.app()
-                with gr.Tab("Inpaint"):
+                with gr.Tab(label="Inpaint"):
                     StableDiffusionInpaintGenerator.app()
-                with gr.Tab("ControlNet"):
-                    with gr.Tab("Canny"):
-                        StableDiffusionControlNetCannyGenerator.app()
-                    with gr.Tab("Depth"):
-                        StableDiffusionControlNetDepthGenerator.app()
-                    with gr.Tab("HED"):
-                        StableDiffusionControlNetHEDGenerator.app()
-                    with gr.Tab("MLSD"):
-                        StableDiffusionControlNetMLSDGenerator.app()
-                    with gr.Tab("Pose"):
-                        StableDiffusionControlNetPoseGenerator.app()
-                    with gr.Tab("Scribble"):
-                        StableDiffusionControlNetScribbleGenerator.app()
-                    with gr.Tab("Normal"):
-                        StableDiffusionControlNetNormalGenerator.app()
-                    with gr.Tab("Seg"):
-                        StableDiffusionControlNetSegGenerator.app()
-                    with gr.Tab("Shuffle"):
-                        StableDiffusionControlNetShuffleGenerator.app()
-                    with gr.Tab("Pix2Pix"):
-                        StableDiffusionControlNetPix2PixGenerator.app()
-                    with gr.Tab("LineArt"):
-                        StableDiffusionControlNetLineArtGenerator.app()
-                    with gr.Tab("LineArtAnime"):
-                        StableDiffusionControlNetLineArtAnimeGenerator.app()
-                    with gr.Tab("SoftEdge"):
-                        StableDiffusionControlNetSoftEdgeGenerator.app()
-                with gr.Tab("ControlNet Inpaint"):
-                    with gr.Tab("Canny"):
-                        StableDiffusionControlNetInpaintCannyGenerator.app()
-                    with gr.Tab("Depth"):
-                        StableDiffusionControlInpaintNetDepthGenerator.app()
-                    with gr.Tab("HED"):
-                        StableDiffusionControlNetInpaintHedGenerator.app()
-                    with gr.Tab("MLSD"):
-                        StableDiffusionControlNetInpaintMlsdGenerator.app()
-                    with gr.Tab("Pose"):
-                        StableDiffusionControlNetInpaintPoseGenerator.app()
-                    with gr.Tab("Scribble"):
-                        StableDiffusionControlNetInpaintScribbleGenerator.app()
-                    with gr.Tab("Seg"):
-                        StableDiffusionControlNetInpaintSegGenerator.app()
-                with gr.Tab("Upscaler"):
-                    CodeformerUpscalerGenerator.app()
+                with gr.Tab(label="Controlnet"):
+                    StableDiffusionControlNetGenerator.app()
+                with gr.Tab(label="Controlnet Inpaint"):
+                    StableDiffusionControlNetInpaintGenerator.app()
 
     app.queue(concurrency_count=1)
     app.launch(debug=True, enable_queue=True)
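
This change collapses the twenty per-task ControlNet tabs (plus the Codeformer upscaler tab) into two generic tabs whose behavior is selected at runtime from a preprocessor dropdown. A minimal local-launch sketch, assuming the repository root is on sys.path and that app.py guards its own invocation (both assumptions, not shown in the diff):

# Hypothetical local launch; diffusion_app() builds the five-tab Gradio UI
# shown above and calls app.launch() itself.
from app import diffusion_app

if __name__ == "__main__":
    diffusion_app()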
diffusion_webui/__init__.py ADDED
@@ -0,0 +1,17 @@
+from diffusion_webui.diffusion_models.controlnet_inpaint_pipeline import (
+    StableDiffusionControlNetInpaintGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet_pipeline import (
+    StableDiffusionControlNetGenerator,
+)
+from diffusion_webui.diffusion_models.img2img_app import (
+    StableDiffusionImage2ImageGenerator,
+)
+from diffusion_webui.diffusion_models.inpaint_app import (
+    StableDiffusionInpaintGenerator,
+)
+from diffusion_webui.diffusion_models.text2img_app import (
+    StableDiffusionText2ImageGenerator,
+)
+
+__version__ = "2.5.0"
diffusion_webui/diffusion_models/__init__.py ADDED
File without changes
diffusion_webui/diffusion_models/base_controlnet_pipeline.py ADDED
@@ -0,0 +1,31 @@
+class ControlnetPipeline:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path: str, controlnet_model_path: str):
+        raise NotImplementedError()
+
+    def load_image(self, image_path: str):
+        raise NotImplementedError()
+
+    def controlnet_preprocces(self, read_image: str):
+        raise NotImplementedError()
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        controlnet_conditioning_scale: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        raise NotImplementedError()
+
+    def web_interface(self):
+        raise NotImplementedError()
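
ControlnetPipeline carries one piece of shared state, self.pipe, and every hook raises NotImplementedError; the two concrete generators added below override them. A hypothetical subclass sketch showing the intended override pattern (the class name and its Canny-only scope are illustrative, not part of the commit):

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

from diffusion_webui.diffusion_models.base_controlnet_pipeline import (
    ControlnetPipeline,
)


class CannyOnlyGenerator(ControlnetPipeline):
    def load_model(self, stable_model_path: str, controlnet_model_path: str):
        # Cache the heavy pipeline on self.pipe so repeated calls reuse it.
        if self.pipe is None:
            controlnet = ControlNetModel.from_pretrained(
                controlnet_model_path, torch_dtype=torch.float16
            )
            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
                stable_model_path,
                controlnet=controlnet,
                torch_dtype=torch.float16,
            )
        return self.pipe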
diffusion_webui/diffusion_models/controlnet_inpaint_pipeline.py ADDED
@@ -0,0 +1,256 @@
+import gradio as gr
+import numpy as np
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline
+from PIL import Image
+
+from diffusion_webui.diffusion_models.base_controlnet_pipeline import (
+    ControlnetPipeline,
+)
+from diffusion_webui.utils.model_list import (
+    controlnet_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.preprocces_utils import PREPROCCES_DICT
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_MAPPING,
+    get_scheduler,
+)
+
+
+class StableDiffusionControlNetInpaintGenerator(ControlnetPipeline):
+    def __init__(self):
+        super().__init__()
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = (
+                StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                    pretrained_model_name_or_path=stable_model_path,
+                    controlnet=controlnet,
+                    safety_checker=None,
+                    torch_dtype=torch.float16,
+                )
+            )
+
+        self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def load_image(self, image):
+        image = np.array(image)
+        image = Image.fromarray(image)
+        return image
+
+    def controlnet_preprocces(
+        self,
+        read_image: str,
+        preprocces_type: str,
+    ):
+        processed_image = PREPROCCES_DICT[preprocces_type](read_image)
+        return processed_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        height: int,
+        width: int,
+        strength: int,
+        guess_mode: bool,
+        guidance_scale: int,
+        num_inference_step: int,
+        controlnet_conditioning_scale: int,
+        scheduler: str,
+        seed_generator: int,
+        preprocces_type: str,
+    ):
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image=normal_image)
+        mask_image = self.load_image(image=mask_image)
+
+        control_image = self.controlnet_preprocces(
+            read_image=normal_image, preprocces_type=preprocces_type
+        )
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=normal_image,
+            height=height,
+            width=width,
+            mask_image=mask_image,
+            strength=strength,
+            guess_mode=guess_mode,
+            control_image=control_image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_inpaint_image_path = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    ).style(height=260)
+
+                    controlnet_inpaint_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+                    controlnet_inpaint_negative_prompt = gr.Textbox(
+                        lines=1, placeholder="Negative Prompt", show_label=False
+                    )
+
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_inpaint_stable_model_path = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Path",
+                            )
+                            controlnet_inpaint_preprocces_type = gr.Dropdown(
+                                choices=list(PREPROCCES_DICT.keys()),
+                                value=list(PREPROCCES_DICT.keys())[0],
+                                label="Preprocess Type",
+                            )
+                            controlnet_inpaint_conditioning_scale = gr.Slider(
+                                minimum=0.0,
+                                maximum=1.0,
+                                step=0.1,
+                                value=1.0,
+                                label="ControlNet Conditioning Scale",
+                            )
+                            controlnet_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_inpaint_height = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Height",
+                            )
+                            controlnet_inpaint_width = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Width",
+                            )
+                            controlnet_inpaint_guess_mode = gr.Checkbox(
+                                label="Guess Mode"
+                            )
+
+                        with gr.Column():
+                            controlnet_inpaint_model_path = gr.Dropdown(
+                                choices=controlnet_model_list,
+                                value=controlnet_model_list[0],
+                                label="ControlNet Model Path",
+                            )
+                            controlnet_inpaint_scheduler = gr.Dropdown(
+                                choices=list(SCHEDULER_MAPPING.keys()),
+                                value=list(SCHEDULER_MAPPING.keys())[0],
+                                label="Scheduler",
+                            )
+                            controlnet_inpaint_strength = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Strength",
+                            )
+                            controlnet_inpaint_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=150,
+                                step=1,
+                                value=30,
+                                label="Num Inference Step",
+                            )
+                            controlnet_inpaint_num_images_per_prompt = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=4,
+                                    step=1,
+                                    value=1,
+                                    label="Number Of Images",
+                                )
+                            )
+                            controlnet_inpaint_seed_generator = gr.Slider(
+                                minimum=0,
+                                maximum=1000000,
+                                step=1,
+                                value=0,
+                                label="Seed(0 for random)",
+                            )
+
+                    # Button to generate the image
+                    controlnet_inpaint_predict_button = gr.Button(
+                        value="Generate Image"
+                    )
+
+                with gr.Column():
+                    # Gallery to display the generated images
+                    controlnet_inpaint_output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_inpaint_predict_button.click(
+                fn=StableDiffusionControlNetInpaintGenerator().generate_image,
+                inputs=[
+                    controlnet_inpaint_image_path,
+                    controlnet_inpaint_stable_model_path,
+                    controlnet_inpaint_model_path,
+                    controlnet_inpaint_prompt,
+                    controlnet_inpaint_negative_prompt,
+                    controlnet_inpaint_num_images_per_prompt,
+                    controlnet_inpaint_height,
+                    controlnet_inpaint_width,
+                    controlnet_inpaint_strength,
+                    controlnet_inpaint_guess_mode,
+                    controlnet_inpaint_guidance_scale,
+                    controlnet_inpaint_num_inference_step,
+                    controlnet_inpaint_conditioning_scale,
+                    controlnet_inpaint_scheduler,
+                    controlnet_inpaint_seed_generator,
+                    controlnet_inpaint_preprocces_type,
+                ],
+                outputs=[controlnet_inpaint_output_image],
+            )
diffusion_webui/diffusion_models/controlnet_pipeline.py ADDED
@@ -0,0 +1,225 @@
+import gradio as gr
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from PIL import Image
+
+from diffusion_webui.diffusion_models.base_controlnet_pipeline import (
+    ControlnetPipeline,
+)
+from diffusion_webui.utils.model_list import (
+    controlnet_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.preprocces_utils import PREPROCCES_DICT
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_MAPPING,
+    get_scheduler,
+)
+
+
+class StableDiffusionControlNetGenerator(ControlnetPipeline):
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_preprocces(
+        self,
+        read_image: str,
+        preprocces_type: str,
+    ):
+        processed_image = PREPROCCES_DICT[preprocces_type](read_image)
+        return processed_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        height: int,
+        width: int,
+        guess_mode: bool,
+        controlnet_conditioning_scale: int,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+        preprocces_type: str,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        read_image = Image.open(image_path)
+        controlnet_image = self.controlnet_preprocces(
+            read_image=read_image, preprocces_type=preprocces_type
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            height=height,
+            width=width,
+            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+            guess_mode=guess_mode,
+            image=controlnet_image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_image_path = gr.Image(
+                        type="filepath", label="Image"
+                    ).style(height=260)
+                    controlnet_prompt = gr.Textbox(
+                        lines=1, placeholder="Prompt", show_label=False
+                    )
+                    controlnet_negative_prompt = gr.Textbox(
+                        lines=1, placeholder="Negative Prompt", show_label=False
+                    )
+
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_stable_model_path = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Path",
+                            )
+                            controlnet_preprocces_type = gr.Dropdown(
+                                choices=list(PREPROCCES_DICT.keys()),
+                                value=list(PREPROCCES_DICT.keys())[0],
+                                label="Preprocess Type",
+                            )
+                            controlnet_conditioning_scale = gr.Slider(
+                                minimum=0.0,
+                                maximum=1.0,
+                                step=0.1,
+                                value=1.0,
+                                label="ControlNet Conditioning Scale",
+                            )
+                            controlnet_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_height = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Height",
+                            )
+                            controlnet_width = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Width",
+                            )
+
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_model_path = gr.Dropdown(
+                                choices=controlnet_model_list,
+                                value=controlnet_model_list[0],
+                                label="ControlNet Model Path",
+                            )
+                            controlnet_scheduler = gr.Dropdown(
+                                choices=list(SCHEDULER_MAPPING.keys()),
+                                value=list(SCHEDULER_MAPPING.keys())[0],
+                                label="Scheduler",
+                            )
+                            controlnet_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=150,
+                                step=1,
+                                value=30,
+                                label="Num Inference Step",
+                            )
+
+                            controlnet_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=4,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                            controlnet_seed_generator = gr.Slider(
+                                minimum=0,
+                                maximum=1000000,
+                                step=1,
+                                value=0,
+                                label="Seed(0 for random)",
+                            )
+                            controlnet_guess_mode = gr.Checkbox(
+                                label="Guess Mode"
+                            )
+
+                    # Button to generate the image
+                    predict_button = gr.Button(value="Generate Image")
+
+                with gr.Column():
+                    # Gallery to display the generated images
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            predict_button.click(
+                fn=StableDiffusionControlNetGenerator().generate_image,
+                inputs=[
+                    controlnet_image_path,
+                    controlnet_stable_model_path,
+                    controlnet_model_path,
+                    controlnet_height,
+                    controlnet_width,
+                    controlnet_guess_mode,
+                    controlnet_conditioning_scale,
+                    controlnet_prompt,
+                    controlnet_negative_prompt,
+                    controlnet_num_images_per_prompt,
+                    controlnet_guidance_scale,
+                    controlnet_num_inference_step,
+                    controlnet_scheduler,
+                    controlnet_seed_generator,
+                    controlnet_preprocces_type,
+                ],
+                outputs=[output_image],
+            )
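
For reference, the new generator can also be driven headlessly; a minimal sketch, assuming a CUDA GPU with xformers installed, a hypothetical input.png on disk, and entries taken from model_list.py, SCHEDULER_MAPPING, and PREPROCCES_DICT:

from diffusion_webui import StableDiffusionControlNetGenerator

generator = StableDiffusionControlNetGenerator()
images = generator.generate_image(
    image_path="input.png",  # hypothetical path; opened with PIL internally
    stable_model_path="runwayml/stable-diffusion-v1-5",
    controlnet_model_path="lllyasviel/control_v11p_sd15_canny",
    height=512,
    width=512,
    guess_mode=False,
    controlnet_conditioning_scale=1.0,
    prompt="a photo of a cat",
    negative_prompt="blurry, low quality",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    scheduler="DDIM",
    seed_generator=42,
    preprocces_type="Canny",
)
images[0].save("output.png")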
diffusion_webui/diffusion_models/img2img_app.py ADDED
@@ -0,0 +1,153 @@
+import gradio as gr
+import torch
+from diffusers import StableDiffusionImg2ImgPipeline
+from PIL import Image
+
+from diffusion_webui.utils.model_list import stable_model_list
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_MAPPING,
+    get_scheduler,
+)
+
+
+class StableDiffusionImage2ImageGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, model_path, scheduler):
+        if self.pipe is None:
+            self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+                model_path, safety_checker=None, torch_dtype=torch.float16
+            )
+
+        self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def generate_image(
+        self,
+        image_path: str,
+        model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        scheduler: str,
+        guidance_scale: int,
+        num_inference_step: int,
+        seed_generator=0,
+    ):
+        pipe = self.load_model(
+            model_path=model_path,
+            scheduler=scheduler,
+        )
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        image = Image.open(image_path)
+        images = pipe(
+            prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return images
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    image2image_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    ).style(height=260)
+
+                    image2image_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    image2image_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+
+                    with gr.Row():
+                        with gr.Column():
+                            image2image_model_path = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            image2image_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            image2image_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                    with gr.Row():
+                        with gr.Column():
+                            image2image_scheduler = gr.Dropdown(
+                                choices=list(SCHEDULER_MAPPING.keys()),
+                                value=list(SCHEDULER_MAPPING.keys())[0],
+                                label="Scheduler",
+                            )
+                            image2image_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=30,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+
+                            image2image_seed_generator = gr.Slider(
+                                minimum=0,
+                                maximum=1000000,
+                                step=1,
+                                value=0,
+                                label="Seed(0 for random)",
+                            )
+
+                    image2image_predict_button = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            image2image_predict_button.click(
+                fn=StableDiffusionImage2ImageGenerator().generate_image,
+                inputs=[
+                    image2image_image_file,
+                    image2image_model_path,
+                    image2image_prompt,
+                    image2image_negative_prompt,
+                    image2image_num_images_per_prompt,
+                    image2image_scheduler,
+                    image2image_guidance_scale,
+                    image2image_num_inference_step,
+                    image2image_seed_generator,
+                ],
+                outputs=[output_image],
+            )
diffusion_webui/diffusion_models/inpaint_app.py ADDED
@@ -0,0 +1,148 @@
+import gradio as gr
+import torch
+from diffusers import DiffusionPipeline
+
+from diffusion_webui.utils.model_list import stable_inpiant_model_list
+
+
+class StableDiffusionInpaintGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, model_path):
+        if self.pipe is None:
+            self.pipe = DiffusionPipeline.from_pretrained(
+                model_path, revision="fp16", torch_dtype=torch.float16
+            )
+
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def generate_image(
+        self,
+        pil_image: str,
+        model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        seed_generator=0,
+    ):
+        image = pil_image["image"].convert("RGB").resize((512, 512))
+        mask_image = pil_image["mask"].convert("RGB").resize((512, 512))
+        pipe = self.load_model(model_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            mask_image=mask_image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    stable_diffusion_inpaint_image_file = gr.Image(
+                        source="upload",
+                        tool="sketch",
+                        elem_id="image_upload",
+                        type="pil",
+                        label="Upload",
+                    ).style(height=260)
+
+                    stable_diffusion_inpaint_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    stable_diffusion_inpaint_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    stable_diffusion_inpaint_model_id = gr.Dropdown(
+                        choices=stable_inpiant_model_list,
+                        value=stable_inpiant_model_list[0],
+                        label="Inpaint Model Id",
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            stable_diffusion_inpaint_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            stable_diffusion_inpaint_num_inference_step = (
+                                gr.Slider(
+                                    minimum=1,
+                                    maximum=100,
+                                    step=1,
+                                    value=50,
+                                    label="Num Inference Step",
+                                )
+                            )
+
+                    with gr.Row():
+                        with gr.Column():
+                            stable_diffusion_inpiant_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                            stable_diffusion_inpaint_seed_generator = (
+                                gr.Slider(
+                                    minimum=0,
+                                    maximum=1000000,
+                                    step=1,
+                                    value=0,
+                                    label="Seed(0 for random)",
+                                )
+                            )
+
+                    stable_diffusion_inpaint_predict = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            stable_diffusion_inpaint_predict.click(
+                fn=StableDiffusionInpaintGenerator().generate_image,
+                inputs=[
+                    stable_diffusion_inpaint_image_file,
+                    stable_diffusion_inpaint_model_id,
+                    stable_diffusion_inpaint_prompt,
+                    stable_diffusion_inpaint_negative_prompt,
+                    stable_diffusion_inpiant_num_images_per_prompt,
+                    stable_diffusion_inpaint_guidance_scale,
+                    stable_diffusion_inpaint_num_inference_step,
+                    stable_diffusion_inpaint_seed_generator,
+                ],
+                outputs=[output_image],
+            )
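
Note that generate_image takes pil_image as the dict Gradio's sketch tool emits, with "image" and "mask" PIL entries (white mask pixels mark the region to repaint). A headless sketch under that assumption, with hypothetical file paths and a CUDA GPU required:

from PIL import Image

from diffusion_webui import StableDiffusionInpaintGenerator

pil_image = {
    "image": Image.open("photo.png"),  # hypothetical source image
    "mask": Image.open("mask.png"),    # white = area to repaint
}
output = StableDiffusionInpaintGenerator().generate_image(
    pil_image=pil_image,
    model_path="stabilityai/stable-diffusion-2-inpainting",
    prompt="a red sofa",
    negative_prompt="low quality",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=50,
    seed_generator=0,  # 0 draws a random seed
)
output[0].save("inpainted.png")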
diffusion_webui/diffusion_models/text2img_app.py ADDED
@@ -0,0 +1,166 @@
+import gradio as gr
+import torch
+from diffusers import StableDiffusionPipeline
+
+from diffusion_webui.utils.model_list import stable_model_list
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_MAPPING,
+    get_scheduler,
+)
+
+
+class StableDiffusionText2ImageGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(
+        self,
+        model_path,
+        scheduler,
+    ):
+        if self.pipe is None:
+            self.pipe = StableDiffusionPipeline.from_pretrained(
+                model_path, safety_checker=None, torch_dtype=torch.float16
+            )
+
+        self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def generate_image(
+        self,
+        model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        scheduler: str,
+        guidance_scale: int,
+        num_inference_step: int,
+        height: int,
+        width: int,
+        seed_generator=0,
+    ):
+        pipe = self.load_model(
+            model_path=model_path,
+            scheduler=scheduler,
+        )
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        images = pipe(
+            prompt=prompt,
+            height=height,
+            width=width,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return images
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    text2image_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    text2image_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            text2image_model_path = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Text-Image Model Id",
+                            )
+
+                            text2image_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+
+                            text2image_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            text2image_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=30,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                    with gr.Row():
+                        with gr.Column():
+                            text2image_scheduler = gr.Dropdown(
+                                choices=list(SCHEDULER_MAPPING.keys()),
+                                value=list(SCHEDULER_MAPPING.keys())[0],
+                                label="Scheduler",
+                            )
+
+                            text2image_height = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Image Height",
+                            )
+
+                            text2image_width = gr.Slider(
+                                minimum=128,
+                                maximum=1280,
+                                step=32,
+                                value=512,
+                                label="Image Width",
+                            )
+                            text2image_seed_generator = gr.Slider(
+                                label="Seed(0 for random)",
+                                minimum=0,
+                                maximum=1000000,
+                                value=0,
+                            )
+                    text2image_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2), height=200)
+
+            text2image_predict.click(
+                fn=StableDiffusionText2ImageGenerator().generate_image,
+                inputs=[
+                    text2image_model_path,
+                    text2image_prompt,
+                    text2image_negative_prompt,
+                    text2image_num_images_per_prompt,
+                    text2image_scheduler,
+                    text2image_guidance_scale,
+                    text2image_num_inference_step,
+                    text2image_height,
+                    text2image_width,
+                    text2image_seed_generator,
+                ],
+                outputs=output_image,
+            )
diffusion_webui/utils/__init__.py ADDED
File without changes
diffusion_webui/utils/data_utils.py ADDED
@@ -0,0 +1,12 @@
+from PIL import Image
+
+
+def image_grid(imgs, rows, cols):
+    assert len(imgs) == rows * cols
+
+    w, h = imgs[0].size
+    grid = Image.new("RGB", size=(cols * w, rows * h))
+
+    for i, img in enumerate(imgs):
+        grid.paste(img, box=(i % cols * w, i // cols * h))
+    return grid
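
A quick usage sketch for image_grid, e.g. to tile a batch of outputs into one contact sheet (gray placeholders stand in for generated images):

from PIL import Image

from diffusion_webui.utils.data_utils import image_grid

imgs = [Image.new("RGB", (512, 512), color="gray") for _ in range(4)]
grid = image_grid(imgs, rows=2, cols=2)  # yields a 1024x1024 sheet
grid.save("grid.png")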
diffusion_webui/utils/model_list.py ADDED
@@ -0,0 +1,36 @@
+stable_model_list = [
+    "runwayml/stable-diffusion-v1-5",
+    "stabilityai/stable-diffusion-2-1",
+    "prompthero/openjourney-v4",
+    "dreamlike-art/dreamlike-diffusion-1.0",
+]
+
+stable_inpiant_model_list = [
+    "stabilityai/stable-diffusion-2-inpainting",
+    "runwayml/stable-diffusion-inpainting",
+    "saik0s/realistic_vision_inpainting",
+]
+
+controlnet_model_list = [
+    # "lllyasviel/sd-controlnet-canny",
+    "lllyasviel/control_v11p_sd15_canny",
+    "thibaud/controlnet-sd21-canny-diffusers",
+    # "lllyasviel/sd-controlnet-depth",
+    "lllyasviel/control_v11f1p_sd15_depth",
+    "thibaud/controlnet-sd21-depth-diffusers",
+    # "lllyasviel/sd-controlnet-openpose",
+    "lllyasviel/control_v11p_sd15_openpose",
+    "thibaud/controlnet-sd21-openpose-diffusers",
+    # "lllyasviel/sd-controlnet-hed",
+    "thibaud/controlnet-sd21-hed-diffusers",
+    # "lllyasviel/sd-controlnet-scribble",
+    "lllyasviel/control_v11p_sd15_scribble",
+    "thibaud/controlnet-sd21-scribble-diffusers",
+    # "lllyasviel/sd-controlnet-mlsd",
+    "lllyasviel/control_v11p_sd15_mlsd",
+    "lllyasviel/control_v11e_sd15_shuffle",
+    "lllyasviel/control_v11e_sd15_ip2p",
+    "lllyasviel/control_v11p_sd15_lineart",
+    "lllyasviel/control_v11p_sd15s2_lineart_anime",
+    "lllyasviel/control_v11p_sd15_softedge",
+]
diffusion_webui/utils/preprocces_utils.py ADDED
@@ -0,0 +1,32 @@
+from controlnet_aux import (
+    CannyDetector,
+    ContentShuffleDetector,
+    HEDdetector,
+    LineartAnimeDetector,
+    LineartDetector,
+    MediapipeFaceDetector,
+    MidasDetector,
+    MLSDdetector,
+    NormalBaeDetector,
+    OpenposeDetector,
+    PidiNetDetector,
+    SamDetector,
+    ZoeDetector,
+)
+
+PREPROCCES_DICT = {
+    "Hed": HEDdetector.from_pretrained("lllyasviel/Annotators"),
+    "Midas": MidasDetector.from_pretrained("lllyasviel/Annotators"),
+    "MLSD": MLSDdetector.from_pretrained("lllyasviel/Annotators"),
+    "Openpose": OpenposeDetector.from_pretrained("lllyasviel/Annotators"),
+    "PidiNet": PidiNetDetector.from_pretrained("lllyasviel/Annotators"),
+    "NormalBae": NormalBaeDetector.from_pretrained("lllyasviel/Annotators"),
+    "Lineart": LineartDetector.from_pretrained("lllyasviel/Annotators"),
+    "LineartAnime": LineartAnimeDetector.from_pretrained(
+        "lllyasviel/Annotators"
+    ),
+    "Zoe": ZoeDetector.from_pretrained("lllyasviel/Annotators"),
+    "Canny": CannyDetector(),
+    "ContentShuffle": ContentShuffleDetector(),
+    "MediapipeFace": MediapipeFaceDetector(),
+}
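
Every detector here is instantiated, and its weights fetched, at module import time, so indexing the dict afterwards is cheap. A minimal sketch with a hypothetical input path:

from PIL import Image

from diffusion_webui.utils.preprocces_utils import PREPROCCES_DICT

image = Image.open("input.png")  # hypothetical input
control_image = PREPROCCES_DICT["Canny"](image)  # edge map for ControlNet
control_image.save("canny.png")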
diffusion_webui/utils/scheduler_list.py ADDED
@@ -0,0 +1,56 @@
+from diffusers import (
+    DDIMInverseScheduler,
+    DDIMScheduler,
+    DDPMScheduler,
+    DEISMultistepScheduler,
+    DPMSolverMultistepInverseScheduler,
+    DPMSolverMultistepScheduler,
+    DPMSolverSinglestepScheduler,
+    EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
+    HeunDiscreteScheduler,
+    IPNDMScheduler,
+    KarrasVeScheduler,
+    KDPM2AncestralDiscreteScheduler,
+    KDPM2DiscreteScheduler,
+    PNDMScheduler,
+    RePaintScheduler,
+    SchedulerMixin,
+    ScoreSdeVeScheduler,
+    UnCLIPScheduler,
+    UniPCMultistepScheduler,
+    VQDiffusionScheduler,
+)
+
+SCHEDULER_MAPPING = {
+    "DDIM": DDIMScheduler,
+    "DDIMInverse": DDIMInverseScheduler,
+    "DDPMScheduler": DDPMScheduler,
+    "DEISMultistep": DEISMultistepScheduler,
+    "DPMSolverMultistepInverse": DPMSolverMultistepInverseScheduler,
+    "DPMSolverMultistep": DPMSolverMultistepScheduler,
+    "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
+    "EulerAncestralDiscrete": EulerAncestralDiscreteScheduler,
+    "EulerDiscrete": EulerDiscreteScheduler,
+    "HeunDiscrete": HeunDiscreteScheduler,
+    "IPNDMScheduler": IPNDMScheduler,
+    "KarrasVe": KarrasVeScheduler,
+    "KDPM2AncestralDiscrete": KDPM2AncestralDiscreteScheduler,
+    "KDPM2Discrete": KDPM2DiscreteScheduler,
+    "PNDMScheduler": PNDMScheduler,
+    "RePaint": RePaintScheduler,
+    "ScoreSdeVe": ScoreSdeVeScheduler,
+    "UnCLIP": UnCLIPScheduler,
+    "UniPCMultistep": UniPCMultistepScheduler,
+    "VQDiffusion": VQDiffusionScheduler,
+}
+
+
+def get_scheduler(pipe, scheduler):
+    if scheduler in SCHEDULER_MAPPING:
+        SchedulerClass = SCHEDULER_MAPPING[scheduler]
+        pipe.scheduler = SchedulerClass.from_config(pipe.scheduler.config)
+    else:
+        raise ValueError(f"Invalid scheduler name {scheduler}")
+
+    return pipe
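
get_scheduler swaps the scheduler on an already-built pipeline in place, reusing the pipeline's own scheduler config; a short sketch with one of the mapped keys:

import torch
from diffusers import StableDiffusionPipeline

from diffusion_webui.utils.scheduler_list import get_scheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe = get_scheduler(pipe=pipe, scheduler="DDIM")  # any SCHEDULER_MAPPING key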
requirements.txt CHANGED
@@ -1,8 +1,9 @@
 transformers
 bitsandbytes==0.35.0
 xformers
-controlnet_aux==0.0.3
-diffusers
+controlnet_aux
+git+https://github.com/huggingface/diffusers
 imageio
-triton
-codeformer-pip
+gradio
+controlnet_aux
+mediapipe