Commit 5a1997f (1 parent: e1e884a) committed by kadirnar

Upload 36 files
app.py CHANGED
@@ -12,14 +12,19 @@ from diffusion_webui import (
     StableDiffusionControlNetInpaintPoseGenerator,
     StableDiffusionControlNetInpaintScribbleGenerator,
     StableDiffusionControlNetInpaintSegGenerator,
+    StableDiffusionControlNetLineArtAnimeGenerator,
+    StableDiffusionControlNetLineArtGenerator,
     StableDiffusionControlNetMLSDGenerator,
+    StableDiffusionControlNetNormalGenerator,
+    StableDiffusionControlNetPix2PixGenerator,
     StableDiffusionControlNetPoseGenerator,
     StableDiffusionControlNetScribbleGenerator,
     StableDiffusionControlNetSegGenerator,
+    StableDiffusionControlNetShuffleGenerator,
+    StableDiffusionControlNetSoftEdgeGenerator,
     StableDiffusionImage2ImageGenerator,
     StableDiffusionInpaintGenerator,
     StableDiffusionText2ImageGenerator,
-    StableDiffusionControlNetNormalGenerator,
 )
 
 
@@ -51,6 +56,16 @@ def diffusion_app():
                 StableDiffusionControlNetNormalGenerator.app()
             with gr.Tab("Seg"):
                 StableDiffusionControlNetSegGenerator.app()
+            with gr.Tab("Shuffle"):
+                StableDiffusionControlNetShuffleGenerator.app()
+            with gr.Tab("Pix2Pix"):
+                StableDiffusionControlNetPix2PixGenerator.app()
+            with gr.Tab("LineArt"):
+                StableDiffusionControlNetLineArtGenerator.app()
+            with gr.Tab("LineArtAnime"):
+                StableDiffusionControlNetLineArtAnimeGenerator.app()
+            with gr.Tab("SoftEdge"):
+                StableDiffusionControlNetSoftEdgeGenerator.app()
         with gr.Tab("ControlNet Inpaint"):
             with gr.Tab("Canny"):
                 StableDiffusionControlNetInpaintCannyGenerator.app()
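Note: app.py only ever touches each generator through a parameterless app() hook called on the class, so wiring in a new model is one import plus one gr.Tab block. A minimal sketch of that pattern (DummyGenerator is a hypothetical stand-in, not part of the commit):

import gradio as gr

class DummyGenerator:
    # Called on the class itself, like StableDiffusionControlNetShuffleGenerator.app();
    # it renders its widgets into whatever gr.Blocks/gr.Tab context is active.
    def app():
        gr.Textbox(label="Prompt")

with gr.Blocks() as demo:
    with gr.Tab("Dummy"):
        DummyGenerator.app()
# demo.launch()  # uncomment to serve the UI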
diffusion_webui/__init__.py CHANGED
@@ -1,29 +1,32 @@
-from diffusion_webui.diffusion_models.stable_diffusion import (
-    StableDiffusionText2ImageGenerator,
-    StableDiffusionImage2ImageGenerator,
-    StableDiffusionInpaintGenerator,
-)
-
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint import (
-    StableDiffusionControlNetInpaintCannyGenerator,
-    StableDiffusionControlInpaintNetDepthGenerator,
-    StableDiffusionControlNetInpaintHedGenerator,
-    StableDiffusionControlNetInpaintMlsdGenerator,
-    StableDiffusionControlNetInpaintPoseGenerator,
-    StableDiffusionControlNetInpaintScribbleGenerator,
-    StableDiffusionControlNetInpaintSegGenerator,
-)
-
 from diffusion_webui.diffusion_models.controlnet import (
     StableDiffusionControlNetCannyGenerator,
     StableDiffusionControlNetDepthGenerator,
     StableDiffusionControlNetHEDGenerator,
+    StableDiffusionControlNetLineArtAnimeGenerator,
+    StableDiffusionControlNetLineArtGenerator,
     StableDiffusionControlNetMLSDGenerator,
     StableDiffusionControlNetNormalGenerator,
+    StableDiffusionControlNetPix2PixGenerator,
     StableDiffusionControlNetPoseGenerator,
     StableDiffusionControlNetScribbleGenerator,
     StableDiffusionControlNetSegGenerator,
+    StableDiffusionControlNetShuffleGenerator,
+    StableDiffusionControlNetSoftEdgeGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint import (
+    StableDiffusionControlInpaintNetDepthGenerator,
+    StableDiffusionControlNetInpaintCannyGenerator,
+    StableDiffusionControlNetInpaintHedGenerator,
+    StableDiffusionControlNetInpaintMlsdGenerator,
+    StableDiffusionControlNetInpaintPoseGenerator,
+    StableDiffusionControlNetInpaintScribbleGenerator,
+    StableDiffusionControlNetInpaintSegGenerator,
+)
+from diffusion_webui.diffusion_models.stable_diffusion import (
+    StableDiffusionImage2ImageGenerator,
+    StableDiffusionInpaintGenerator,
+    StableDiffusionText2ImageGenerator,
 )
 from diffusion_webui.upscaler_models import CodeformerUpscalerGenerator
 
-__version__ = "2.2.0"
+__version__ = "2.4.0"
diffusion_webui/diffusion_models/controlnet/__init__.py CHANGED
@@ -1,8 +1,39 @@
-from diffusion_webui.diffusion_models.controlnet.controlnet_canny import StableDiffusionControlNetCannyGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_depth import StableDiffusionControlNetDepthGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_hed import StableDiffusionControlNetHEDGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_mlsd import StableDiffusionControlNetMLSDGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_normal import StableDiffusionControlNetNormalGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_pose import StableDiffusionControlNetPoseGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_scribble import StableDiffusionControlNetScribbleGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_seg import StableDiffusionControlNetSegGenerator
+from diffusion_webui.diffusion_models.controlnet.controlnet_canny import (
+    StableDiffusionControlNetCannyGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_depth import (
+    StableDiffusionControlNetDepthGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_hed import (
+    StableDiffusionControlNetHEDGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_lineart import (
+    StableDiffusionControlNetLineArtGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_lineart_anime import (
+    StableDiffusionControlNetLineArtAnimeGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_mlsd import (
+    StableDiffusionControlNetMLSDGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_normal import (
+    StableDiffusionControlNetNormalGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_pix2pix import (
+    StableDiffusionControlNetPix2PixGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_pose import (
+    StableDiffusionControlNetPoseGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_scribble import (
+    StableDiffusionControlNetScribbleGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_seg import (
+    StableDiffusionControlNetSegGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_shuffle import (
+    StableDiffusionControlNetShuffleGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_softedge import (
+    StableDiffusionControlNetSoftEdgeGenerator,
+)
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/__init__.py CHANGED
@@ -1,7 +1,21 @@
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_canny import StableDiffusionControlNetInpaintCannyGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_depth import StableDiffusionControlInpaintNetDepthGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_hed import StableDiffusionControlNetInpaintHedGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_mlsd import StableDiffusionControlNetInpaintMlsdGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_pose import StableDiffusionControlNetInpaintPoseGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_scribble import StableDiffusionControlNetInpaintScribbleGenerator
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_seg import StableDiffusionControlNetInpaintSegGenerator
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_canny import (
+    StableDiffusionControlNetInpaintCannyGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_depth import (
+    StableDiffusionControlInpaintNetDepthGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_hed import (
+    StableDiffusionControlNetInpaintHedGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_mlsd import (
+    StableDiffusionControlNetInpaintMlsdGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_pose import (
+    StableDiffusionControlNetInpaintPoseGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_scribble import (
+    StableDiffusionControlNetInpaintScribbleGenerator,
+)
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_seg import (
+    StableDiffusionControlNetInpaintSegGenerator,
+)
diffusion_webui/diffusion_models/controlnet/controlnet_lineart.py ADDED
@@ -0,0 +1,178 @@
+import gradio as gr
+import torch
+from controlnet_aux import LineartDetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+
+from diffusion_webui.utils.model_list import (
+    controlnet_lineart_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+
+class StableDiffusionControlNetLineArtGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_lineart(
+        self,
+        image_path: str,
+    ):
+        image = load_image(image_path)
+        image = image.resize((512, 512))
+        processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+        control_image = processor(image)
+        return control_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        image = self.controlnet_lineart(image_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    )
+
+                    controlnet_canny_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    controlnet_canny_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_stable_model_id = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            controlnet_canny_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_canny_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            controlnet_canny_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_model_id = gr.Dropdown(
+                                    choices=controlnet_lineart_model_list,
+                                    value=controlnet_lineart_model_list[0],
+                                    label="ControlNet Model Id",
+                                )
+
+                                controlnet_canny_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+
+                                controlnet_canny_seed_generator = gr.Number(
+                                    value=0,
+                                    label="Seed Generator",
+                                )
+                                controlnet_canny_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_predict.click(
+                fn=StableDiffusionControlNetLineArtGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_image_file,
+                    controlnet_canny_stable_model_id,
+                    controlnet_canny_model_id,
+                    controlnet_canny_prompt,
+                    controlnet_canny_negative_prompt,
+                    controlnet_canny_num_images_per_prompt,
+                    controlnet_canny_guidance_scale,
+                    controlnet_canny_num_inference_step,
+                    controlnet_canny_scheduler,
+                    controlnet_canny_seed_generator,
+                ],
+                outputs=[output_image],
+            )
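Note: outside the UI, the new LineArt path reduces to roughly the following sketch. The detector and ControlNet ids are the ones this commit uses; the base model id and file path are assumptions. Note also that load_model caches self.pipe, so later changes to the model dropdowns are ignored until the process restarts.

import torch
from controlnet_aux import LineartDetector
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

image = load_image("input.png").resize((512, 512))  # placeholder path
control_image = LineartDetector.from_pretrained("lllyasviel/Annotators")(image)

controlnet = ControlNetModel.from_pretrained(
    "ControlNet-1-1-preview/control_v11p_sd15_lineart", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base; any stable_model_list entry works
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
result = pipe(prompt="a watercolor landscape", image=control_image).images[0]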
diffusion_webui/diffusion_models/controlnet/controlnet_lineart_anime.py ADDED
@@ -0,0 +1,191 @@
+import gradio as gr
+import torch
+from controlnet_aux import LineartAnimeDetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+from transformers import CLIPTextModel
+
+from diffusion_webui.utils.model_list import (
+    controlnet_lineart_anime_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+
+class StableDiffusionControlNetLineArtAnimeGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            text_encoder = CLIPTextModel.from_pretrained(
+                stable_model_path,
+                subfolder="text_encoder",
+                num_hidden_layers=11,
+                torch_dtype=torch.float16,
+            )
+
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                text_encoder=text_encoder,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_lineart_anime(
+        self,
+        image_path: str,
+    ):
+        image = load_image(image_path)
+        image = image.resize((512, 512))
+        processor = LineartAnimeDetector.from_pretrained(
+            "lllyasviel/Annotators"
+        )
+        control_image = processor(image)
+        return control_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        image = self.controlnet_lineart_anime(image_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    )
+
+                    controlnet_canny_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    controlnet_canny_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_stable_model_id = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            controlnet_canny_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_canny_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            controlnet_canny_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_model_id = gr.Dropdown(
+                                    choices=controlnet_lineart_anime_model_list,
+                                    value=controlnet_lineart_anime_model_list[
+                                        0
+                                    ],
+                                    label="ControlNet Model Id",
+                                )
+
+                                controlnet_canny_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+
+                                controlnet_canny_seed_generator = gr.Number(
+                                    value=0,
+                                    label="Seed Generator",
+                                )
+                                controlnet_canny_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_predict.click(
+                fn=StableDiffusionControlNetLineArtAnimeGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_image_file,
+                    controlnet_canny_stable_model_id,
+                    controlnet_canny_model_id,
+                    controlnet_canny_prompt,
+                    controlnet_canny_negative_prompt,
+                    controlnet_canny_num_images_per_prompt,
+                    controlnet_canny_guidance_scale,
+                    controlnet_canny_num_inference_step,
+                    controlnet_canny_scheduler,
+                    controlnet_canny_seed_generator,
+                ],
+                outputs=[output_image],
+            )
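Note: unlike the other new pipelines, the anime LineArt generator rebuilds the text encoder with num_hidden_layers=11, dropping the last of CLIP's 12 layers (the "clip skip" setting the lineart_anime checkpoint is usually run with). Isolated, the trick is just the following (the base model id is an assumption):

import torch
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base model
    subfolder="text_encoder",
    num_hidden_layers=11,  # truncate the 12-layer CLIP encoder by one layer
    torch_dtype=torch.float16,
)
assert text_encoder.config.num_hidden_layers == 11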
diffusion_webui/diffusion_models/controlnet/controlnet_mlsd.py CHANGED
@@ -4,7 +4,10 @@ from controlnet_aux import MLSDdetector
 from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
 from PIL import Image
 
-from diffusion_webui.utils.model_list import stable_model_list, controlnet_mlsd_model_list
+from diffusion_webui.utils.model_list import (
+    controlnet_mlsd_model_list,
+    stable_model_list,
+)
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
     get_scheduler_list,
@@ -125,11 +128,12 @@ class StableDiffusionControlNetMLSDGenerator:
 
         with gr.Row():
             with gr.Column():
-                controlnet_mlsd_controlnet_model_id = gr.Dropdown(
-                    choices=controlnet_mlsd_model_list,
-                    value=controlnet_mlsd_model_list[0],
-                    label="ControlNet Model Id",
-
+                controlnet_mlsd_controlnet_model_id = (
+                    gr.Dropdown(
+                        choices=controlnet_mlsd_model_list,
+                        value=controlnet_mlsd_model_list[0],
+                        label="ControlNet Model Id",
+                    )
                 )
                 controlnet_mlsd_scheduler = gr.Dropdown(
                     choices=SCHEDULER_LIST,
diffusion_webui/diffusion_models/controlnet/controlnet_normal.py CHANGED
@@ -1,12 +1,11 @@
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
-from diffusers.utils import load_image
-from transformers import pipeline
-from PIL import Image
+import cv2
 import gradio as gr
 import numpy as np
 import torch
-import cv2
-
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+from PIL import Image
+from transformers import pipeline
 
 from diffusion_webui.utils.model_list import (
     controlnet_normal_model_list,
@@ -45,8 +44,10 @@ class StableDiffusionControlNetNormalGenerator:
         image_path: str,
     ):
         image = load_image(image_path).convert("RGB")
-        depth_estimator = pipeline("depth-estimation", model ="Intel/dpt-hybrid-midas" )
-        image = depth_estimator(image)['predicted_depth'][0]
+        depth_estimator = pipeline(
+            "depth-estimation", model="Intel/dpt-hybrid-midas"
+        )
+        image = depth_estimator(image)["predicted_depth"][0]
         image = image.numpy()
         image_depth = image.copy()
         image_depth -= np.min(image_depth)
@@ -76,7 +77,9 @@ class StableDiffusionControlNetNormalGenerator:
         scheduler: str,
         seed_generator: int,
     ):
-        pipe = self.load_model(stable_model_path, controlnet_model_path, scheduler)
+        pipe = self.load_model(
+            stable_model_path, controlnet_model_path, scheduler
+        )
         image = self.controlnet_normal(image_path)
 
         if seed_generator == 0:
@@ -84,7 +87,7 @@ class StableDiffusionControlNetNormalGenerator:
             generator = torch.manual_seed(random_seed)
         else:
            generator = torch.manual_seed(seed_generator)
-
+
        output = pipe(
            prompt=prompt,
            image=image,
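Note: these hunks only show the depth-estimation half of controlnet_normal; the lines the diff elides convert that depth map into a normal map. For context, the standard recipe (as in the diffusers docs) looks like the sketch below; treat the constants as assumptions rather than this file's exact values.

import cv2
import numpy as np
from PIL import Image

def depth_to_normal(depth: np.ndarray, bg_threshold: float = 0.4) -> Image.Image:
    depth = depth.astype(np.float32)
    depth -= np.min(depth)
    depth /= np.max(depth)

    # Surface gradients via Sobel filters; zero out the (far) background.
    x = cv2.Sobel(depth, cv2.CV_32F, 1, 0, ksize=3)
    x[depth < bg_threshold] = 0
    y = cv2.Sobel(depth, cv2.CV_32F, 0, 1, ksize=3)
    y[depth < bg_threshold] = 0
    z = np.ones_like(x) * (2.0 * np.pi)

    normal = np.stack([x, y, z], axis=2)
    normal /= np.sum(normal**2.0, axis=2, keepdims=True) ** 0.5
    return Image.fromarray((normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8))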
diffusion_webui/diffusion_models/controlnet/controlnet_pix2pix.py ADDED
@@ -0,0 +1,174 @@
+import gradio as gr
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+
+from diffusion_webui.utils.model_list import (
+    controlnet_lineart_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+
+class StableDiffusionControlNetPix2PixGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_pix2pix(
+        self,
+        image_path: str,
+    ):
+        control_image = load_image(image_path).convert("RGB")
+        return control_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        image = self.controlnet_pix2pix(image_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    )
+
+                    controlnet_canny_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    controlnet_canny_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_stable_model_id = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            controlnet_canny_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_canny_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            controlnet_canny_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_model_id = gr.Dropdown(
+                                    choices=controlnet_lineart_model_list,
+                                    value=controlnet_lineart_model_list[0],
+                                    label="ControlNet Model Id",
+                                )
+
+                                controlnet_canny_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+
+                                controlnet_canny_seed_generator = gr.Number(
+                                    value=0,
+                                    label="Seed Generator",
+                                )
+                                controlnet_canny_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_predict.click(
+                fn=StableDiffusionControlNetPix2PixGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_image_file,
+                    controlnet_canny_stable_model_id,
+                    controlnet_canny_model_id,
+                    controlnet_canny_prompt,
+                    controlnet_canny_negative_prompt,
+                    controlnet_canny_num_images_per_prompt,
+                    controlnet_canny_guidance_scale,
+                    controlnet_canny_num_inference_step,
+                    controlnet_canny_scheduler,
+                    controlnet_canny_seed_generator,
+                ],
+                outputs=[output_image],
+            )
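Note: the ip2p ControlNet is trained on InstructPix2Pix-style data, so controlnet_pix2pix() deliberately passes the raw photo through as the conditioning image with no detector step. Also worth flagging: this file reuses controlnet_lineart_model_list for its dropdown even though model_list.py (below) gains a dedicated controlnet_pix2pix_model_list, which looks like a copy-paste leftover. A hedged call sketch (path and prompts are placeholders):

from diffusion_webui import StableDiffusionControlNetPix2PixGenerator
from diffusion_webui.utils.scheduler_list import SCHEDULER_LIST

gen = StableDiffusionControlNetPix2PixGenerator()
images = gen.generate_image(
    image_path="room.png",                               # placeholder path
    stable_model_path="runwayml/stable-diffusion-v1-5",  # assumed base model
    controlnet_model_path="lllyasviel/control_v11e_sd15_ip2p",
    prompt="make it look like winter",
    negative_prompt="low quality",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    scheduler=SCHEDULER_LIST[0],  # must be one of SCHEDULER_LIST
    seed_generator=42,
)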
diffusion_webui/diffusion_models/controlnet/controlnet_shuffle.py ADDED
@@ -0,0 +1,176 @@
+import gradio as gr
+import torch
+from controlnet_aux import ContentShuffleDetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+
+from diffusion_webui.utils.model_list import (
+    controlnet_shuffle_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+
+class StableDiffusionControlNetShuffleGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_shuffle(
+        self,
+        image_path: str,
+    ):
+        image = load_image(image_path)
+        control_image = ContentShuffleDetector()(image)
+        return control_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        image = self.controlnet_shuffle(image_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    )
+
+                    controlnet_canny_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    controlnet_canny_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_stable_model_id = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            controlnet_canny_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_canny_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            controlnet_canny_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_model_id = gr.Dropdown(
+                                    choices=controlnet_shuffle_model_list,
+                                    value=controlnet_shuffle_model_list[0],
+                                    label="ControlNet Model Id",
+                                )
+
+                                controlnet_canny_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+
+                                controlnet_canny_seed_generator = gr.Number(
+                                    value=0,
+                                    label="Seed Generator",
+                                )
+                                controlnet_canny_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_predict.click(
+                fn=StableDiffusionControlNetShuffleGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_image_file,
+                    controlnet_canny_stable_model_id,
+                    controlnet_canny_model_id,
+                    controlnet_canny_prompt,
+                    controlnet_canny_negative_prompt,
+                    controlnet_canny_num_images_per_prompt,
+                    controlnet_canny_guidance_scale,
+                    controlnet_canny_num_inference_step,
+                    controlnet_canny_scheduler,
+                    controlnet_canny_seed_generator,
+                ],
+                outputs=[output_image],
+            )
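Note: the shuffle preprocessor needs no pretrained weights; ContentShuffleDetector is constructed inline and scrambles the image's content while keeping its global color statistics, which the shuffle ControlNet then reinterprets under the prompt. In isolation (the path is a placeholder):

from controlnet_aux import ContentShuffleDetector
from diffusers.utils import load_image

image = load_image("photo.png")  # placeholder path
control_image = ContentShuffleDetector()(image)
control_image.save("shuffled.png")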
diffusion_webui/diffusion_models/controlnet/controlnet_softedge.py ADDED
@@ -0,0 +1,179 @@
+import gradio as gr
+import torch
+from controlnet_aux import HEDdetector, PidiNetDetector
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+
+from diffusion_webui.utils.model_list import (
+    controlnet_softedge_model_list,
+    stable_model_list,
+)
+from diffusion_webui.utils.scheduler_list import (
+    SCHEDULER_LIST,
+    get_scheduler_list,
+)
+
+
+class StableDiffusionControlNetSoftEdgeGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def load_model(self, stable_model_path, controlnet_model_path, scheduler):
+        if self.pipe is None:
+            controlnet = ControlNetModel.from_pretrained(
+                controlnet_model_path, torch_dtype=torch.float16
+            )
+            self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
+
+        self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+        self.pipe.to("cuda")
+        self.pipe.enable_xformers_memory_efficient_attention()
+
+        return self.pipe
+
+    def controlnet_softedge(
+        self,
+        image_path: str,
+    ):
+
+        image = load_image(image_path)
+        processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
+        processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
+        control_image = processor(image, safe=True)
+        return control_image
+
+    def generate_image(
+        self,
+        image_path: str,
+        stable_model_path: str,
+        controlnet_model_path: str,
+        prompt: str,
+        negative_prompt: str,
+        num_images_per_prompt: int,
+        guidance_scale: int,
+        num_inference_step: int,
+        scheduler: str,
+        seed_generator: int,
+    ):
+        pipe = self.load_model(
+            stable_model_path=stable_model_path,
+            controlnet_model_path=controlnet_model_path,
+            scheduler=scheduler,
+        )
+
+        image = self.controlnet_softedge(image_path)
+
+        if seed_generator == 0:
+            random_seed = torch.randint(0, 1000000, (1,))
+            generator = torch.manual_seed(random_seed)
+        else:
+            generator = torch.manual_seed(seed_generator)
+
+        output = pipe(
+            prompt=prompt,
+            image=image,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_images_per_prompt,
+            num_inference_steps=num_inference_step,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images
+
+        return output
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    controlnet_canny_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    )
+
+                    controlnet_canny_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Prompt",
+                        show_label=False,
+                    )
+
+                    controlnet_canny_negative_prompt = gr.Textbox(
+                        lines=1,
+                        placeholder="Negative Prompt",
+                        show_label=False,
+                    )
+                    with gr.Row():
+                        with gr.Column():
+                            controlnet_canny_stable_model_id = gr.Dropdown(
+                                choices=stable_model_list,
+                                value=stable_model_list[0],
+                                label="Stable Model Id",
+                            )
+
+                            controlnet_canny_guidance_scale = gr.Slider(
+                                minimum=0.1,
+                                maximum=15,
+                                step=0.1,
+                                value=7.5,
+                                label="Guidance Scale",
+                            )
+                            controlnet_canny_num_inference_step = gr.Slider(
+                                minimum=1,
+                                maximum=100,
+                                step=1,
+                                value=50,
+                                label="Num Inference Step",
+                            )
+                            controlnet_canny_num_images_per_prompt = gr.Slider(
+                                minimum=1,
+                                maximum=10,
+                                step=1,
+                                value=1,
+                                label="Number Of Images",
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                controlnet_canny_model_id = gr.Dropdown(
+                                    choices=controlnet_softedge_model_list,
+                                    value=controlnet_softedge_model_list[0],
+                                    label="ControlNet Model Id",
+                                )
+
+                                controlnet_canny_scheduler = gr.Dropdown(
+                                    choices=SCHEDULER_LIST,
+                                    value=SCHEDULER_LIST[0],
+                                    label="Scheduler",
+                                )
+
+                                controlnet_canny_seed_generator = gr.Number(
+                                    value=0,
+                                    label="Seed Generator",
+                                )
+                                controlnet_canny_predict = gr.Button(value="Generator")
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            controlnet_canny_predict.click(
+                fn=StableDiffusionControlNetSoftEdgeGenerator().generate_image,
+                inputs=[
+                    controlnet_canny_image_file,
+                    controlnet_canny_stable_model_id,
+                    controlnet_canny_model_id,
+                    controlnet_canny_prompt,
+                    controlnet_canny_negative_prompt,
+                    controlnet_canny_num_images_per_prompt,
+                    controlnet_canny_guidance_scale,
+                    controlnet_canny_num_inference_step,
+                    controlnet_canny_scheduler,
+                    controlnet_canny_seed_generator,
+                ],
+                outputs=[output_image],
+            )
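Note: in controlnet_softedge(), the HEDdetector is instantiated and then immediately overwritten by the PidiNetDetector, so only PidiNet ever runs and the HED line is dead code. The effective preprocessing is this sketch (the path is a placeholder):

from controlnet_aux import PidiNetDetector
from diffusers.utils import load_image

processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
control_image = processor(load_image("photo.png"), safe=True)  # safe mode for SoftEdge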
diffusion_webui/diffusion_models/stable_diffusion/__init__.py CHANGED
@@ -1,3 +1,9 @@
-from diffusion_webui.diffusion_models.stable_diffusion.text2img_app import StableDiffusionText2ImageGenerator
-from diffusion_webui.diffusion_models.stable_diffusion.img2img_app import StableDiffusionImage2ImageGenerator
-from diffusion_webui.diffusion_models.stable_diffusion.inpaint_app import StableDiffusionInpaintGenerator
+from diffusion_webui.diffusion_models.stable_diffusion.img2img_app import (
+    StableDiffusionImage2ImageGenerator,
+)
+from diffusion_webui.diffusion_models.stable_diffusion.inpaint_app import (
+    StableDiffusionInpaintGenerator,
+)
+from diffusion_webui.diffusion_models.stable_diffusion.text2img_app import (
+    StableDiffusionText2ImageGenerator,
+)
diffusion_webui/upscaler_models/__init__.py CHANGED
@@ -1 +1,3 @@
-from diffusion_webui.upscaler_models.codeformer_upscaler import CodeformerUpscalerGenerator
+from diffusion_webui.upscaler_models.codeformer_upscaler import (
+    CodeformerUpscalerGenerator,
+)
diffusion_webui/upscaler_models/codeformer_upscaler.py CHANGED
@@ -1,5 +1,4 @@
 import gradio as gr
-from codeformer.app import inference_app
 
 
 class CodeformerUpscalerGenerator:
@@ -11,6 +10,7 @@ class CodeformerUpscalerGenerator:
         upscale: int,
         codeformer_fidelity: int,
     ):
+        from codeformer.app import inference_app
 
         pipe = inference_app(
             image=image_path,
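Note: moving the codeformer import inside the method makes it lazy: diffusion_webui now imports cleanly even when the optional codeformer package is missing, and the cost is paid only on first use. The pattern in general form (the wrapper name and **kwargs are illustrative):

def upscale(image_path: str, **kwargs):
    # Deferred import: the optional dependency is resolved at call time, not import time.
    from codeformer.app import inference_app

    return inference_app(image=image_path, **kwargs)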
diffusion_webui/utils/model_list.py CHANGED
@@ -5,14 +5,13 @@ stable_model_list = [
     "wavymulder/Analog-Diffusion",
     "dreamlike-art/dreamlike-diffusion-1.0",
     "gsdf/Counterfeit-V2.5",
-    "dreamlike-art/dreamlike-photoreal-2.0"
+    "dreamlike-art/dreamlike-photoreal-2.0",
 ]
 
 controlnet_canny_model_list = [
     "lllyasviel/sd-controlnet-canny",
     "lllyasviel/control_v11p_sd15_canny",
     "thibaud/controlnet-sd21-canny-diffusers",
-
 ]
 
 controlnet_depth_model_list = [
@@ -57,3 +56,23 @@ controlnet_seg_model_list = [
     "lllyasviel/sd-controlnet-seg",
     "lllyasviel/control_v11p_sd15_seg",
 ]
+
+controlnet_shuffle_model_list = [
+    "lllyasviel/control_v11e_sd15_shuffle",
+]
+
+controlnet_pix2pix_model_list = [
+    "lllyasviel/control_v11e_sd15_ip2p",
+]
+
+controlnet_lineart_model_list = [
+    "ControlNet-1-1-preview/control_v11p_sd15_lineart",
+]
+
+controlnet_lineart_anime_model_list = [
+    "lllyasviel/control_v11p_sd15s2_lineart_anime",
+]
+
+controlnet_softedge_model_list = [
+    "lllyasviel/control_v11p_sd15_softedge",
+]