kadirnar committed
Commit 5fef1f4
1 Parent(s): babc3a6

Add controlnet_v2

README.md CHANGED
@@ -6,7 +6,7 @@ colorTo: red
 sdk: gradio
 sdk_version: 3.19
 app_file: app.py
-pinned: false
+pinned: true
 license: openrail
 tags:
 - making-demos
diffusion_webui/controlnet/__pycache__/__init__.cpython-38.pyc DELETED
Binary file (168 Bytes)
 
diffusion_webui/controlnet/__pycache__/controlnet_canny.cpython-38.pyc DELETED
Binary file (3.01 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_depth.cpython-38.pyc DELETED
Binary file (3.09 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_hed.cpython-38.pyc DELETED
Binary file (2.91 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_mlsd.cpython-38.pyc DELETED
Binary file (2.93 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_pose.cpython-38.pyc DELETED
Binary file (2.94 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_scribble.cpython-38.pyc DELETED
Binary file (3 kB)
 
diffusion_webui/controlnet/__pycache__/controlnet_seg.cpython-38.pyc DELETED
Binary file (5.43 kB)
 
diffusion_webui/controlnet/controlnet_canny.py CHANGED
@@ -10,12 +10,13 @@ import cv2
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
 ]
 
+controlnet_canny_model_list = [
+    "lllyasviel/sd-controlnet-canny",
+    "thibaud/controlnet-sd21-canny-diffusers"
+]
 
 
 stable_prompt_list = [
@@ -30,6 +31,7 @@ stable_negative_prompt_list = [
 
 def controlnet_canny(
     image_path:str,
+    controlnet_model_path:str,
     ):
     image = Image.open(image_path)
     image = np.array(image)
@@ -40,7 +42,7 @@ def controlnet_canny(
     image = Image.fromarray(image)
 
     controlnet = ControlNetModel.from_pretrained(
-        "lllyasviel/sd-controlnet-canny",
+        controlnet_model_path,
         torch_dtype=torch.float16
     )
     return controlnet, image
@@ -48,17 +50,18 @@ def controlnet_canny(
 
 def stable_diffusion_controlnet_canny(
     image_path:str,
-    model_path:str,
+    stable_model_path:str,
+    controlnet_model_path:str,
     prompt:str,
     negative_prompt:str,
     guidance_scale:int,
     num_inference_step:int,
     ):
 
-    controlnet, image = controlnet_canny(image_path)
+    controlnet, image = controlnet_canny(image_path, controlnet_model_path)
 
     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=model_path,
+        pretrained_model_name_or_path=stable_model_path,
         controlnet=controlnet,
         safety_checker=None,
         torch_dtype=torch.float16,
@@ -87,12 +90,18 @@ def stable_diffusion_controlnet_canny_app():
             label='Image'
         )
 
-        controlnet_canny_model_id = gr.Dropdown(
+        controlnet_canny_stable_model_id = gr.Dropdown(
             choices=stable_model_list,
             value=stable_model_list[0],
             label='Stable Model Id'
         )
-
+
+        controlnet_canny_model_id = gr.Dropdown(
+            choices=controlnet_canny_model_list,
+            value=controlnet_canny_model_list[0],
+            label='Controlnet Model Id'
+        )
+
         controlnet_canny_prompt = gr.Textbox(
             lines=1,
             value=stable_prompt_list[0],
@@ -131,6 +140,7 @@ def stable_diffusion_controlnet_canny_app():
         fn=stable_diffusion_controlnet_canny,
         inputs=[
             controlnet_canny_image_file,
+            controlnet_canny_stable_model_id,
             controlnet_canny_model_id,
             controlnet_canny_prompt,
             controlnet_canny_negative_prompt,
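
The canny diff above establishes the pattern that the depth, HED, pose, and scribble diffs below repeat: each app gains a second dropdown for the ControlNet checkpoint, and both the selected base Stable Diffusion ID and the selected ControlNet ID are passed through to the pipeline. A minimal sketch of that flow, assuming a CUDA device and default Canny thresholds; the generate() helper and the step count are illustrative and not part of the repo:

import cv2
import numpy as np
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

def generate(image_path, stable_model_path, controlnet_model_path, prompt):
    # Build the Canny edge map that conditions the ControlNet.
    edges = cv2.Canny(np.array(Image.open(image_path)), 100, 200)
    condition = Image.fromarray(np.stack([edges] * 3, axis=2))

    # ControlNet checkpoint comes from the new "Controlnet Model Id" dropdown.
    controlnet = ControlNetModel.from_pretrained(
        controlnet_model_path, torch_dtype=torch.float16
    )

    # Base checkpoint comes from the "Stable Model Id" dropdown.
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        stable_model_path,
        controlnet=controlnet,
        safety_checker=None,
        torch_dtype=torch.float16,
    ).to("cuda")

    return pipe(prompt, image=condition, num_inference_steps=30).images[0]

# Example pairing, using values from the two dropdowns:
# generate("input.png", "runwayml/stable-diffusion-v1-5",
#          "lllyasviel/sd-controlnet-canny", "a photo of a man.")
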
diffusion_webui/controlnet/controlnet_depth.py CHANGED
@@ -9,10 +9,12 @@ import torch
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
+]
+
+controlnet_depth_model_list = [
+    "fusing/stable-diffusion-v1-5-controlnet-depth",
+    "thibaud/controlnet-sd21-depth-diffusers"
 ]
 
 
@@ -27,7 +29,7 @@ stable_negative_prompt_list = [
 ]
 
 
-def controlnet_depth(image_path:str):
+def controlnet_depth(image_path:str, depth_model_path:str):
     depth_estimator = pipeline('depth-estimation')
 
     image = Image.open(image_path)
@@ -38,24 +40,25 @@ def controlnet_depth(image_path:str):
     image = Image.fromarray(image)
 
     controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=torch.float16
+        depth_model_path, torch_dtype=torch.float16
     )
 
     return controlnet, image
 
 def stable_diffusion_controlnet_depth(
     image_path:str,
-    model_path:str,
+    stable_model_path:str,
+    depth_model_path:str,
     prompt:str,
     negative_prompt:str,
     guidance_scale:int,
     num_inference_step:int,
     ):
 
-    controlnet, image = controlnet_depth(image_path=image_path)
+    controlnet, image = controlnet_depth(image_path=image_path, depth_model_path=depth_model_path)
 
     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=model_path,
+        pretrained_model_name_or_path=stable_model_path,
         controlnet=controlnet,
         safety_checker=None,
         torch_dtype=torch.float16
@@ -85,12 +88,18 @@ def stable_diffusion_controlnet_depth_app():
             label='Image'
         )
 
-        controlnet_depth_model_id = gr.Dropdown(
+        controlnet_depth_stable_model_id = gr.Dropdown(
             choices=stable_model_list,
             value=stable_model_list[0],
             label='Stable Model Id'
         )
 
+        controlnet_depth_model_id = gr.Dropdown(
+            choices=controlnet_depth_model_list,
+            value=controlnet_depth_model_list[0],
+            label='ControlNet Model Id'
+        )
+
         controlnet_depth_prompt = gr.Textbox(
             lines=1,
             value=stable_prompt_list[0],
@@ -129,6 +138,7 @@ def stable_diffusion_controlnet_depth_app():
         fn=stable_diffusion_controlnet_depth,
         inputs=[
             controlnet_depth_image_file,
+            controlnet_depth_stable_model_id,
             controlnet_depth_model_id,
             controlnet_depth_prompt,
             controlnet_depth_negative_prompt,
diffusion_webui/controlnet/controlnet_hed.py CHANGED
@@ -8,10 +8,12 @@ import torch
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
+]
+
+controlnet_hed_model_list = [
+    "fusing/stable-diffusion-v1-5-controlnet-hed",
+    "thibaud/controlnet-sd21-hed-diffusers"
 ]
 
 stable_prompt_list = [
@@ -25,14 +27,14 @@ stable_negative_prompt_list = [
 ]
 
 
-def controlnet_hed(image_path:str):
+def controlnet_hed(image_path:str, controlnet_hed_model_path:str):
     hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
 
     image = Image.open(image_path)
     image = hed(image)
 
     controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-hed",
+        controlnet_hed_model_path,
         torch_dtype=torch.float16
     )
     return controlnet, image
@@ -40,17 +42,18 @@ def controlnet_hed(image_path:str):
 
 def stable_diffusion_controlnet_hed(
     image_path:str,
-    model_path:str,
+    stable_model_path:str,
+    controlnet_hed_model_path:str,
     prompt:str,
     negative_prompt:str,
     guidance_scale:int,
     num_inference_step:int,
     ):
 
-    controlnet, image = controlnet_hed(image_path=image_path)
+    controlnet, image = controlnet_hed(image_path=image_path, controlnet_hed_model_path=controlnet_hed_model_path)
 
     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=model_path,
+        pretrained_model_name_or_path=stable_model_path,
         controlnet=controlnet,
         safety_checker=None,
         torch_dtype=torch.float16
@@ -79,11 +82,17 @@ def stable_diffusion_controlnet_hed_app():
             label='Image'
        )
 
-        controlnet_hed_model_id = gr.Dropdown(
+        controlnet_hed_stable_model_id = gr.Dropdown(
             choices=stable_model_list,
             value=stable_model_list[0],
             label='Stable Model Id'
         )
+
+        controlnet_hed_model_id = gr.Dropdown(
+            choices=controlnet_hed_model_list,
+            value=controlnet_hed_model_list[0],
+            label='ControlNet Model Id'
+        )
 
         controlnet_hed_prompt = gr.Textbox(
             lines=1,
@@ -124,6 +133,7 @@ def stable_diffusion_controlnet_hed_app():
         fn=stable_diffusion_controlnet_hed,
         inputs=[
             controlnet_hed_image_file,
+            controlnet_hed_stable_model_id,
             controlnet_hed_model_id,
             controlnet_hed_prompt,
             controlnet_hed_negative_prompt,
diffusion_webui/controlnet/controlnet_pose.py CHANGED
@@ -9,10 +9,12 @@ import torch
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
+]
+
+controlnet_pose_model_list = [
+    "fusing/stable-diffusion-v1-5-controlnet-openpose",
+    "thibaud/controlnet-sd21-openpose-diffusers"
 ]
 
 stable_prompt_list = [
@@ -26,14 +28,14 @@ stable_negative_prompt_list = [
 ]
 
 
-def controlnet_pose(image_path:str):
+def controlnet_pose(image_path:str, controlnet_pose_model_path:str):
     openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
 
     image = Image.open(image_path)
     image = openpose(image)
 
     controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-openpose",
+        controlnet_pose_model_path,
         torch_dtype=torch.float16
     )
 
@@ -41,17 +43,18 @@ def controlnet_pose(image_path:str):
 
 def stable_diffusion_controlnet_pose(
     image_path:str,
-    model_path:str,
+    stable_model_path:str,
+    controlnet_pose_model_path:str,
     prompt:str,
     negative_prompt:str,
     guidance_scale:int,
     num_inference_step:int,
     ):
 
-    controlnet, image = controlnet_pose(image_path=image_path)
+    controlnet, image = controlnet_pose(image_path=image_path, controlnet_pose_model_path=controlnet_pose_model_path)
 
     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=model_path,
+        pretrained_model_name_or_path=stable_model_path,
         controlnet=controlnet,
         safety_checker=None,
         torch_dtype=torch.float16
@@ -81,11 +84,18 @@ def stable_diffusion_controlnet_pose_app():
             label='Image'
         )
 
-        controlnet_pose_model_id = gr.Dropdown(
+        controlnet_pose_stable_model_id = gr.Dropdown(
             choices=stable_model_list,
             value=stable_model_list[0],
             label='Stable Model Id'
         )
+
+        controlnet_pose_model_id = gr.Dropdown(
+            choices=controlnet_pose_model_list,
+            value=controlnet_pose_model_list[0],
+            label='ControlNet Model Id'
+        )
+
 
         controlnet_pose_prompt = gr.Textbox(
             lines=1,
@@ -125,6 +135,7 @@ def stable_diffusion_controlnet_pose_app():
         fn=stable_diffusion_controlnet_pose,
         inputs=[
             controlnet_pose_image_file,
+            controlnet_pose_stable_model_id,
             controlnet_pose_model_id,
             controlnet_pose_prompt,
             controlnet_pose_negative_prompt,
diffusion_webui/controlnet/controlnet_scribble.py CHANGED
@@ -9,10 +9,12 @@ import torch
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
+]
+
+controlnet_hed_model_list = [
+    "fusing/stable-diffusion-v1-5-controlnet-hed",
+    "thibaud/controlnet-sd21-scribble-diffusers"
 ]
 
 stable_prompt_list = [
@@ -26,31 +28,32 @@ stable_negative_prompt_list = [
 ]
 
 
-def controlnet_scribble(image_path:str):
+def controlnet_scribble(image_path:str, controlnet_hed_model_path:str):
     hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
 
     image = Image.open(image_path)
     image = hed(image, scribble=True)
 
     controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-scribble", torch_dtype=torch.float16
+        controlnet_hed_model_path, torch_dtype=torch.float16
    )
 
     return controlnet, image
 
 def stable_diffusion_controlnet_scribble(
     image_path:str,
-    model_path:str,
+    stable_model_path:str,
+    controlnet_hed_model_path:str,
     prompt:str,
     negative_prompt:str,
     guidance_scale:int,
     num_inference_step:int,
     ):
 
-    controlnet, image = controlnet_scribble(image_path=image_path)
+    controlnet, image = controlnet_scribble(image_path=image_path, controlnet_hed_model_path=controlnet_hed_model_path)
 
     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=model_path,
+        pretrained_model_name_or_path=stable_model_path,
         controlnet=controlnet,
         safety_checker=None,
         torch_dtype=torch.float16
@@ -79,10 +82,16 @@ def stable_diffusion_controlnet_scribble_app():
             label='Image'
         )
 
-        controlnet_scribble_model_id = gr.Dropdown(
+        controlnet_scribble_stable_model_id = gr.Dropdown(
             choices=stable_model_list,
             value=stable_model_list[0],
             label='Stable Model Id'
+        )
+
+        controlnet_scribble_model_id = gr.Dropdown(
+            choices=controlnet_hed_model_list,
+            value=controlnet_hed_model_list[0],
+            label='ControlNet Model Id'
         )
 
         controlnet_scribble_prompt = gr.Textbox(
@@ -123,7 +132,8 @@ def stable_diffusion_controlnet_scribble_app():
         fn=stable_diffusion_controlnet_scribble,
         inputs=[
             controlnet_scribble_image_file,
-            controlnet_scribble_model_id,
+            controlnet_scribble_stable_model_id,
+            controlnet_scribble_model_id,
             controlnet_scribble_prompt,
             controlnet_scribble_negative_prompt,
             controlnet_scribble_guidance_scale,
diffusion_webui/stable_diffusion/__pycache__/__init__.cpython-38.pyc DELETED
Binary file (174 Bytes)
 
diffusion_webui/stable_diffusion/__pycache__/img2img_app.cpython-38.pyc DELETED
Binary file (2.44 kB)
 
diffusion_webui/stable_diffusion/__pycache__/inpaint_app.cpython-38.pyc DELETED
Binary file (3.08 kB)
 
diffusion_webui/stable_diffusion/__pycache__/text2img_app.cpython-38.pyc DELETED
Binary file (2.45 kB)
 
diffusion_webui/stable_diffusion/img2img_app.py CHANGED
@@ -12,11 +12,6 @@ stable_model_list = [
     "stabilityai/stable-diffusion-2-1-base"
 ]
 
-stable_inpiant_model_list = [
-    "stabilityai/stable-diffusion-2-inpainting",
-    "runwayml/stable-diffusion-inpainting"
-]
-
 stable_prompt_list = [
     "a photo of a man.",
     "a photo of a girl."
diffusion_webui/stable_diffusion/inpaint_app.py CHANGED
@@ -1,18 +1,8 @@
 from diffusers import DiffusionPipeline, DDIMScheduler
-from PIL import Image
-import imageio
 import torch
 
 import gradio as gr
 
-stable_model_list = [
-    "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
-    "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
-]
-
 stable_inpiant_model_list = [
     "stabilityai/stable-diffusion-2-inpainting",
     "runwayml/stable-diffusion-inpainting"
diffusion_webui/stable_diffusion/keras_txt2img.py CHANGED
@@ -5,8 +5,8 @@ import tensorflow as tf
 import gradio as gr
 
 keras_model_list = [
-    "kadirnar/dreambooth_diffusion_model_v1",
-    "keras-dreambooth/pink-floyd-division-bell"
+    "keras-dreambooth/keras_diffusion_lowpoly_world",
+    "keras-dreambooth/pink-floyd-division-bell",
     "keras-dreambooth/dreambooth_diffusion_model",
 ]
 
diffusion_webui/stable_diffusion/text2img_app.py CHANGED
@@ -4,10 +4,14 @@ import torch
 
 stable_model_list = [
     "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2",
-    "stabilityai/stable-diffusion-2-base",
     "stabilityai/stable-diffusion-2-1",
-    "stabilityai/stable-diffusion-2-1-base"
+    "sd-dreambooth-library/disco-diffusion-style",
+    "prompthero/openjourney-v2",
+    "andite/anything-v4.0",
+    "Lykon/DreamShaper",
+    "nitrosocke/Nitro-Diffusion",
+    "dreamlike-art/dreamlike-diffusion-1.0"
+
 ]
 
 stable_prompt_list = [
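
The text2img change only widens the model dropdown; every entry in the expanded stable_model_list is loaded through the same Diffusers text-to-image path. A rough sketch of how one of the newly added IDs would be consumed, with illustrative guidance scale, step count, and negative prompt rather than the app's own values:

import torch
from diffusers import StableDiffusionPipeline

# Any ID from stable_model_list works here; this is one of the entries added above.
model_id = "prompthero/openjourney-v2"

pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

image = pipe(
    prompt="a photo of a man.",       # first entry of stable_prompt_list
    negative_prompt="low quality",    # illustrative; the app uses its own list
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("output.png")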