kadirnar committed on
Commit
a8ce530
1 Parent(s): 7474614

Upload 16 files

Browse files
app.py CHANGED
@@ -33,169 +33,30 @@ with app:
33
  )
34
  with gr.Row():
35
  with gr.Column():
36
- text2image_app = stable_diffusion_text2img_app()
37
- img2img_app = stable_diffusion_img2img_app()
38
- inpaint_app = stable_diffusion_inpaint_app()
39
-
 
 
 
40
  with gr.Tab('ControlNet'):
41
- controlnet_canny_app = stable_diffusion_controlnet_canny_app()
42
- controlnet_hed_app = stable_diffusion_controlnet_hed_app()
43
- controlnet_mlsd_app = stable_diffusion_controlnet_mlsd_app()
44
- controlnet_depth_app = stable_diffusion_controlnet_depth_app()
45
- controlnet_pose_app = stable_diffusion_controlnet_pose_app()
46
- controlnet_scribble_app = stable_diffusion_controlnet_scribble_app()
47
- controlnet_seg_app = stable_diffusion_controlnet_seg_app()
 
 
 
 
 
 
 
48
 
49
- keras_diffusion_app = keras_stable_diffusion_app()
50
-
51
- with gr.Tab('Output'):
52
- with gr.Column():
53
- output_image = gr.Image(label='Image')
54
-
55
- text2image_app['predict'].click(
56
- fn = stable_diffusion_text2img,
57
- inputs = [
58
- text2image_app['model_path'],
59
- text2image_app['prompt'],
60
- text2image_app['negative_prompt'],
61
- text2image_app['guidance_scale'],
62
- text2image_app['num_inference_step'],
63
- text2image_app['height'],
64
- text2image_app['width'],
65
- ],
66
- outputs = [output_image],
67
- )
68
-
69
- img2img_app['predict'].click(
70
- fn = stable_diffusion_img2img,
71
- inputs = [
72
- img2img_app['image_path'],
73
- img2img_app['model_path'],
74
- img2img_app['prompt'],
75
- img2img_app['negative_prompt'],
76
- img2img_app['guidance_scale'],
77
- img2img_app['num_inference_step'],
78
- ],
79
- outputs = [output_image],
80
- )
81
-
82
- inpaint_app['predict'].click(
83
- fn = stable_diffusion_inpaint,
84
- inputs = [
85
- inpaint_app['image_path'],
86
- inpaint_app['model_path'],
87
- inpaint_app['prompt'],
88
- inpaint_app['negative_prompt'],
89
- inpaint_app['guidance_scale'],
90
- inpaint_app['num_inference_step'],
91
- ],
92
- outputs = [output_image],
93
- )
94
-
95
- controlnet_canny_app['predict'].click(
96
- fn = stable_diffusion_controlnet_canny,
97
- inputs = [
98
- controlnet_canny_app['image_path'],
99
- controlnet_canny_app['model_path'],
100
- controlnet_canny_app['prompt'],
101
- controlnet_canny_app['negative_prompt'],
102
- controlnet_canny_app['guidance_scale'],
103
- controlnet_canny_app['num_inference_step'],
104
- ],
105
- outputs = [output_image],
106
- )
107
-
108
- controlnet_hed_app['predict'].click(
109
- fn = stable_diffusion_controlnet_hed,
110
- inputs = [
111
- controlnet_hed_app['image_path'],
112
- controlnet_hed_app['model_path'],
113
- controlnet_hed_app['prompt'],
114
- controlnet_hed_app['negative_prompt'],
115
- controlnet_hed_app['guidance_scale'],
116
- controlnet_hed_app['num_inference_step'],
117
- ],
118
- outputs = [output_image],
119
- )
120
-
121
- controlnet_mlsd_app['predict'].click(
122
- fn = stable_diffusion_controlnet_mlsd,
123
- inputs = [
124
- controlnet_mlsd_app['image_path'],
125
- controlnet_mlsd_app['model_path'],
126
- controlnet_mlsd_app['prompt'],
127
- controlnet_mlsd_app['negative_prompt'],
128
- controlnet_mlsd_app['guidance_scale'],
129
- controlnet_mlsd_app['num_inference_step'],
130
- ],
131
- outputs = [output_image],
132
- )
133
-
134
- controlnet_depth_app['predict'].click(
135
- fn = stable_diffusion_controlnet_seg,
136
- inputs = [
137
- controlnet_depth_app['image_path'],
138
- controlnet_depth_app['model_path'],
139
- controlnet_depth_app['prompt'],
140
- controlnet_depth_app['negative_prompt'],
141
- controlnet_depth_app['guidance_scale'],
142
- controlnet_depth_app['num_inference_step'],
143
- ],
144
- outputs = [output_image],
145
- )
146
-
147
- controlnet_pose_app['predict'].click(
148
- fn = stable_diffusion_controlnet_depth,
149
- inputs = [
150
- controlnet_pose_app['image_path'],
151
- controlnet_pose_app['model_path'],
152
- controlnet_pose_app['prompt'],
153
- controlnet_pose_app['negative_prompt'],
154
- controlnet_pose_app['guidance_scale'],
155
- controlnet_pose_app['num_inference_step'],
156
- ],
157
- outputs = [output_image],
158
- )
159
-
160
- controlnet_scribble_app['predict'].click(
161
- fn = stable_diffusion_controlnet_scribble,
162
- inputs = [
163
- controlnet_scribble_app['image_path'],
164
- controlnet_scribble_app['model_path'],
165
- controlnet_scribble_app['prompt'],
166
- controlnet_scribble_app['negative_prompt'],
167
- controlnet_scribble_app['guidance_scale'],
168
- controlnet_scribble_app['num_inference_step'],
169
- ],
170
- outputs = [output_image],
171
- )
172
-
173
- controlnet_seg_app['predict'].click(
174
- fn = stable_diffusion_controlnet_pose,
175
- inputs = [
176
- controlnet_seg_app['image_path'],
177
- controlnet_seg_app['model_path'],
178
- controlnet_seg_app['prompt'],
179
- controlnet_seg_app['negative_prompt'],
180
- controlnet_seg_app['guidance_scale'],
181
- controlnet_seg_app['num_inference_step'],
182
- ],
183
- outputs = [output_image],
184
- )
185
-
186
- keras_diffusion_app['predict'].click(
187
- fn = keras_stable_diffusion,
188
- inputs = [
189
- keras_diffusion_app['model_path'],
190
- keras_diffusion_app['prompt'],
191
- keras_diffusion_app['negative_prompt'],
192
- keras_diffusion_app['guidance_scale'],
193
- keras_diffusion_app['num_inference_step'],
194
- keras_diffusion_app['height'],
195
- keras_diffusion_app['width'],
196
- ],
197
- outputs = [gr.Gallery(label="Outputs")],
198
- )
199
-
200
 
201
  app.launch(debug=True)
 
33
  )
34
  with gr.Row():
35
  with gr.Column():
36
+ with gr.Tab('Text2Img'):
37
+ stable_diffusion_text2img_app()
38
+ with gr.Tab('Img2Img'):
39
+ stable_diffusion_img2img_app()
40
+ with gr.Tab('Inpaint'):
41
+ stable_diffusion_inpaint_app()
42
+
43
  with gr.Tab('ControlNet'):
44
+ with gr.Tab('Canny'):
45
+ stable_diffusion_controlnet_canny_app()
46
+ with gr.Tab('Depth'):
47
+ stable_diffusion_controlnet_depth_app()
48
+ with gr.Tab('HED'):
49
+ stable_diffusion_controlnet_hed_app()
50
+ with gr.Tab('MLSD'):
51
+ stable_diffusion_controlnet_mlsd_app()
52
+ with gr.Tab('Pose'):
53
+ stable_diffusion_controlnet_pose_app()
54
+ with gr.Tab('Seg'):
55
+ stable_diffusion_controlnet_seg_app()
56
+ with gr.Tab('Scribble'):
57
+ stable_diffusion_controlnet_scribble_app()
58
 
59
+ with gr.Tab('Keras Diffusion'):
60
+ keras_diffusion_app = keras_stable_diffusion_app()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  app.launch(debug=True)
diffusion_webui/controlnet/controlnet_canny.py CHANGED
@@ -16,10 +16,7 @@ stable_model_list = [
16
  "stabilityai/stable-diffusion-2-1-base"
17
  ]
18
 
19
- stable_inpiant_model_list = [
20
- "stabilityai/stable-diffusion-2-inpainting",
21
- "runwayml/stable-diffusion-inpainting"
22
- ]
23
 
24
  stable_prompt_list = [
25
  "a photo of a man.",
@@ -82,57 +79,63 @@ def stable_diffusion_controlnet_canny(
82
 
83
 
84
  def stable_diffusion_controlnet_canny_app():
85
- with gr.Tab('Canny'):
86
- controlnet_canny_image_file = gr.Image(
87
- type='filepath',
88
- label='Image'
89
- )
90
-
91
- controlnet_canny_model_id = gr.Dropdown(
92
- choices=stable_model_list,
93
- value=stable_model_list[0],
94
- label='Stable Model Id'
95
- )
96
-
97
- controlnet_canny_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_prompt_list[0],
100
- label='Prompt'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
  )
102
-
103
- controlnet_canny_negative_prompt = gr.Textbox(
104
- lines=1,
105
- value=stable_negative_prompt_list[0],
106
- label='Negative Prompt'
107
- )
108
-
109
- with gr.Accordion("Advanced Options", open=False):
110
- controlnet_canny_guidance_scale = gr.Slider(
111
- minimum=0.1,
112
- maximum=15,
113
- step=0.1,
114
- value=7.5,
115
- label='Guidance Scale'
116
- )
117
-
118
- controlnet_canny_num_inference_step = gr.Slider(
119
- minimum=1,
120
- maximum=100,
121
- step=1,
122
- value=50,
123
- label='Num Inference Step'
124
- )
125
-
126
- controlnet_canny_predict = gr.Button(value='Generator')
127
-
128
- variables = {
129
- 'image_path': controlnet_canny_image_file,
130
- 'model_path': controlnet_canny_model_id,
131
- 'prompt': controlnet_canny_prompt,
132
- 'negative_prompt': controlnet_canny_negative_prompt,
133
- 'guidance_scale': controlnet_canny_guidance_scale,
134
- 'num_inference_step': controlnet_canny_num_inference_step,
135
- 'predict': controlnet_canny_predict
136
- }
137
-
138
- return variables
 
16
  "stabilityai/stable-diffusion-2-1-base"
17
  ]
18
 
19
+
 
 
 
20
 
21
  stable_prompt_list = [
22
  "a photo of a man.",
 
79
 
80
 
81
  def stable_diffusion_controlnet_canny_app():
82
+ with gr.Blocks():
83
+ with gr.Row():
84
+ with gr.Column():
85
+ controlnet_canny_image_file = gr.Image(
86
+ type='filepath',
87
+ label='Image'
88
+ )
89
+
90
+ controlnet_canny_model_id = gr.Dropdown(
91
+ choices=stable_model_list,
92
+ value=stable_model_list[0],
93
+ label='Stable Model Id'
94
+ )
95
+
96
+ controlnet_canny_prompt = gr.Textbox(
97
+ lines=1,
98
+ value=stable_prompt_list[0],
99
+ label='Prompt'
100
+ )
101
+
102
+ controlnet_canny_negative_prompt = gr.Textbox(
103
+ lines=1,
104
+ value=stable_negative_prompt_list[0],
105
+ label='Negative Prompt'
106
+ )
107
+
108
+ with gr.Accordion("Advanced Options", open=False):
109
+ controlnet_canny_guidance_scale = gr.Slider(
110
+ minimum=0.1,
111
+ maximum=15,
112
+ step=0.1,
113
+ value=7.5,
114
+ label='Guidance Scale'
115
+ )
116
+
117
+ controlnet_canny_num_inference_step = gr.Slider(
118
+ minimum=1,
119
+ maximum=100,
120
+ step=1,
121
+ value=50,
122
+ label='Num Inference Step'
123
+ )
124
+
125
+ controlnet_canny_predict = gr.Button(value='Generator')
126
+
127
+ with gr.Column():
128
+ output_image = gr.Image(label='Output')
129
+
130
+ controlnet_canny_predict.click(
131
+ fn=stable_diffusion_controlnet_canny,
132
+ inputs=[
133
+ controlnet_canny_image_file,
134
+ controlnet_canny_model_id,
135
+ controlnet_canny_prompt,
136
+ controlnet_canny_negative_prompt,
137
+ controlnet_canny_guidance_scale,
138
+ controlnet_canny_num_inference_step,
139
+ ],
140
+ outputs=[output_image],
141
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_depth.py CHANGED
@@ -15,10 +15,6 @@ stable_model_list = [
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
18
- stable_inpiant_model_list = [
19
- "stabilityai/stable-diffusion-2-inpainting",
20
- "runwayml/stable-diffusion-inpainting"
21
- ]
22
 
23
  stable_prompt_list = [
24
  "a photo of a man.",
@@ -81,57 +77,63 @@ def stable_diffusion_controlnet_depth(
81
 
82
 
83
  def stable_diffusion_controlnet_depth_app():
84
- with gr.Tab('Depth'):
85
- controlnet_depth_image_file = gr.Image(
86
- type='filepath',
87
- label='Image'
88
- )
89
-
90
- controlnet_depth_model_id = gr.Dropdown(
91
- choices=stable_model_list,
92
- value=stable_model_list[0],
93
- label='Stable Model Id'
94
- )
95
-
96
- controlnet_depth_prompt = gr.Textbox(
97
- lines=1,
98
- value=stable_prompt_list[0],
99
- label='Prompt'
100
- )
101
-
102
- controlnet_depth_negative_prompt = gr.Textbox(
103
- lines=1,
104
- value=stable_negative_prompt_list[0],
105
- label='Negative Prompt'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  )
107
-
108
- with gr.Accordion("Advanced Options", open=False):
109
- controlnet_depth_guidance_scale = gr.Slider(
110
- minimum=0.1,
111
- maximum=15,
112
- step=0.1,
113
- value=7.5,
114
- label='Guidance Scale'
115
- )
116
-
117
- controlnet_depth_num_inference_step = gr.Slider(
118
- minimum=1,
119
- maximum=100,
120
- step=1,
121
- value=50,
122
- label='Num Inference Step'
123
- )
124
-
125
- controlnet_depth_predict = gr.Button(value='Generator')
126
-
127
- variables = {
128
- 'image_path': controlnet_depth_image_file,
129
- 'model_path': controlnet_depth_model_id,
130
- 'prompt': controlnet_depth_prompt,
131
- 'negative_prompt': controlnet_depth_negative_prompt,
132
- 'guidance_scale': controlnet_depth_guidance_scale,
133
- 'num_inference_step': controlnet_depth_num_inference_step,
134
- 'predict': controlnet_depth_predict
135
- }
136
-
137
- return variables
 
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
 
 
 
 
18
 
19
  stable_prompt_list = [
20
  "a photo of a man.",
 
77
 
78
 
79
  def stable_diffusion_controlnet_depth_app():
80
+ with gr.Blocks():
81
+ with gr.Row():
82
+ with gr.Column():
83
+ controlnet_depth_image_file = gr.Image(
84
+ type='filepath',
85
+ label='Image'
86
+ )
87
+
88
+ controlnet_depth_model_id = gr.Dropdown(
89
+ choices=stable_model_list,
90
+ value=stable_model_list[0],
91
+ label='Stable Model Id'
92
+ )
93
+
94
+ controlnet_depth_prompt = gr.Textbox(
95
+ lines=1,
96
+ value=stable_prompt_list[0],
97
+ label='Prompt'
98
+ )
99
+
100
+ controlnet_depth_negative_prompt = gr.Textbox(
101
+ lines=1,
102
+ value=stable_negative_prompt_list[0],
103
+ label='Negative Prompt'
104
+ )
105
+
106
+ with gr.Accordion("Advanced Options", open=False):
107
+ controlnet_depth_guidance_scale = gr.Slider(
108
+ minimum=0.1,
109
+ maximum=15,
110
+ step=0.1,
111
+ value=7.5,
112
+ label='Guidance Scale'
113
+ )
114
+
115
+ controlnet_depth_num_inference_step = gr.Slider(
116
+ minimum=1,
117
+ maximum=100,
118
+ step=1,
119
+ value=50,
120
+ label='Num Inference Step'
121
+ )
122
+
123
+ controlnet_depth_predict = gr.Button(value='Generator')
124
+
125
+ with gr.Column():
126
+ output_image = gr.Image(label='Output')
127
+
128
+ controlnet_depth_predict.click(
129
+ fn=stable_diffusion_controlnet_depth,
130
+ inputs=[
131
+ controlnet_depth_image_file,
132
+ controlnet_depth_model_id,
133
+ controlnet_depth_prompt,
134
+ controlnet_depth_negative_prompt,
135
+ controlnet_depth_guidance_scale,
136
+ controlnet_depth_num_inference_step,
137
+ ],
138
+ outputs=output_image
139
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_hed.py CHANGED
@@ -14,11 +14,6 @@ stable_model_list = [
14
  "stabilityai/stable-diffusion-2-1-base"
15
  ]
16
 
17
- stable_inpiant_model_list = [
18
- "stabilityai/stable-diffusion-2-inpainting",
19
- "runwayml/stable-diffusion-inpainting"
20
- ]
21
-
22
  stable_prompt_list = [
23
  "a photo of a man.",
24
  "a photo of a girl."
@@ -76,57 +71,65 @@ def stable_diffusion_controlnet_hed(
76
  return output[0]
77
 
78
  def stable_diffusion_controlnet_hed_app():
79
- with gr.Tab('Hed'):
80
- controlnet_hed_image_file = gr.Image(
81
- type='filepath',
82
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  )
84
-
85
- controlnet_hed_model_id = gr.Dropdown(
86
- choices=stable_model_list,
87
- value=stable_model_list[0],
88
- label='Stable Model Id'
89
- )
90
-
91
- controlnet_hed_prompt = gr.Textbox(
92
- lines=1,
93
- value=stable_prompt_list[0],
94
- label='Prompt'
95
- )
96
-
97
- controlnet_hed_negative_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_negative_prompt_list[0],
100
- label='Negative Prompt'
101
- )
102
-
103
- with gr.Accordion("Advanced Options", open=False):
104
- controlnet_hed_guidance_scale = gr.Slider(
105
- minimum=0.1,
106
- maximum=15,
107
- step=0.1,
108
- value=7.5,
109
- label='Guidance Scale'
110
- )
111
-
112
- controlnet_hed_num_inference_step = gr.Slider(
113
- minimum=1,
114
- maximum=100,
115
- step=1,
116
- value=50,
117
- label='Num Inference Step'
118
- )
119
-
120
- controlnet_hed_predict = gr.Button(value='Generator')
121
-
122
- variables = {
123
- 'image_path': controlnet_hed_image_file,
124
- 'model_path': controlnet_hed_model_id,
125
- 'prompt': controlnet_hed_prompt,
126
- 'negative_prompt': controlnet_hed_negative_prompt,
127
- 'guidance_scale': controlnet_hed_guidance_scale,
128
- 'num_inference_step': controlnet_hed_num_inference_step,
129
- 'predict': controlnet_hed_predict
130
- }
131
-
132
- return variables
 
14
  "stabilityai/stable-diffusion-2-1-base"
15
  ]
16
 
 
 
 
 
 
17
  stable_prompt_list = [
18
  "a photo of a man.",
19
  "a photo of a girl."
 
71
  return output[0]
72
 
73
  def stable_diffusion_controlnet_hed_app():
74
+ with gr.Blocks():
75
+ with gr.Row():
76
+ with gr.Column():
77
+ controlnet_hed_image_file = gr.Image(
78
+ type='filepath',
79
+ label='Image'
80
+ )
81
+
82
+ controlnet_hed_model_id = gr.Dropdown(
83
+ choices=stable_model_list,
84
+ value=stable_model_list[0],
85
+ label='Stable Model Id'
86
+ )
87
+
88
+ controlnet_hed_prompt = gr.Textbox(
89
+ lines=1,
90
+ value=stable_prompt_list[0],
91
+ label='Prompt'
92
+ )
93
+
94
+ controlnet_hed_negative_prompt = gr.Textbox(
95
+ lines=1,
96
+ value=stable_negative_prompt_list[0],
97
+ label='Negative Prompt'
98
+ )
99
+
100
+ with gr.Accordion("Advanced Options", open=False):
101
+ controlnet_hed_guidance_scale = gr.Slider(
102
+ minimum=0.1,
103
+ maximum=15,
104
+ step=0.1,
105
+ value=7.5,
106
+ label='Guidance Scale'
107
+ )
108
+
109
+ controlnet_hed_num_inference_step = gr.Slider(
110
+ minimum=1,
111
+ maximum=100,
112
+ step=1,
113
+ value=50,
114
+ label='Num Inference Step'
115
+ )
116
+
117
+ controlnet_hed_predict = gr.Button(value='Generator')
118
+
119
+
120
+ with gr.Column():
121
+ output_image = gr.Image(label='Output')
122
+
123
+ controlnet_hed_predict.click(
124
+ fn=stable_diffusion_controlnet_hed,
125
+ inputs=[
126
+ controlnet_hed_image_file,
127
+ controlnet_hed_model_id,
128
+ controlnet_hed_prompt,
129
+ controlnet_hed_negative_prompt,
130
+ controlnet_hed_guidance_scale,
131
+ controlnet_hed_num_inference_step,
132
+ ],
133
+ outputs=[output_image]
134
  )
135
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_mlsd.py CHANGED
@@ -14,11 +14,6 @@ stable_model_list = [
14
  "stabilityai/stable-diffusion-2-1-base"
15
  ]
16
 
17
- stable_inpiant_model_list = [
18
- "stabilityai/stable-diffusion-2-inpainting",
19
- "runwayml/stable-diffusion-inpainting"
20
- ]
21
-
22
  stable_prompt_list = [
23
  "a photo of a man.",
24
  "a photo of a girl."
@@ -76,58 +71,63 @@ def stable_diffusion_controlnet_mlsd(
76
  return output[0]
77
 
78
  def stable_diffusion_controlnet_mlsd_app():
79
- with gr.Tab('MLSD line'):
80
- controlnet_mlsd_image_file = gr.Image(
81
- type='filepath',
82
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  )
84
-
85
- controlnet_mlsd_model_id = gr.Dropdown(
86
- choices=stable_model_list,
87
- value=stable_model_list[0],
88
- label='Stable Model Id'
89
- )
90
-
91
- controlnet_mlsd_prompt = gr.Textbox(
92
- lines=1,
93
- value=stable_prompt_list[0],
94
- label='Prompt'
95
- )
96
-
97
- controlnet_mlsd_negative_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_negative_prompt_list[0],
100
- label='Negative Prompt'
101
- )
102
-
103
- with gr.Accordion("Advanced Options", open=False):
104
- controlnet_mlsd_guidance_scale = gr.Slider(
105
- minimum=0.1,
106
- maximum=15,
107
- step=0.1,
108
- value=7.5,
109
- label='Guidance Scale'
110
- )
111
-
112
- controlnet_mlsd_num_inference_step = gr.Slider(
113
- minimum=1,
114
- maximum=100,
115
- step=1,
116
- value=50,
117
- label='Num Inference Step'
118
- )
119
-
120
- controlnet_mlsd_predict = gr.Button(value='Generator')
121
-
122
- variables = {
123
- 'image_path': controlnet_mlsd_image_file,
124
- 'model_path': controlnet_mlsd_model_id,
125
- 'prompt': controlnet_mlsd_prompt,
126
- 'negative_prompt': controlnet_mlsd_negative_prompt,
127
- 'guidance_scale': controlnet_mlsd_guidance_scale,
128
- 'num_inference_step': controlnet_mlsd_num_inference_step,
129
- 'predict': controlnet_mlsd_predict
130
- }
131
-
132
- return variables
133
-
 
14
  "stabilityai/stable-diffusion-2-1-base"
15
  ]
16
 
 
 
 
 
 
17
  stable_prompt_list = [
18
  "a photo of a man.",
19
  "a photo of a girl."
 
71
  return output[0]
72
 
73
  def stable_diffusion_controlnet_mlsd_app():
74
+ with gr.Blocks():
75
+ with gr.Row():
76
+ with gr.Column():
77
+ controlnet_mlsd_image_file = gr.Image(
78
+ type='filepath',
79
+ label='Image'
80
+ )
81
+
82
+ controlnet_mlsd_model_id = gr.Dropdown(
83
+ choices=stable_model_list,
84
+ value=stable_model_list[0],
85
+ label='Stable Model Id'
86
+ )
87
+
88
+ controlnet_mlsd_prompt = gr.Textbox(
89
+ lines=1,
90
+ value=stable_prompt_list[0],
91
+ label='Prompt'
92
+ )
93
+
94
+ controlnet_mlsd_negative_prompt = gr.Textbox(
95
+ lines=1,
96
+ value=stable_negative_prompt_list[0],
97
+ label='Negative Prompt'
98
+ )
99
+
100
+ with gr.Accordion("Advanced Options", open=False):
101
+ controlnet_mlsd_guidance_scale = gr.Slider(
102
+ minimum=0.1,
103
+ maximum=15,
104
+ step=0.1,
105
+ value=7.5,
106
+ label='Guidance Scale'
107
+ )
108
+
109
+ controlnet_mlsd_num_inference_step = gr.Slider(
110
+ minimum=1,
111
+ maximum=100,
112
+ step=1,
113
+ value=50,
114
+ label='Num Inference Step'
115
+ )
116
+
117
+ controlnet_mlsd_predict = gr.Button(value='Generator')
118
+
119
+ with gr.Column():
120
+ output_image = gr.Image(label='Output')
121
+
122
+ controlnet_mlsd_predict.click(
123
+ fn=stable_diffusion_controlnet_mlsd,
124
+ inputs=[
125
+ controlnet_mlsd_image_file,
126
+ controlnet_mlsd_model_id,
127
+ controlnet_mlsd_prompt,
128
+ controlnet_mlsd_negative_prompt,
129
+ controlnet_mlsd_guidance_scale,
130
+ controlnet_mlsd_num_inference_step
131
+ ],
132
+ outputs=output_image
133
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_pose.py CHANGED
@@ -15,11 +15,6 @@ stable_model_list = [
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
18
- stable_inpiant_model_list = [
19
- "stabilityai/stable-diffusion-2-inpainting",
20
- "runwayml/stable-diffusion-inpainting"
21
- ]
22
-
23
  stable_prompt_list = [
24
  "a photo of a man.",
25
  "a photo of a girl."
@@ -78,57 +73,63 @@ def stable_diffusion_controlnet_pose(
78
 
79
 
80
  def stable_diffusion_controlnet_pose_app():
81
- with gr.Tab('Pose'):
82
- controlnet_pose_image_file = gr.Image(
83
- type='filepath',
84
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  )
86
-
87
- controlnet_pose_model_id = gr.Dropdown(
88
- choices=stable_model_list,
89
- value=stable_model_list[0],
90
- label='Stable Model Id'
91
- )
92
-
93
- controlnet_pose_prompt = gr.Textbox(
94
- lines=1,
95
- value=stable_prompt_list[0],
96
- label='Prompt'
97
- )
98
-
99
- controlnet_pose_negative_prompt = gr.Textbox(
100
- lines=1,
101
- value=stable_negative_prompt_list[0],
102
- label='Negative Prompt'
103
- )
104
-
105
- with gr.Accordion("Advanced Options", open=False):
106
- controlnet_pose_guidance_scale = gr.Slider(
107
- minimum=0.1,
108
- maximum=15,
109
- step=0.1,
110
- value=7.5,
111
- label='Guidance Scale'
112
- )
113
-
114
- controlnet_pose_num_inference_step = gr.Slider(
115
- minimum=1,
116
- maximum=100,
117
- step=1,
118
- value=50,
119
- label='Num Inference Step'
120
- )
121
-
122
- controlnet_pose_predict = gr.Button(value='Generator')
123
-
124
- variables = {
125
- 'image_path': controlnet_pose_image_file,
126
- 'model_path': controlnet_pose_model_id,
127
- 'prompt': controlnet_pose_prompt,
128
- 'negative_prompt': controlnet_pose_negative_prompt,
129
- 'guidance_scale': controlnet_pose_guidance_scale,
130
- 'num_inference_step': controlnet_pose_num_inference_step,
131
- 'predict': controlnet_pose_predict
132
- }
133
-
134
- return variables
 
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
 
 
 
 
 
18
  stable_prompt_list = [
19
  "a photo of a man.",
20
  "a photo of a girl."
 
73
 
74
 
75
  def stable_diffusion_controlnet_pose_app():
76
+ with gr.Blocks():
77
+ with gr.Row():
78
+ with gr.Column():
79
+ controlnet_pose_image_file = gr.Image(
80
+ type='filepath',
81
+ label='Image'
82
+ )
83
+
84
+ controlnet_pose_model_id = gr.Dropdown(
85
+ choices=stable_model_list,
86
+ value=stable_model_list[0],
87
+ label='Stable Model Id'
88
+ )
89
+
90
+ controlnet_pose_prompt = gr.Textbox(
91
+ lines=1,
92
+ value=stable_prompt_list[0],
93
+ label='Prompt'
94
+ )
95
+
96
+ controlnet_pose_negative_prompt = gr.Textbox(
97
+ lines=1,
98
+ value=stable_negative_prompt_list[0],
99
+ label='Negative Prompt'
100
+ )
101
+
102
+ with gr.Accordion("Advanced Options", open=False):
103
+ controlnet_pose_guidance_scale = gr.Slider(
104
+ minimum=0.1,
105
+ maximum=15,
106
+ step=0.1,
107
+ value=7.5,
108
+ label='Guidance Scale'
109
+ )
110
+
111
+ controlnet_pose_num_inference_step = gr.Slider(
112
+ minimum=1,
113
+ maximum=100,
114
+ step=1,
115
+ value=50,
116
+ label='Num Inference Step'
117
+ )
118
+
119
+ controlnet_pose_predict = gr.Button(value='Generator')
120
+
121
+ with gr.Column():
122
+ output_image = gr.Image(label='Output')
123
+
124
+ controlnet_pose_predict.click(
125
+ fn=stable_diffusion_controlnet_pose,
126
+ inputs=[
127
+ controlnet_pose_image_file,
128
+ controlnet_pose_model_id,
129
+ controlnet_pose_prompt,
130
+ controlnet_pose_negative_prompt,
131
+ controlnet_pose_guidance_scale,
132
+ controlnet_pose_num_inference_step,
133
+ ],
134
+ outputs=output_image
135
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_scribble.py CHANGED
@@ -15,11 +15,6 @@ stable_model_list = [
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
18
- stable_inpiant_model_list = [
19
- "stabilityai/stable-diffusion-2-inpainting",
20
- "runwayml/stable-diffusion-inpainting"
21
- ]
22
-
23
  stable_prompt_list = [
24
  "a photo of a man.",
25
  "a photo of a girl."
@@ -76,57 +71,63 @@ def stable_diffusion_controlnet_scribble(
76
  return output[0]
77
 
78
  def stable_diffusion_controlnet_scribble_app():
79
- with gr.Tab('Scribble'):
80
- controlnet_scribble_image_file = gr.Image(
81
- type='filepath',
82
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  )
84
-
85
- controlnet_scribble_model_id = gr.Dropdown(
86
- choices=stable_model_list,
87
- value=stable_model_list[0],
88
- label='Stable Model Id'
89
- )
90
-
91
- controlnet_scribble_prompt = gr.Textbox(
92
- lines=1,
93
- value=stable_prompt_list[0],
94
- label='Prompt'
95
- )
96
-
97
- controlnet_scribble_negative_prompt = gr.Textbox(
98
- lines=1,
99
- value=stable_negative_prompt_list[0],
100
- label='Negative Prompt'
101
- )
102
-
103
- with gr.Accordion("Advanced Options", open=False):
104
- controlnet_scribble_guidance_scale = gr.Slider(
105
- minimum=0.1,
106
- maximum=15,
107
- step=0.1,
108
- value=7.5,
109
- label='Guidance Scale'
110
- )
111
-
112
- controlnet_scribble_num_inference_step = gr.Slider(
113
- minimum=1,
114
- maximum=100,
115
- step=1,
116
- value=50,
117
- label='Num Inference Step'
118
- )
119
-
120
- controlnet_scribble_predict = gr.Button(value='Generator')
121
-
122
- variables = {
123
- 'image_path': controlnet_scribble_image_file,
124
- 'model_path': controlnet_scribble_model_id,
125
- 'prompt': controlnet_scribble_prompt,
126
- 'negative_prompt': controlnet_scribble_negative_prompt,
127
- 'guidance_scale': controlnet_scribble_guidance_scale,
128
- 'num_inference_step': controlnet_scribble_num_inference_step,
129
- 'predict': controlnet_scribble_predict
130
- }
131
-
132
- return variables
 
15
  "stabilityai/stable-diffusion-2-1-base"
16
  ]
17
 
 
 
 
 
 
18
  stable_prompt_list = [
19
  "a photo of a man.",
20
  "a photo of a girl."
 
71
  return output[0]
72
 
73
  def stable_diffusion_controlnet_scribble_app():
74
+ with gr.Blocks():
75
+ with gr.Row():
76
+ with gr.Column():
77
+ controlnet_scribble_image_file = gr.Image(
78
+ type='filepath',
79
+ label='Image'
80
+ )
81
+
82
+ controlnet_scribble_model_id = gr.Dropdown(
83
+ choices=stable_model_list,
84
+ value=stable_model_list[0],
85
+ label='Stable Model Id'
86
+ )
87
+
88
+ controlnet_scribble_prompt = gr.Textbox(
89
+ lines=1,
90
+ value=stable_prompt_list[0],
91
+ label='Prompt'
92
+ )
93
+
94
+ controlnet_scribble_negative_prompt = gr.Textbox(
95
+ lines=1,
96
+ value=stable_negative_prompt_list[0],
97
+ label='Negative Prompt'
98
+ )
99
+
100
+ with gr.Accordion("Advanced Options", open=False):
101
+ controlnet_scribble_guidance_scale = gr.Slider(
102
+ minimum=0.1,
103
+ maximum=15,
104
+ step=0.1,
105
+ value=7.5,
106
+ label='Guidance Scale'
107
+ )
108
+
109
+ controlnet_scribble_num_inference_step = gr.Slider(
110
+ minimum=1,
111
+ maximum=100,
112
+ step=1,
113
+ value=50,
114
+ label='Num Inference Step'
115
+ )
116
+
117
+ controlnet_scribble_predict = gr.Button(value='Generator')
118
+
119
+ with gr.Column():
120
+ output_image = gr.Image(label='Output')
121
+
122
+ controlnet_scribble_predict.click(
123
+ fn=stable_diffusion_controlnet_scribble,
124
+ inputs=[
125
+ controlnet_scribble_image_file,
126
+ controlnet_scribble_model_id,
127
+ controlnet_scribble_prompt,
128
+ controlnet_scribble_negative_prompt,
129
+ controlnet_scribble_guidance_scale,
130
+ controlnet_scribble_num_inference_step,
131
+ ],
132
+ outputs=output_image
133
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/controlnet/controlnet_seg.py CHANGED
@@ -17,11 +17,6 @@ stable_model_list = [
17
  "stabilityai/stable-diffusion-2-1-base"
18
  ]
19
 
20
- stable_inpiant_model_list = [
21
- "stabilityai/stable-diffusion-2-inpainting",
22
- "runwayml/stable-diffusion-inpainting"
23
- ]
24
-
25
  stable_prompt_list = [
26
  "a photo of a man.",
27
  "a photo of a girl."
@@ -135,57 +130,64 @@ def stable_diffusion_controlnet_seg(
135
  return output[0]
136
 
137
  def stable_diffusion_controlnet_seg_app():
138
- with gr.Tab('Segmentation'):
139
- controlnet_seg_image_file = gr.Image(
140
- type='filepath',
141
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  )
143
-
144
- controlnet_seg_model_id = gr.Dropdown(
145
- choices=stable_model_list,
146
- value=stable_model_list[0],
147
- label='Stable Model Id'
148
- )
149
-
150
- controlnet_seg_prompt = gr.Textbox(
151
- lines=1,
152
- value=stable_prompt_list[0],
153
- label='Prompt'
154
- )
155
-
156
- controlnet_seg_negative_prompt = gr.Textbox(
157
- lines=1,
158
- value=stable_negative_prompt_list[0],
159
- label='Negative Prompt'
160
- )
161
-
162
- with gr.Accordion("Advanced Options", open=False):
163
- controlnet_seg_guidance_scale = gr.Slider(
164
- minimum=0.1,
165
- maximum=15,
166
- step=0.1,
167
- value=7.5,
168
- label='Guidance Scale'
169
- )
170
-
171
- controlnet_seg_num_inference_step = gr.Slider(
172
- minimum=1,
173
- maximum=100,
174
- step=1,
175
- value=50,
176
- label='Num Inference Step'
177
- )
178
-
179
- controlnet_seg_predict = gr.Button(value='Generator')
180
-
181
- variables = {
182
- 'image_path': controlnet_seg_image_file,
183
- 'model_path': controlnet_seg_model_id,
184
- 'prompt': controlnet_seg_prompt,
185
- 'negative_prompt': controlnet_seg_negative_prompt,
186
- 'guidance_scale': controlnet_seg_guidance_scale,
187
- 'num_inference_step': controlnet_seg_num_inference_step,
188
- 'predict': controlnet_seg_predict,
189
- }
190
-
191
- return variables
 
17
  "stabilityai/stable-diffusion-2-1-base"
18
  ]
19
 
 
 
 
 
 
20
  stable_prompt_list = [
21
  "a photo of a man.",
22
  "a photo of a girl."
 
130
  return output[0]
131
 
132
  def stable_diffusion_controlnet_seg_app():
133
+ with gr.Blocks():
134
+ with gr.Row():
135
+ with gr.Column():
136
+ controlnet_seg_image_file = gr.Image(
137
+ type='filepath',
138
+ label='Image'
139
+ )
140
+
141
+ controlnet_seg_model_id = gr.Dropdown(
142
+ choices=stable_model_list,
143
+ value=stable_model_list[0],
144
+ label='Stable Model Id'
145
+ )
146
+
147
+ controlnet_seg_prompt = gr.Textbox(
148
+ lines=1,
149
+ value=stable_prompt_list[0],
150
+ label='Prompt'
151
+ )
152
+
153
+ controlnet_seg_negative_prompt = gr.Textbox(
154
+ lines=1,
155
+ value=stable_negative_prompt_list[0],
156
+ label='Negative Prompt'
157
+ )
158
+
159
+ with gr.Accordion("Advanced Options", open=False):
160
+ controlnet_seg_guidance_scale = gr.Slider(
161
+ minimum=0.1,
162
+ maximum=15,
163
+ step=0.1,
164
+ value=7.5,
165
+ label='Guidance Scale'
166
+ )
167
+
168
+ controlnet_seg_num_inference_step = gr.Slider(
169
+ minimum=1,
170
+ maximum=100,
171
+ step=1,
172
+ value=50,
173
+ label='Num Inference Step'
174
+ )
175
+
176
+ controlnet_seg_predict = gr.Button(value='Generator')
177
+
178
+ with gr.Column():
179
+ output_image = gr.Image(label='Output')
180
+
181
+ controlnet_seg_predict.click(
182
+ fn=stable_diffusion_controlnet_seg,
183
+ inputs=[
184
+ controlnet_seg_image_file,
185
+ controlnet_seg_model_id,
186
+ controlnet_seg_prompt,
187
+ controlnet_seg_negative_prompt,
188
+ controlnet_seg_guidance_scale,
189
+ controlnet_seg_num_inference_step,
190
+ ],
191
+ outputs=[output_image],
192
  )
193
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/stable_diffusion/img2img_app.py CHANGED
@@ -60,57 +60,63 @@ def stable_diffusion_img2img(
60
 
61
 
62
  def stable_diffusion_img2img_app():
63
- with gr.Tab('Image2Image'):
64
- image2image2_image_file = gr.Image(
65
- type='filepath',
66
- label='Image'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  )
68
-
69
- image2image_model_path = gr.Dropdown(
70
- choices=stable_model_list,
71
- value=stable_model_list[0],
72
- label='Image-Image Model Id'
73
- )
74
-
75
- image2image_prompt = gr.Textbox(
76
- lines=1,
77
- value=stable_prompt_list[0],
78
- label='Prompt'
79
- )
80
-
81
- image2image_negative_prompt = gr.Textbox(
82
- lines=1,
83
- value=stable_negative_prompt_list[0],
84
- label='Negative Prompt'
85
- )
86
-
87
- with gr.Accordion("Advanced Options", open=False):
88
- image2image_guidance_scale = gr.Slider(
89
- minimum=0.1,
90
- maximum=15,
91
- step=0.1,
92
- value=7.5,
93
- label='Guidance Scale'
94
- )
95
-
96
- image2image_num_inference_step = gr.Slider(
97
- minimum=1,
98
- maximum=100,
99
- step=1,
100
- value=50,
101
- label='Num Inference Step'
102
- )
103
-
104
- image2image_predict = gr.Button(value='Generator')
105
-
106
- variables = {
107
- 'image_path': image2image2_image_file,
108
- 'model_path': image2image_model_path,
109
- 'prompt': image2image_prompt,
110
- 'negative_prompt': image2image_negative_prompt,
111
- 'guidance_scale': image2image_guidance_scale,
112
- 'num_inference_step': image2image_num_inference_step,
113
- 'predict': image2image_predict
114
- }
115
-
116
- return variables
 
60
 
61
 
62
  def stable_diffusion_img2img_app():
63
+ with gr.Blocks():
64
+ with gr.Row():
65
+ with gr.Column():
66
+ image2image2_image_file = gr.Image(
67
+ type='filepath',
68
+ label='Image'
69
+ )
70
+
71
+ image2image_model_path = gr.Dropdown(
72
+ choices=stable_model_list,
73
+ value=stable_model_list[0],
74
+ label='Image-Image Model Id'
75
+ )
76
+
77
+ image2image_prompt = gr.Textbox(
78
+ lines=1,
79
+ value=stable_prompt_list[0],
80
+ label='Prompt'
81
+ )
82
+
83
+ image2image_negative_prompt = gr.Textbox(
84
+ lines=1,
85
+ value=stable_negative_prompt_list[0],
86
+ label='Negative Prompt'
87
+ )
88
+
89
+ with gr.Accordion("Advanced Options", open=False):
90
+ image2image_guidance_scale = gr.Slider(
91
+ minimum=0.1,
92
+ maximum=15,
93
+ step=0.1,
94
+ value=7.5,
95
+ label='Guidance Scale'
96
+ )
97
+
98
+ image2image_num_inference_step = gr.Slider(
99
+ minimum=1,
100
+ maximum=100,
101
+ step=1,
102
+ value=50,
103
+ label='Num Inference Step'
104
+ )
105
+
106
+ image2image_predict = gr.Button(value='Generator')
107
+
108
+ with gr.Column():
109
+ output_image = gr.Image(label='Output')
110
+
111
+ image2image_predict.click(
112
+ fn=stable_diffusion_img2img,
113
+ inputs=[
114
+ image2image2_image_file,
115
+ image2image_model_path,
116
+ image2image_prompt,
117
+ image2image_negative_prompt,
118
+ image2image_guidance_scale,
119
+ image2image_num_inference_step,
120
+ ],
121
+ outputs=[output_image],
122
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/stable_diffusion/inpaint_app.py CHANGED
@@ -62,60 +62,68 @@ def stable_diffusion_inpaint(
62
 
63
 
64
  def stable_diffusion_inpaint_app():
65
- with gr.Tab('Inpaint'):
66
- inpaint_image_file = gr.Image(
67
- source='upload',
68
- tool='sketch',
69
- elem_id="image_upload",
70
- type="pil",
71
- label="Upload"
72
- ).style(height=400)
73
-
74
- inpaint_model_id = gr.Dropdown(
75
- choices=stable_inpiant_model_list,
76
- value=stable_inpiant_model_list[0],
77
- label='Inpaint Model Id'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  )
79
-
80
- inpaint_prompt = gr.Textbox(
81
- lines=1,
82
- value=stable_prompt_list[0],
83
- label='Prompt'
84
- )
85
-
86
- inpaint_negative_prompt = gr.Textbox(
87
- lines=1,
88
- value=stable_negative_prompt_list[0],
89
- label='Negative Prompt'
90
- )
91
-
92
- with gr.Accordion("Advanced Options", open=False):
93
- inpaint_guidance_scale = gr.Slider(
94
- minimum=0.1,
95
- maximum=15,
96
- step=0.1,
97
- value=7.5,
98
- label='Guidance Scale'
99
- )
100
-
101
- inpaint_num_inference_step = gr.Slider(
102
- minimum=1,
103
- maximum=100,
104
- step=1,
105
- value=50,
106
- label='Num Inference Step'
107
- )
108
-
109
- inpaint_predict = gr.Button(value='Generator')
110
-
111
- variables = {
112
- "image_path": inpaint_image_file,
113
- "model_path": inpaint_model_id,
114
- "prompt": inpaint_prompt,
115
- "negative_prompt": inpaint_negative_prompt,
116
- "guidance_scale": inpaint_guidance_scale,
117
- "num_inference_step": inpaint_num_inference_step,
118
- "predict": inpaint_predict
119
- }
120
-
121
- return variables
 
62
 
63
 
64
  def stable_diffusion_inpaint_app():
65
+ with gr.Blocks():
66
+ with gr.Row():
67
+ with gr.Column():
68
+ inpaint_image_file = gr.Image(
69
+ source='upload',
70
+ tool='sketch',
71
+ elem_id="image_upload",
72
+ type="pil",
73
+ label="Upload"
74
+ ).style(height=400)
75
+
76
+ inpaint_model_id = gr.Dropdown(
77
+ choices=stable_inpiant_model_list,
78
+ value=stable_inpiant_model_list[0],
79
+ label='Inpaint Model Id'
80
+ )
81
+
82
+ inpaint_prompt = gr.Textbox(
83
+ lines=1,
84
+ value=stable_prompt_list[0],
85
+ label='Prompt'
86
+ )
87
+
88
+ inpaint_negative_prompt = gr.Textbox(
89
+ lines=1,
90
+ value=stable_negative_prompt_list[0],
91
+ label='Negative Prompt'
92
+ )
93
+
94
+ with gr.Accordion("Advanced Options", open=False):
95
+ inpaint_guidance_scale = gr.Slider(
96
+ minimum=0.1,
97
+ maximum=15,
98
+ step=0.1,
99
+ value=7.5,
100
+ label='Guidance Scale'
101
+ )
102
+
103
+ inpaint_num_inference_step = gr.Slider(
104
+ minimum=1,
105
+ maximum=100,
106
+ step=1,
107
+ value=50,
108
+ label='Num Inference Step'
109
+ )
110
+
111
+ inpaint_predict = gr.Button(value='Generator')
112
+
113
+
114
+ with gr.Column():
115
+ output_image = gr.Gallery(label="Outputs")
116
+
117
+ inpaint_predict.click(
118
+ fn=stable_diffusion_inpaint,
119
+ inputs=[
120
+ inpaint_image_file,
121
+ inpaint_model_id,
122
+ inpaint_prompt,
123
+ inpaint_negative_prompt,
124
+ inpaint_guidance_scale,
125
+ inpaint_num_inference_step,
126
+ ],
127
+ outputs=output_image
128
  )
129
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
diffusion_webui/stable_diffusion/keras_txt2img.py CHANGED
@@ -48,69 +48,75 @@ def keras_stable_diffusion(
48
  return generated_images
49
 
50
  def keras_stable_diffusion_app():
51
- with gr.Tab('Keras Diffusion'):
52
- keras_text2image_model_path = gr.Dropdown(
53
- choices=stable_model_list,
54
- value=stable_model_list[0],
55
- label='Text-Image Model Id'
56
- )
57
-
58
- keras_text2image_prompt = gr.Textbox(
59
- lines=1,
60
- value=stable_prompt_list[0],
61
- label='Prompt'
62
- )
63
-
64
- keras_text2image_negative_prompt = gr.Textbox(
65
- lines=1,
66
- value=stable_negative_prompt_list[0],
67
- label='Negative Prompt'
68
- )
69
-
70
- with gr.Accordion("Advanced Options", open=False):
71
- keras_text2image_guidance_scale = gr.Slider(
72
- minimum=0.1,
73
- maximum=15,
74
- step=0.1,
75
- value=7.5,
76
- label='Guidance Scale'
77
- )
78
-
79
- keras_text2image_num_inference_step = gr.Slider(
80
- minimum=1,
81
- maximum=100,
82
- step=1,
83
- value=50,
84
- label='Num Inference Step'
85
- )
86
-
87
- keras_text2image_height = gr.Slider(
88
- minimum=128,
89
- maximum=1280,
90
- step=32,
91
- value=512,
92
- label='Image Height'
93
- )
94
-
95
- keras_text2image_width = gr.Slider(
96
- minimum=128,
97
- maximum=1280,
98
- step=32,
99
- value=768,
100
- label='Image Height'
101
- )
102
-
103
- keras_text2image_predict = gr.Button(value='Generator')
 
 
104
 
105
- variables = {
106
- "model_path": keras_text2image_model_path,
107
- "prompt": keras_text2image_prompt,
108
- "negative_prompt": keras_text2image_negative_prompt,
109
- "guidance_scale": keras_text2image_guidance_scale,
110
- "num_inference_step": keras_text2image_num_inference_step,
111
- "height": keras_text2image_height,
112
- "width": keras_text2image_width,
113
- "predict": keras_text2image_predict
114
- }
115
-
116
- return variables
 
 
 
 
 
48
  return generated_images
49
 
50
  def keras_stable_diffusion_app():
51
+ with gr.Blocks():
52
+ with gr.Row():
53
+ with gr.Column():
54
+ keras_text2image_model_path = gr.Dropdown(
55
+ choices=stable_model_list,
56
+ value=stable_model_list[0],
57
+ label='Text-Image Model Id'
58
+ )
59
+
60
+ keras_text2image_prompt = gr.Textbox(
61
+ lines=1,
62
+ value=stable_prompt_list[0],
63
+ label='Prompt'
64
+ )
65
+
66
+ keras_text2image_negative_prompt = gr.Textbox(
67
+ lines=1,
68
+ value=stable_negative_prompt_list[0],
69
+ label='Negative Prompt'
70
+ )
71
+
72
+ with gr.Accordion("Advanced Options", open=False):
73
+ keras_text2image_guidance_scale = gr.Slider(
74
+ minimum=0.1,
75
+ maximum=15,
76
+ step=0.1,
77
+ value=7.5,
78
+ label='Guidance Scale'
79
+ )
80
+
81
+ keras_text2image_num_inference_step = gr.Slider(
82
+ minimum=1,
83
+ maximum=100,
84
+ step=1,
85
+ value=50,
86
+ label='Num Inference Step'
87
+ )
88
+
89
+ keras_text2image_height = gr.Slider(
90
+ minimum=128,
91
+ maximum=1280,
92
+ step=32,
93
+ value=512,
94
+ label='Image Height'
95
+ )
96
+
97
+ keras_text2image_width = gr.Slider(
98
+ minimum=128,
99
+ maximum=1280,
100
+ step=32,
101
+ value=768,
102
+ label='Image Height'
103
+ )
104
+
105
+ keras_text2image_predict = gr.Button(value='Generator')
106
 
107
+ with gr.Column():
108
+ output_image = gr.Image(label='Output')
109
+
110
+ keras_text2image_predict.click(
111
+ fn=keras_stable_diffusion,
112
+ inputs=[
113
+ keras_text2image_model_path,
114
+ keras_text2image_prompt,
115
+ keras_text2image_negative_prompt,
116
+ keras_text2image_guidance_scale,
117
+ keras_text2image_num_inference_step,
118
+ keras_text2image_height,
119
+ keras_text2image_width
120
+ ],
121
+ outputs=output_image
122
+ )
diffusion_webui/stable_diffusion/text2img_app.py CHANGED
@@ -10,11 +10,6 @@ stable_model_list = [
10
  "stabilityai/stable-diffusion-2-1-base"
11
  ]
12
 
13
- stable_inpiant_model_list = [
14
- "stabilityai/stable-diffusion-2-inpainting",
15
- "runwayml/stable-diffusion-inpainting"
16
- ]
17
-
18
  stable_prompt_list = [
19
  "a photo of a man.",
20
  "a photo of a girl."
@@ -56,70 +51,76 @@ def stable_diffusion_text2img(
56
  return images[0]
57
 
58
  def stable_diffusion_text2img_app():
59
- with gr.Tab('Text2Image'):
60
- text2image_model_path = gr.Dropdown(
61
- choices=stable_model_list,
62
- value=stable_model_list[0],
63
- label='Text-Image Model Id'
64
- )
65
-
66
- text2image_prompt = gr.Textbox(
67
- lines=1,
68
- value=stable_prompt_list[0],
69
- label='Prompt'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  )
71
-
72
- text2image_negative_prompt = gr.Textbox(
73
- lines=1,
74
- value=stable_negative_prompt_list[0],
75
- label='Negative Prompt'
76
- )
77
-
78
- with gr.Accordion("Advanced Options", open=False):
79
- text2image_guidance_scale = gr.Slider(
80
- minimum=0.1,
81
- maximum=15,
82
- step=0.1,
83
- value=7.5,
84
- label='Guidance Scale'
85
- )
86
-
87
- text2image_num_inference_step = gr.Slider(
88
- minimum=1,
89
- maximum=100,
90
- step=1,
91
- value=50,
92
- label='Num Inference Step'
93
- )
94
-
95
- text2image_height = gr.Slider(
96
- minimum=128,
97
- maximum=1280,
98
- step=32,
99
- value=512,
100
- label='Image Height'
101
- )
102
-
103
- text2image_width = gr.Slider(
104
- minimum=128,
105
- maximum=1280,
106
- step=32,
107
- value=768,
108
- label='Image Height'
109
- )
110
-
111
- text2image_predict = gr.Button(value='Generator')
112
-
113
- variables = {
114
- "model_path": text2image_model_path,
115
- "prompt": text2image_prompt,
116
- "negative_prompt": text2image_negative_prompt,
117
- "guidance_scale": text2image_guidance_scale,
118
- "num_inference_step": text2image_num_inference_step,
119
- "height": text2image_height,
120
- "width": text2image_width,
121
- "predict": text2image_predict
122
- }
123
-
124
- return variables
125
-
 
10
  "stabilityai/stable-diffusion-2-1-base"
11
  ]
12
 
 
 
 
 
 
13
  stable_prompt_list = [
14
  "a photo of a man.",
15
  "a photo of a girl."
 
51
  return images[0]
52
 
53
  def stable_diffusion_text2img_app():
54
+ with gr.Blocks():
55
+ with gr.Row():
56
+ with gr.Column():
57
+ text2image_model_path = gr.Dropdown(
58
+ choices=stable_model_list,
59
+ value=stable_model_list[0],
60
+ label='Text-Image Model Id'
61
+ )
62
+
63
+ text2image_prompt = gr.Textbox(
64
+ lines=1,
65
+ value=stable_prompt_list[0],
66
+ label='Prompt'
67
+ )
68
+
69
+ text2image_negative_prompt = gr.Textbox(
70
+ lines=1,
71
+ value=stable_negative_prompt_list[0],
72
+ label='Negative Prompt'
73
+ )
74
+
75
+ with gr.Accordion("Advanced Options", open=False):
76
+ text2image_guidance_scale = gr.Slider(
77
+ minimum=0.1,
78
+ maximum=15,
79
+ step=0.1,
80
+ value=7.5,
81
+ label='Guidance Scale'
82
+ )
83
+
84
+ text2image_num_inference_step = gr.Slider(
85
+ minimum=1,
86
+ maximum=100,
87
+ step=1,
88
+ value=50,
89
+ label='Num Inference Step'
90
+ )
91
+
92
+ text2image_height = gr.Slider(
93
+ minimum=128,
94
+ maximum=1280,
95
+ step=32,
96
+ value=512,
97
+ label='Image Height'
98
+ )
99
+
100
+ text2image_width = gr.Slider(
101
+ minimum=128,
102
+ maximum=1280,
103
+ step=32,
104
+ value=768,
105
+ label='Image Height'
106
+ )
107
+
108
+ text2image_predict = gr.Button(value='Generator')
109
+
110
+ with gr.Column():
111
+ output_image = gr.Image(label='Output')
112
+
113
+
114
+ text2image_predict.click(
115
+ fn=stable_diffusion_text2img,
116
+ inputs=[
117
+ text2image_model_path,
118
+ text2image_prompt,
119
+ text2image_negative_prompt,
120
+ text2image_guidance_scale,
121
+ text2image_num_inference_step,
122
+ text2image_height,
123
+ text2image_width,
124
+ ],
125
+ outputs=output_image
126
  )