kadirnar committed
Commit
3911a99
1 Parent(s): 28b1e6a

Upload 28 files

Files changed (28)
  1. app.py +94 -524
  2. diffusion_webui/__init__.py +0 -0
  3. diffusion_webui/__pycache__/__init__.cpython-38.pyc +0 -0
  4. diffusion_webui/controlnet/__init__.py +0 -0
  5. diffusion_webui/controlnet/__pycache__/__init__.cpython-38.pyc +0 -0
  6. diffusion_webui/controlnet/__pycache__/controlnet_canny.cpython-38.pyc +0 -0
  7. diffusion_webui/controlnet/__pycache__/controlnet_depth.cpython-38.pyc +0 -0
  8. diffusion_webui/controlnet/__pycache__/controlnet_hed.cpython-38.pyc +0 -0
  9. diffusion_webui/controlnet/__pycache__/controlnet_mlsd.cpython-38.pyc +0 -0
  10. diffusion_webui/controlnet/__pycache__/controlnet_pose.cpython-38.pyc +0 -0
  11. diffusion_webui/controlnet/__pycache__/controlnet_scribble.cpython-38.pyc +0 -0
  12. diffusion_webui/controlnet/__pycache__/controlnet_seg.cpython-38.pyc +0 -0
  13. diffusion_webui/controlnet/controlnet_canny.py +138 -0
  14. diffusion_webui/controlnet/controlnet_depth.py +137 -0
  15. diffusion_webui/controlnet/controlnet_hed.py +132 -0
  16. diffusion_webui/controlnet/controlnet_mlsd.py +133 -0
  17. diffusion_webui/controlnet/controlnet_pose.py +134 -0
  18. diffusion_webui/controlnet/controlnet_scribble.py +132 -0
  19. diffusion_webui/controlnet/controlnet_seg.py +191 -0
  20. diffusion_webui/stable_diffusion/__init__.py +0 -0
  21. diffusion_webui/stable_diffusion/__pycache__/__init__.cpython-38.pyc +0 -0
  22. diffusion_webui/stable_diffusion/__pycache__/img2img_app.cpython-38.pyc +0 -0
  23. diffusion_webui/stable_diffusion/__pycache__/inpaint_app.cpython-38.pyc +0 -0
  24. diffusion_webui/stable_diffusion/__pycache__/text2img_app.cpython-38.pyc +0 -0
  25. diffusion_webui/stable_diffusion/img2img_app.py +116 -0
  26. diffusion_webui/stable_diffusion/inpaint_app.py +136 -0
  27. diffusion_webui/stable_diffusion/text2img_app.py +125 -0
  28. requirements.txt +3 -1
app.py CHANGED
@@ -1,42 +1,19 @@
 
- from utils.image2image import stable_diffusion_img2img
- from utils.text2image import stable_diffusion_text2img
- from utils.inpaint import stable_diffusion_inpaint
-
- from controlnet.controlnet_canny import stable_diffusion_controlnet_canny
- from controlnet.controlnet_depth import stable_diffusion_controlnet_depth
- from controlnet.controlnet_hed import stable_diffusion_controlnet_hed
- from controlnet.controlnet_mlsd import stable_diffusion_controlnet_mlsd
- from controlnet.controlnet_pose import stable_diffusion_controlnet_pose
- from controlnet.controlnet_scribble import stable_diffusion_controlnet_scribble
- from controlnet.controlnet_seg import stable_diffusion_controlnet_seg
 
 
  import gradio as gr
 
 
- stable_model_list = [
-     "runwayml/stable-diffusion-v1-5",
-     "stabilityai/stable-diffusion-2",
-     "stabilityai/stable-diffusion-2-base",
-     "stabilityai/stable-diffusion-2-1",
-     "stabilityai/stable-diffusion-2-1-base"
- ]
-
- stable_inpiant_model_list = [
-     "stabilityai/stable-diffusion-2-inpainting",
-     "runwayml/stable-diffusion-inpainting"
- ]
-
- stable_prompt_list = [
-     "a photo of a man.",
-     "a photo of a girl."
- ]
-
- stable_negative_prompt_list = [
-     "bad, ugly",
-     "deformed"
- ]
  app = gr.Blocks()
  with app:
      gr.Markdown("# **<h1 align='center'>Stable Diffusion + ControlNet WebUI<h1>**")
@@ -50,560 +27,153 @@ with app:
      )
      with gr.Row():
          with gr.Column():
-             with gr.Tab('Text2Image'):
-                 text2image_model_id = gr.Dropdown(
-                     choices=stable_model_list,
-                     value=stable_model_list[0],
-                     label='Text-Image Model Id'
-                 )
-
-                 text2image_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_prompt_list[0],
-                     label='Prompt'
-                 )
-
-                 text2image_negative_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_negative_prompt_list[0],
-                     label='Negative Prompt'
-                 )
-
-                 with gr.Accordion("Advanced Options", open=False):
-                     text2image_guidance_scale = gr.Slider(
-                         minimum=0.1,
-                         maximum=15,
-                         step=0.1,
-                         value=7.5,
-                         label='Guidance Scale'
-                     )
-
-                     text2image_num_inference_step = gr.Slider(
-                         minimum=1,
-                         maximum=100,
-                         step=1,
-                         value=50,
-                         label='Num Inference Step'
-                     )
-
-                     text2image_height = gr.Slider(
-                         minimum=128,
-                         maximum=1280,
-                         step=32,
-                         value=512,
-                         label='Image Height'
-                     )
-
-                     text2image_width = gr.Slider(
-                         minimum=128,
-                         maximum=1280,
-                         step=32,
-                         value=768,
-                         label='Image Height'
-                     )
-
-                 text2image_predict = gr.Button(value='Generator')
-
-
-             with gr.Tab('Image2Image'):
-                 image2image2_image_file = gr.Image(label='Image')
-
-                 image2image_model_id = gr.Dropdown(
-                     choices=stable_model_list,
-                     value=stable_model_list[0],
-                     label='Image-Image Model Id'
-                 )
-
-                 image2image_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_prompt_list[0],
-                     label='Prompt'
-                 )
-
-                 image2image_negative_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_negative_prompt_list[0],
-                     label='Negative Prompt'
-                 )
-
-                 with gr.Accordion("Advanced Options", open=False):
-                     image2image_guidance_scale = gr.Slider(
-                         minimum=0.1,
-                         maximum=15,
-                         step=0.1,
-                         value=7.5,
-                         label='Guidance Scale'
-                     )
-
-                     image2image_num_inference_step = gr.Slider(
-                         minimum=1,
-                         maximum=100,
-                         step=1,
-                         value=50,
-                         label='Num Inference Step'
-                     )
-
-                 image2image_predict = gr.Button(value='Generator')
-
-             with gr.Tab('Inpaint'):
-                 inpaint_image_file = gr.Image(
-                     source="upload",
-                     type="numpy",
-                     tool="sketch",
-                     elem_id="source_container"
-                 )
-
-                 inpaint_model_id = gr.Dropdown(
-                     choices=stable_inpiant_model_list,
-                     value=stable_inpiant_model_list[0],
-                     label='Inpaint Model Id'
-                 )
-
-                 inpaint_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_prompt_list[0],
-                     label='Prompt'
-                 )
-
-                 inpaint_negative_prompt = gr.Textbox(
-                     lines=1,
-                     value=stable_negative_prompt_list[0],
-                     label='Negative Prompt'
-                 )
-
-                 with gr.Accordion("Advanced Options", open=False):
-                     inpaint_guidance_scale = gr.Slider(
-                         minimum=0.1,
-                         maximum=15,
-                         step=0.1,
-                         value=7.5,
-                         label='Guidance Scale'
-                     )
-
-                     inpaint_num_inference_step = gr.Slider(
-                         minimum=1,
-                         maximum=100,
-                         step=1,
-                         value=50,
-                         label='Num Inference Step'
-                     )
-
-                 inpaint_predict = gr.Button(value='Generator')
-
              with gr.Tab('ControlNet'):
-                 with gr.Tab('Canny'):
-                     controlnet_canny_image_file = gr.Image(label='Image')
-
-                     controlnet_canny_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_canny_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_canny_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_canny_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_canny_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_canny_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('Hed'):
-                     controlnet_hed_image_file = gr.Image(label='Image')
-
-                     controlnet_hed_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_hed_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_hed_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_hed_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_hed_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_hed_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('MLSD line'):
-                     controlnet_mlsd_image_file = gr.Image(label='Image')
-
-                     controlnet_mlsd_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_mlsd_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_mlsd_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_mlsd_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_mlsd_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_mlsd_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('Segmentation'):
-                     controlnet_seg_image_file = gr.Image(label='Image')
-
-                     controlnet_seg_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_seg_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_seg_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_seg_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_seg_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_seg_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('Depth'):
-                     controlnet_depth_image_file = gr.Image(label='Image')
-
-                     controlnet_depth_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_depth_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_depth_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_depth_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_depth_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_depth_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('Scribble'):
-                     controlnet_scribble_image_file = gr.Image(label='Image')
-
-                     controlnet_scribble_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_scribble_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_scribble_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_scribble_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_scribble_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
-
-                     controlnet_scribble_predict = gr.Button(value='Generator')
-
-                 with gr.Tab('Pose'):
-                     controlnet_pose_image_file = gr.Image(label='Image')
-
-                     controlnet_pose_model_id = gr.Dropdown(
-                         choices=stable_model_list,
-                         value=stable_model_list[0],
-                         label='Stable Model Id'
-                     )
-
-                     controlnet_pose_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_prompt_list[0],
-                         label='Prompt'
-                     )
-
-                     controlnet_pose_negative_prompt = gr.Textbox(
-                         lines=1,
-                         value=stable_negative_prompt_list[0],
-                         label='Negative Prompt'
-                     )
-
-                     with gr.Accordion("Advanced Options", open=False):
-                         controlnet_pose_guidance_scale = gr.Slider(
-                             minimum=0.1,
-                             maximum=15,
-                             step=0.1,
-                             value=7.5,
-                             label='Guidance Scale'
-                         )
-
-                         controlnet_pose_num_inference_step = gr.Slider(
-                             minimum=1,
-                             maximum=100,
-                             step=1,
-                             value=50,
-                             label='Num Inference Step'
-                         )
 
-                     controlnet_pose_predict = gr.Button(value='Generator')
 
-         with gr.Tab('Generator'):
              with gr.Column():
                  output_image = gr.Image(label='Image')
 
-     text2image_predict.click(
          fn = stable_diffusion_text2img,
          inputs = [
-             text2image_model_id,
-             text2image_prompt,
-             text2image_negative_prompt,
-             text2image_guidance_scale,
-             text2image_num_inference_step,
-             text2image_height,
-             text2image_width,
          ],
          outputs = [output_image],
      )
 
-     image2image_predict.click(
          fn = stable_diffusion_img2img,
          inputs = [
-             image2image2_image_file,
-             image2image_model_id,
-             image2image_prompt,
-             image2image_negative_prompt,
-             image2image_guidance_scale,
-             image2image_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     inpaint_predict.click(
          fn = stable_diffusion_inpaint,
          inputs = [
-             inpaint_image_file,
-             inpaint_model_id,
-             inpaint_prompt,
-             inpaint_negative_prompt,
-             inpaint_guidance_scale,
-             inpaint_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_canny_predict.click(
          fn = stable_diffusion_controlnet_canny,
          inputs = [
-             controlnet_canny_image_file,
-             controlnet_canny_model_id,
-             controlnet_canny_prompt,
-             controlnet_canny_negative_prompt,
-             controlnet_canny_guidance_scale,
-             controlnet_canny_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_hed_predict.click(
          fn = stable_diffusion_controlnet_hed,
          inputs = [
-             controlnet_hed_image_file,
-             controlnet_hed_model_id,
-             controlnet_hed_prompt,
-             controlnet_hed_negative_prompt,
-             controlnet_hed_guidance_scale,
-             controlnet_hed_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_mlsd_predict.click(
          fn = stable_diffusion_controlnet_mlsd,
          inputs = [
-             controlnet_mlsd_image_file,
-             controlnet_mlsd_model_id,
-             controlnet_mlsd_prompt,
-             controlnet_mlsd_negative_prompt,
-             controlnet_mlsd_guidance_scale,
-             controlnet_mlsd_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_seg_predict.click(
          fn = stable_diffusion_controlnet_seg,
          inputs = [
-             controlnet_seg_image_file,
-             controlnet_seg_model_id,
-             controlnet_seg_prompt,
-             controlnet_seg_negative_prompt,
-             controlnet_seg_guidance_scale,
-             controlnet_seg_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_depth_predict.click(
          fn = stable_diffusion_controlnet_depth,
          inputs = [
-             controlnet_depth_image_file,
-             controlnet_depth_model_id,
-             controlnet_depth_prompt,
-             controlnet_depth_negative_prompt,
-             controlnet_depth_guidance_scale,
-             controlnet_depth_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_scribble_predict.click(
          fn = stable_diffusion_controlnet_scribble,
          inputs = [
-             controlnet_scribble_image_file,
-             controlnet_scribble_model_id,
-             controlnet_scribble_prompt,
-             controlnet_scribble_negative_prompt,
-             controlnet_scribble_guidance_scale,
-             controlnet_scribble_num_inference_step,
          ],
          outputs = [output_image],
      )
 
-     controlnet_pose_predict.click(
          fn = stable_diffusion_controlnet_pose,
          inputs = [
-             controlnet_pose_image_file,
-             controlnet_pose_model_id,
-             controlnet_pose_prompt,
-             controlnet_pose_negative_prompt,
-             controlnet_pose_guidance_scale,
-             controlnet_pose_num_inference_step,
          ],
          outputs = [output_image],
      )
 
- app.launch()
 
+ from diffusion_webui.controlnet.controlnet_canny import stable_diffusion_controlnet_canny_app, stable_diffusion_controlnet_canny
+ from diffusion_webui.controlnet.controlnet_depth import stable_diffusion_controlnet_depth_app, stable_diffusion_controlnet_depth
+ from diffusion_webui.controlnet.controlnet_hed import stable_diffusion_controlnet_hed_app, stable_diffusion_controlnet_hed
+ from diffusion_webui.controlnet.controlnet_mlsd import stable_diffusion_controlnet_mlsd_app, stable_diffusion_controlnet_mlsd
+ from diffusion_webui.controlnet.controlnet_pose import stable_diffusion_controlnet_pose_app, stable_diffusion_controlnet_pose
+ from diffusion_webui.controlnet.controlnet_scribble import stable_diffusion_controlnet_scribble_app, stable_diffusion_controlnet_scribble
+ from diffusion_webui.controlnet.controlnet_seg import stable_diffusion_controlnet_seg_app, stable_diffusion_controlnet_seg
 
+ from diffusion_webui.stable_diffusion.text2img_app import stable_diffusion_text2img_app, stable_diffusion_text2img
+ from diffusion_webui.stable_diffusion.img2img_app import stable_diffusion_img2img_app, stable_diffusion_img2img
+ from diffusion_webui.stable_diffusion.inpaint_app import stable_diffusion_inpaint_app, stable_diffusion_inpaint
 
  import gradio as gr
 
  app = gr.Blocks()
  with app:
      gr.Markdown("# **<h1 align='center'>Stable Diffusion + ControlNet WebUI<h1>**")
 
      )
      with gr.Row():
          with gr.Column():
+             text2image_app = stable_diffusion_text2img_app()
+             img2img_app = stable_diffusion_img2img_app()
+             inpaint_app = stable_diffusion_inpaint_app()
+
              with gr.Tab('ControlNet'):
+                 controlnet_canny_app = stable_diffusion_controlnet_canny_app()
+                 controlnet_hed_app = stable_diffusion_controlnet_hed_app()
+                 controlnet_mlsd_app = stable_diffusion_controlnet_mlsd_app()
+                 controlnet_depth_app = stable_diffusion_controlnet_depth_app()
+                 controlnet_pose_app = stable_diffusion_controlnet_pose_app()
+                 controlnet_scribble_app = stable_diffusion_controlnet_scribble_app()
+                 controlnet_seg_app = stable_diffusion_controlnet_seg_app()
 
+         with gr.Tab('Output'):
              with gr.Column():
                  output_image = gr.Image(label='Image')
 
+     text2image_app['predict'].click(
          fn = stable_diffusion_text2img,
          inputs = [
+             text2image_app['model_path'],
+             text2image_app['prompt'],
+             text2image_app['negative_prompt'],
+             text2image_app['guidance_scale'],
+             text2image_app['num_inference_step'],
+             text2image_app['height'],
+             text2image_app['width'],
          ],
          outputs = [output_image],
      )
 
+     img2img_app['predict'].click(
          fn = stable_diffusion_img2img,
          inputs = [
+             img2img_app['model_path'],
+             img2img_app['image_path'],
+             img2img_app['prompt'],
+             img2img_app['negative_prompt'],
+             img2img_app['guidance_scale'],
+             img2img_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     inpaint_app['predict'].click(
          fn = stable_diffusion_inpaint,
          inputs = [
+             inpaint_app['image_path'],
+             inpaint_app['model_path'],
+             inpaint_app['prompt'],
+             inpaint_app['negative_prompt'],
+             inpaint_app['guidance_scale'],
+             inpaint_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_canny_app['predict'].click(
          fn = stable_diffusion_controlnet_canny,
          inputs = [
+             controlnet_canny_app['image_path'],
+             controlnet_canny_app['model_path'],
+             controlnet_canny_app['prompt'],
+             controlnet_canny_app['negative_prompt'],
+             controlnet_canny_app['guidance_scale'],
+             controlnet_canny_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_hed_app['predict'].click(
          fn = stable_diffusion_controlnet_hed,
          inputs = [
+             controlnet_hed_app['image_path'],
+             controlnet_hed_app['model_path'],
+             controlnet_hed_app['prompt'],
+             controlnet_hed_app['negative_prompt'],
+             controlnet_hed_app['guidance_scale'],
+             controlnet_hed_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_mlsd_app['predict'].click(
          fn = stable_diffusion_controlnet_mlsd,
          inputs = [
+             controlnet_mlsd_app['image_path'],
+             controlnet_mlsd_app['model_path'],
+             controlnet_mlsd_app['prompt'],
+             controlnet_mlsd_app['negative_prompt'],
+             controlnet_mlsd_app['guidance_scale'],
+             controlnet_mlsd_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_depth_app['predict'].click(
          fn = stable_diffusion_controlnet_depth,
          inputs = [
+             controlnet_depth_app['image_path'],
+             controlnet_depth_app['model_path'],
+             controlnet_depth_app['prompt'],
+             controlnet_depth_app['negative_prompt'],
+             controlnet_depth_app['guidance_scale'],
+             controlnet_depth_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_pose_app['predict'].click(
          fn = stable_diffusion_controlnet_pose,
          inputs = [
+             controlnet_pose_app['image_path'],
+             controlnet_pose_app['model_path'],
+             controlnet_pose_app['prompt'],
+             controlnet_pose_app['negative_prompt'],
+             controlnet_pose_app['guidance_scale'],
+             controlnet_pose_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_scribble_app['predict'].click(
          fn = stable_diffusion_controlnet_scribble,
          inputs = [
+             controlnet_scribble_app['image_path'],
+             controlnet_scribble_app['model_path'],
+             controlnet_scribble_app['prompt'],
+             controlnet_scribble_app['negative_prompt'],
+             controlnet_scribble_app['guidance_scale'],
+             controlnet_scribble_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+     controlnet_seg_app['predict'].click(
          fn = stable_diffusion_controlnet_seg,
          inputs = [
+             controlnet_seg_app['image_path'],
+             controlnet_seg_app['model_path'],
+             controlnet_seg_app['prompt'],
+             controlnet_seg_app['negative_prompt'],
+             controlnet_seg_app['guidance_scale'],
+             controlnet_seg_app['num_inference_step'],
          ],
          outputs = [output_image],
      )
 
+ app.launch(debug=True)
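
The pattern above is the heart of the refactor: each `*_app()` builder constructs one tab's widgets and returns them in a dict keyed by parameter name, and the top level wires every 'predict' button to its inference function and one shared output component. (Note the handler pairings were corrected here; as committed, the depth, pose, and seg buttons and the img2img argument order were cross-wired.) A minimal, self-contained sketch of the same pattern, with hypothetical `echo` names that are not part of this commit:

import gradio as gr

# Builder returns its components keyed by name, like the *_app() functions above.
def echo_app():
    with gr.Tab('Echo'):
        text = gr.Textbox(label='Text')
        predict = gr.Button(value='Generator')
    return {'text': text, 'predict': predict}

def run_echo(text):
    return text

app = gr.Blocks()
with app:
    with gr.Row():
        with gr.Column():
            echo = echo_app()
        with gr.Tab('Output'):
            output = gr.Textbox(label='Result')
    # The top level owns the wiring, exactly as app.py does for each module.
    echo['predict'].click(fn=run_echo, inputs=[echo['text']], outputs=[output])

app.launch()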
diffusion_webui/__init__.py ADDED
File without changes
diffusion_webui/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (157 Bytes)
diffusion_webui/controlnet/__init__.py ADDED
File without changes
diffusion_webui/controlnet/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (168 Bytes)
diffusion_webui/controlnet/__pycache__/controlnet_canny.cpython-38.pyc ADDED
Binary file (3.01 kB)
diffusion_webui/controlnet/__pycache__/controlnet_depth.cpython-38.pyc ADDED
Binary file (3.09 kB)
diffusion_webui/controlnet/__pycache__/controlnet_hed.cpython-38.pyc ADDED
Binary file (2.91 kB)
diffusion_webui/controlnet/__pycache__/controlnet_mlsd.cpython-38.pyc ADDED
Binary file (2.93 kB)
diffusion_webui/controlnet/__pycache__/controlnet_pose.cpython-38.pyc ADDED
Binary file (2.94 kB)
diffusion_webui/controlnet/__pycache__/controlnet_scribble.cpython-38.pyc ADDED
Binary file (3 kB)
diffusion_webui/controlnet/__pycache__/controlnet_seg.cpython-38.pyc ADDED
Binary file (5.43 kB)
 
diffusion_webui/controlnet/controlnet_canny.py ADDED
@@ -0,0 +1,138 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from PIL import Image
+ import gradio as gr
+ import numpy as np
+ import torch
+ import cv2
+
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+ def controlnet_canny(
+     image_path:str,
+ ):
+     image = Image.open(image_path)
+     image = np.array(image)
+
+     image = cv2.Canny(image, 100, 200)
+     image = image[:, :, None]
+     image = np.concatenate([image, image, image], axis=2)
+     image = Image.fromarray(image)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "lllyasviel/sd-controlnet-canny",
+         torch_dtype=torch.float16
+     )
+     return controlnet, image
+
+
+ def stable_diffusion_controlnet_canny(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_canny(image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16,
+     )
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+
+ def stable_diffusion_controlnet_canny_app():
+     with gr.Tab('Canny'):
+         controlnet_canny_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_canny_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_canny_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_canny_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_canny_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_canny_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_canny_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_canny_image_file,
+         'model_path': controlnet_canny_model_id,
+         'prompt': controlnet_canny_prompt,
+         'negative_prompt': controlnet_canny_negative_prompt,
+         'guidance_scale': controlnet_canny_guidance_scale,
+         'num_inference_step': controlnet_canny_num_inference_step,
+         'predict': controlnet_canny_predict
+     }
+
+     return variables
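
The Canny branch is the one preprocessor above that needs no extra model download for the control image itself, so that step can be previewed standalone. A short sketch using the commit's 100/200 thresholds ('input.png' is a placeholder path):

import cv2
import numpy as np
from PIL import Image

# Same preprocessing as controlnet_canny() above, without loading the ControlNet.
image = np.array(Image.open("input.png"))
edges = cv2.Canny(image, 100, 200)                        # single-channel edge map
edges = np.concatenate([edges[:, :, None]] * 3, axis=2)   # replicate to 3 channels
Image.fromarray(edges).save("canny_control.png")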
diffusion_webui/controlnet/controlnet_depth.py ADDED
@@ -0,0 +1,137 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from transformers import pipeline
+ from PIL import Image
+ import gradio as gr
+ import numpy as np
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def controlnet_depth(image_path:str):
+     depth_estimator = pipeline('depth-estimation')
+
+     image = Image.open(image_path)
+     image = depth_estimator(image)['depth']
+     image = np.array(image)
+     image = image[:, :, None]
+     image = np.concatenate([image, image, image], axis=2)
+     image = Image.fromarray(image)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=torch.float16
+     )
+
+     return controlnet, image
+
+ def stable_diffusion_controlnet_depth(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_depth(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+
+ def stable_diffusion_controlnet_depth_app():
+     with gr.Tab('Depth'):
+         controlnet_depth_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_depth_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_depth_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_depth_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_depth_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_depth_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_depth_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_depth_image_file,
+         'model_path': controlnet_depth_model_id,
+         'prompt': controlnet_depth_prompt,
+         'negative_prompt': controlnet_depth_negative_prompt,
+         'guidance_scale': controlnet_depth_guidance_scale,
+         'num_inference_step': controlnet_depth_num_inference_step,
+         'predict': controlnet_depth_predict
+     }
+
+     return variables
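
The depth branch builds its control image with a transformers depth-estimation pipeline; called with no model argument, as in the commit, pipeline('depth-estimation') downloads a default DPT checkpoint. The preprocessing alone, as a sketch ('input.png' is a placeholder):

import numpy as np
from PIL import Image
from transformers import pipeline

# Same steps as controlnet_depth() above, stopping at the control image.
depth_estimator = pipeline('depth-estimation')
depth = depth_estimator(Image.open("input.png"))['depth']  # single-channel PIL image
depth = np.array(depth)[:, :, None].repeat(3, axis=2)      # replicate to RGB
Image.fromarray(depth).save("depth_control.png")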
diffusion_webui/controlnet/controlnet_hed.py ADDED
@@ -0,0 +1,132 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from controlnet_aux import HEDdetector
+ from PIL import Image
+ import gradio as gr
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def controlnet_hed(image_path:str):
+     hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
+
+     image = Image.open(image_path)
+     image = hed(image)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-hed",
+         torch_dtype=torch.float16
+     )
+     return controlnet, image
+
+
+ def stable_diffusion_controlnet_hed(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_hed(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+ def stable_diffusion_controlnet_hed_app():
+     with gr.Tab('Hed'):
+         controlnet_hed_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_hed_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_hed_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_hed_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_hed_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_hed_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_hed_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_hed_image_file,
+         'model_path': controlnet_hed_model_id,
+         'prompt': controlnet_hed_prompt,
+         'negative_prompt': controlnet_hed_negative_prompt,
+         'guidance_scale': controlnet_hed_guidance_scale,
+         'num_inference_step': controlnet_hed_num_inference_step,
+         'predict': controlnet_hed_predict
+     }
+
+     return variables
diffusion_webui/controlnet/controlnet_mlsd.py ADDED
@@ -0,0 +1,133 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from controlnet_aux import MLSDdetector
+ from PIL import Image
+ import gradio as gr
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def controlnet_mlsd(image_path:str):
+     mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
+
+     image = Image.open(image_path)
+     image = mlsd(image)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-mlsd",
+         torch_dtype=torch.float16
+     )
+
+     return controlnet, image
+
+ def stable_diffusion_controlnet_mlsd(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_mlsd(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+ def stable_diffusion_controlnet_mlsd_app():
+     with gr.Tab('MLSD line'):
+         controlnet_mlsd_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_mlsd_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_mlsd_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_mlsd_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_mlsd_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_mlsd_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_mlsd_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_mlsd_image_file,
+         'model_path': controlnet_mlsd_model_id,
+         'prompt': controlnet_mlsd_prompt,
+         'negative_prompt': controlnet_mlsd_negative_prompt,
+         'guidance_scale': controlnet_mlsd_guidance_scale,
+         'num_inference_step': controlnet_mlsd_num_inference_step,
+         'predict': controlnet_mlsd_predict
+     }
+
+     return variables
+
diffusion_webui/controlnet/controlnet_pose.py ADDED
@@ -0,0 +1,134 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from controlnet_aux import OpenposeDetector
+
+ from PIL import Image
+ import gradio as gr
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def controlnet_pose(image_path:str):
+     openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+
+     image = Image.open(image_path)
+     image = openpose(image)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-openpose",
+         torch_dtype=torch.float16
+     )
+
+     return controlnet, image
+
+ def stable_diffusion_controlnet_pose(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_pose(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+
+ def stable_diffusion_controlnet_pose_app():
+     with gr.Tab('Pose'):
+         controlnet_pose_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_pose_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_pose_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_pose_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_pose_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_pose_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_pose_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_pose_image_file,
+         'model_path': controlnet_pose_model_id,
+         'prompt': controlnet_pose_prompt,
+         'negative_prompt': controlnet_pose_negative_prompt,
+         'guidance_scale': controlnet_pose_guidance_scale,
+         'num_inference_step': controlnet_pose_num_inference_step,
+         'predict': controlnet_pose_predict
+     }
+
+     return variables
diffusion_webui/controlnet/controlnet_scribble.py ADDED
@@ -0,0 +1,132 @@
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from controlnet_aux import HEDdetector
+
+ from PIL import Image
+ import gradio as gr
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def controlnet_scribble(image_path:str):
+     hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
+
+     image = Image.open(image_path)
+     image = hed(image, scribble=True)
+
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-scribble", torch_dtype=torch.float16
+     )
+
+     return controlnet, image
+
+ def stable_diffusion_controlnet_scribble(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_scribble(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+ def stable_diffusion_controlnet_scribble_app():
+     with gr.Tab('Scribble'):
+         controlnet_scribble_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_scribble_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_scribble_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_scribble_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_scribble_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_scribble_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_scribble_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_scribble_image_file,
+         'model_path': controlnet_scribble_model_id,
+         'prompt': controlnet_scribble_prompt,
+         'negative_prompt': controlnet_scribble_negative_prompt,
+         'guidance_scale': controlnet_scribble_guidance_scale,
+         'num_inference_step': controlnet_scribble_num_inference_step,
+         'predict': controlnet_scribble_predict
+     }
+
+     return variables
diffusion_webui/controlnet/controlnet_seg.py ADDED
@@ -0,0 +1,191 @@
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
+ from diffusers import (StableDiffusionControlNetPipeline,
+     ControlNetModel, UniPCMultistepScheduler)
+
+ from PIL import Image
+ import gradio as gr
+ import numpy as np
+ import torch
+
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2",
+     "stabilityai/stable-diffusion-2-base",
+     "stabilityai/stable-diffusion-2-1",
+     "stabilityai/stable-diffusion-2-1-base"
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting"
+ ]
+
+ stable_prompt_list = [
+     "a photo of a man.",
+     "a photo of a girl."
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+
+ def ade_palette():
+     """ADE20K palette that maps each class to RGB values."""
+     return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
+             [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
+             [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
+             [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
+             [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
+             [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
+             [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
+             [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
+             [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
+             [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
+             [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
+             [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
+             [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
+             [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
+             [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
+             [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
+             [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
+             [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
+             [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
+             [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
+             [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
+             [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
+             [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
+             [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
+             [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
+             [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
+             [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
+             [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
+             [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
+             [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
+             [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
+             [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
+             [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
+             [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
+             [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
+             [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
+             [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
+             [102, 255, 0], [92, 0, 255]]
+
+
+ def controlnet_seg(image_path:str):
+     image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
+     image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
+
+     image = Image.open(image_path).convert('RGB')
+     pixel_values = image_processor(image, return_tensors="pt").pixel_values
+
+     with torch.no_grad():
+         outputs = image_segmentor(pixel_values)
+
+     seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+
+     color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
+     palette = np.array(ade_palette())
+
+     for label, color in enumerate(palette):
+         color_seg[seg == label, :] = color
+
+     color_seg = color_seg.astype(np.uint8)
+     image = Image.fromarray(color_seg)
+     controlnet = ControlNetModel.from_pretrained(
+         "fusing/stable-diffusion-v1-5-controlnet-seg", torch_dtype=torch.float16
+     )
+
+     return controlnet, image
+
+
+ def stable_diffusion_controlnet_seg(
+     image_path:str,
+     model_path:str,
+     prompt:str,
+     negative_prompt:str,
+     guidance_scale:int,
+     num_inference_step:int,
+ ):
+
+     controlnet, image = controlnet_seg(image_path=image_path)
+
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         pretrained_model_name_or_path=model_path,
+         controlnet=controlnet,
+         safety_checker=None,
+         torch_dtype=torch.float16
+     )
+
+     pipe.to("cuda")
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     pipe.enable_xformers_memory_efficient_attention()
+
+     output = pipe(
+         prompt = prompt,
+         image = image,
+         negative_prompt = negative_prompt,
+         num_inference_steps = num_inference_step,
+         guidance_scale = guidance_scale,
+     ).images
+
+     return output[0]
+
+ def stable_diffusion_controlnet_seg_app():
+     with gr.Tab('Segmentation'):
+         controlnet_seg_image_file = gr.Image(
+             type='filepath',
+             label='Image'
+         )
+
+         controlnet_seg_model_id = gr.Dropdown(
+             choices=stable_model_list,
+             value=stable_model_list[0],
+             label='Stable Model Id'
+         )
+
+         controlnet_seg_prompt = gr.Textbox(
+             lines=1,
+             value=stable_prompt_list[0],
+             label='Prompt'
+         )
+
+         controlnet_seg_negative_prompt = gr.Textbox(
+             lines=1,
+             value=stable_negative_prompt_list[0],
+             label='Negative Prompt'
+         )
+
+         with gr.Accordion("Advanced Options", open=False):
+             controlnet_seg_guidance_scale = gr.Slider(
+                 minimum=0.1,
+                 maximum=15,
+                 step=0.1,
+                 value=7.5,
+                 label='Guidance Scale'
+             )
+
+             controlnet_seg_num_inference_step = gr.Slider(
+                 minimum=1,
+                 maximum=100,
+                 step=1,
+                 value=50,
+                 label='Num Inference Step'
+             )
+
+         controlnet_seg_predict = gr.Button(value='Generator')
+
+     variables = {
+         'image_path': controlnet_seg_image_file,
+         'model_path': controlnet_seg_model_id,
+         'prompt': controlnet_seg_prompt,
+         'negative_prompt': controlnet_seg_negative_prompt,
+         'guidance_scale': controlnet_seg_guidance_scale,
+         'num_inference_step': controlnet_seg_num_inference_step,
+         'predict': controlnet_seg_predict,
+     }
+
+     return variables
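
(The preprocessing helper was committed under the copy-pasted name controlnet_mlsd and with a duplicate torch import; both are corrected above.) The colorization loop relies on boolean-mask indexing to paint every pixel of a class at once. A toy demonstration of that step with the first three ADE20K palette entries:

import numpy as np

palette = np.array([[120, 120, 120], [180, 120, 120], [6, 230, 230]])
seg = np.array([[0, 1], [2, 1]])                       # toy 2x2 class-id map
color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
for label, color in enumerate(palette):
    color_seg[seg == label, :] = color                 # paint one class per pass
print(color_seg[0, 1])                                 # -> [180 120 120], class 1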
diffusion_webui/stable_diffusion/__init__.py ADDED
File without changes
diffusion_webui/stable_diffusion/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (174 Bytes)
diffusion_webui/stable_diffusion/__pycache__/img2img_app.cpython-38.pyc ADDED
Binary file (2.44 kB)
diffusion_webui/stable_diffusion/__pycache__/inpaint_app.cpython-38.pyc ADDED
Binary file (3.08 kB)
diffusion_webui/stable_diffusion/__pycache__/text2img_app.cpython-38.pyc ADDED
Binary file (2.45 kB)
diffusion_webui/stable_diffusion/img2img_app.py ADDED
@@ -0,0 +1,116 @@
+from diffusers import StableDiffusionImg2ImgPipeline, DDIMScheduler
+
+from PIL import Image
+import gradio as gr
+import torch
+
+stable_model_list = [
+    "runwayml/stable-diffusion-v1-5",
+    "stabilityai/stable-diffusion-2",
+    "stabilityai/stable-diffusion-2-base",
+    "stabilityai/stable-diffusion-2-1",
+    "stabilityai/stable-diffusion-2-1-base"
+]
+
+stable_inpaint_model_list = [
+    "stabilityai/stable-diffusion-2-inpainting",
+    "runwayml/stable-diffusion-inpainting"
+]
+
+stable_prompt_list = [
+    "a photo of a man.",
+    "a photo of a girl."
+]
+
+stable_negative_prompt_list = [
+    "bad, ugly",
+    "deformed"
+]
+
+
+def stable_diffusion_img2img(
+    model_path: str,
+    image_path: str,
+    prompt: str,
+    negative_prompt: str,
+    guidance_scale: int,
+    num_inference_step: int,
+):
+
+    image = Image.open(image_path)
+
+    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+        model_path,
+        safety_checker=None,
+        torch_dtype=torch.float16
+    )
+    pipe.to("cuda")
+    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_xformers_memory_efficient_attention()
+
+    output = pipe(
+        prompt=prompt,
+        image=image,
+        negative_prompt=negative_prompt,
+        num_inference_steps=num_inference_step,
+        guidance_scale=guidance_scale,
+    ).images
+
+    return output[0]
+
+
+def stable_diffusion_img2img_app():
+    with gr.Tab('Image2Image'):
+        image2image_image_file = gr.Image(
+            type='filepath',
+            label='Image'
+        )
+
+        image2image_model_path = gr.Dropdown(
+            choices=stable_model_list,
+            value=stable_model_list[0],
+            label='Image-Image Model Id'
+        )
+
+        image2image_prompt = gr.Textbox(
+            lines=1,
+            value=stable_prompt_list[0],
+            label='Prompt'
+        )
+
+        image2image_negative_prompt = gr.Textbox(
+            lines=1,
+            value=stable_negative_prompt_list[0],
+            label='Negative Prompt'
+        )
+
+        with gr.Accordion("Advanced Options", open=False):
+            image2image_guidance_scale = gr.Slider(
+                minimum=0.1,
+                maximum=15,
+                step=0.1,
+                value=7.5,
+                label='Guidance Scale'
+            )
+
+            image2image_num_inference_step = gr.Slider(
+                minimum=1,
+                maximum=100,
+                step=1,
+                value=50,
+                label='Num Inference Step'
+            )
+
+        image2image_predict = gr.Button(value='Generate')
+
+        variables = {
+            'image_path': image2image_image_file,
+            'model_path': image2image_model_path,
+            'prompt': image2image_prompt,
+            'negative_prompt': image2image_negative_prompt,
+            'guidance_scale': image2image_guidance_scale,
+            'num_inference_step': image2image_num_inference_step,
+            'predict': image2image_predict
+        }
+
+        return variables
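
For orientation, a direct call to stable_diffusion_img2img might look like the sketch below; the input path and prompts are placeholders, and a CUDA GPU with fp16 support is assumed:

# 'input.png' stands in for any RGB image on disk.
result = stable_diffusion_img2img(
    model_path='runwayml/stable-diffusion-v1-5',
    image_path='input.png',
    prompt='a photo of a man.',
    negative_prompt='bad, ugly',
    guidance_scale=7.5,
    num_inference_step=50,
)
result.save('img2img_output.png')  # the function returns a PIL.Image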
diffusion_webui/stable_diffusion/inpaint_app.py ADDED
@@ -0,0 +1,136 @@
+from diffusers import DiffusionPipeline, DDIMScheduler
+from PIL import Image
+import imageio
+import torch
+
+import gradio as gr
+
+stable_model_list = [
+    "runwayml/stable-diffusion-v1-5",
+    "stabilityai/stable-diffusion-2",
+    "stabilityai/stable-diffusion-2-base",
+    "stabilityai/stable-diffusion-2-1",
+    "stabilityai/stable-diffusion-2-1-base"
+]
+
+stable_inpaint_model_list = [
+    "stabilityai/stable-diffusion-2-inpainting",
+    "runwayml/stable-diffusion-inpainting"
+]
+
+stable_prompt_list = [
+    "a photo of a man.",
+    "a photo of a girl."
+]
+
+stable_negative_prompt_list = [
+    "bad, ugly",
+    "deformed"
+]
+
+
+def resize(height, img):
+    # Scale the image so its height equals `height`, preserving aspect ratio.
+    baseheight = height
+    img = Image.open(img)
+    hpercent = baseheight / float(img.size[1])
+    wsize = int(float(img.size[0]) * hpercent)
+    img = img.resize((wsize, baseheight), Image.Resampling.LANCZOS)
+    return img
+
+
+def img_preprocess(source_img):
+    # The sketch tool delivers {"image": ndarray, "mask": ndarray}; write
+    # both to disk, then resize them to a common height of 512.
+    imageio.imwrite("data.png", source_img["image"])
+    imageio.imwrite("data_mask.png", source_img["mask"])
+    src = resize(512, "data.png")
+    src.save("src.png")
+    mask = resize(512, "data_mask.png")
+    mask.save("mask.png")
+    return src, mask
+
+
+def stable_diffusion_inpaint(
+    image_path: dict,  # the sketch-tool dict, not a file path
+    model_path: str,
+    prompt: str,
+    negative_prompt: str,
+    guidance_scale: int,
+    num_inference_step: int,
+):
+
+    image, mask_image = img_preprocess(image_path)
+    pipe = DiffusionPipeline.from_pretrained(
+        model_path,
+        revision="fp16",
+        torch_dtype=torch.float16,
+    )
+    pipe.to('cuda')
+    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_xformers_memory_efficient_attention()
+
+    output = pipe(
+        prompt=prompt,
+        image=image,
+        mask_image=mask_image,
+        negative_prompt=negative_prompt,
+        num_inference_steps=num_inference_step,
+        guidance_scale=guidance_scale,
+    ).images
+
+    return output[0]
+
+
+def stable_diffusion_inpaint_app():
+    with gr.Tab('Inpaint'):
+        inpaint_image_file = gr.Image(
+            source="upload",
+            type="numpy",
+            tool="sketch",
+            elem_id="source_container"
+        )
+
+        inpaint_model_id = gr.Dropdown(
+            choices=stable_inpaint_model_list,
+            value=stable_inpaint_model_list[0],
+            label='Inpaint Model Id'
+        )
+
+        inpaint_prompt = gr.Textbox(
+            lines=1,
+            value=stable_prompt_list[0],
+            label='Prompt'
+        )
+
+        inpaint_negative_prompt = gr.Textbox(
+            lines=1,
+            value=stable_negative_prompt_list[0],
+            label='Negative Prompt'
+        )
+
+        with gr.Accordion("Advanced Options", open=False):
+            inpaint_guidance_scale = gr.Slider(
+                minimum=0.1,
+                maximum=15,
+                step=0.1,
+                value=7.5,
+                label='Guidance Scale'
+            )
+
+            inpaint_num_inference_step = gr.Slider(
+                minimum=1,
+                maximum=100,
+                step=1,
+                value=50,
+                label='Num Inference Step'
+            )
+
+        inpaint_predict = gr.Button(value='Generate')
+
+        variables = {
+            "image_path": inpaint_image_file,
+            "model_path": inpaint_model_id,
+            "prompt": inpaint_prompt,
+            "negative_prompt": inpaint_negative_prompt,
+            "guidance_scale": inpaint_guidance_scale,
+            "num_inference_step": inpaint_num_inference_step,
+            "predict": inpaint_predict
+        }
+
+        return variables
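
Because the inpaint image widget uses type="numpy" with tool="sketch", Gradio passes the handler a dict of arrays rather than a file path. A small stand-in, with illustrative shapes, for exercising img_preprocess without the UI:

import numpy as np

fake_input = {
    'image': np.zeros((512, 512, 3), dtype=np.uint8),  # the uploaded picture
    'mask': np.zeros((512, 512, 3), dtype=np.uint8),   # nonzero pixels mark the inpaint region
}
src, mask = img_preprocess(fake_input)  # both come back resized to height 512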
diffusion_webui/stable_diffusion/text2img_app.py ADDED
@@ -0,0 +1,125 @@
+from diffusers import StableDiffusionPipeline, DDIMScheduler
+import gradio as gr
+import torch
+
+stable_model_list = [
+    "runwayml/stable-diffusion-v1-5",
+    "stabilityai/stable-diffusion-2",
+    "stabilityai/stable-diffusion-2-base",
+    "stabilityai/stable-diffusion-2-1",
+    "stabilityai/stable-diffusion-2-1-base"
+]
+
+stable_inpaint_model_list = [
+    "stabilityai/stable-diffusion-2-inpainting",
+    "runwayml/stable-diffusion-inpainting"
+]
+
+stable_prompt_list = [
+    "a photo of a man.",
+    "a photo of a girl."
+]
+
+stable_negative_prompt_list = [
+    "bad, ugly",
+    "deformed"
+]
+
+
+def stable_diffusion_text2img(
+    model_path: str,
+    prompt: str,
+    negative_prompt: str,
+    guidance_scale: int,
+    num_inference_step: int,
+    height: int,
+    width: int,
+):
+
+    pipe = StableDiffusionPipeline.from_pretrained(
+        model_path,
+        safety_checker=None,
+        torch_dtype=torch.float16
+    ).to("cuda")
+
+    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_xformers_memory_efficient_attention()
+
+    images = pipe(
+        prompt,
+        height=height,
+        width=width,
+        negative_prompt=negative_prompt,
+        num_inference_steps=num_inference_step,
+        guidance_scale=guidance_scale,
+    ).images
+
+    return images[0]
+
+
+def stable_diffusion_text2img_app():
+    with gr.Tab('Text2Image'):
+        text2image_model_path = gr.Dropdown(
+            choices=stable_model_list,
+            value=stable_model_list[0],
+            label='Text-Image Model Id'
+        )
+
+        text2image_prompt = gr.Textbox(
+            lines=1,
+            value=stable_prompt_list[0],
+            label='Prompt'
+        )
+
+        text2image_negative_prompt = gr.Textbox(
+            lines=1,
+            value=stable_negative_prompt_list[0],
+            label='Negative Prompt'
+        )
+
+        with gr.Accordion("Advanced Options", open=False):
+            text2image_guidance_scale = gr.Slider(
+                minimum=0.1,
+                maximum=15,
+                step=0.1,
+                value=7.5,
+                label='Guidance Scale'
+            )
+
+            text2image_num_inference_step = gr.Slider(
+                minimum=1,
+                maximum=100,
+                step=1,
+                value=50,
+                label='Num Inference Step'
+            )
+
+            text2image_height = gr.Slider(
+                minimum=128,
+                maximum=1280,
+                step=32,
+                value=512,
+                label='Image Height'
+            )
+
+            text2image_width = gr.Slider(
+                minimum=128,
+                maximum=1280,
+                step=32,
+                value=768,
+                label='Image Width'
+            )
+
+        text2image_predict = gr.Button(value='Generate')
+
+        variables = {
+            "model_path": text2image_model_path,
+            "prompt": text2image_prompt,
+            "negative_prompt": text2image_negative_prompt,
+            "guidance_scale": text2image_guidance_scale,
+            "num_inference_step": text2image_num_inference_step,
+            "height": text2image_height,
+            "width": text2image_width,
+            "predict": text2image_predict
+        }
+
+        return variables
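
A direct call, again with illustrative values (height and width should stay multiples of 8 for the VAE, which the 32-pixel slider step already guarantees):

image = stable_diffusion_text2img(
    model_path='stabilityai/stable-diffusion-2-1-base',
    prompt='a photo of a girl.',
    negative_prompt='deformed',
    guidance_scale=7.5,
    num_inference_step=50,
    height=512,
    width=512,
)
image.save('text2img_output.png')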
requirements.txt CHANGED
@@ -3,4 +3,6 @@ bitsandbytes==0.35.0
 xformers
 controlnet_aux
 diffusers
-imageio
+imageio
+gradio
+triton
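
With these additions the environment can be reproduced locally via `pip install -r requirements.txt`; gradio backs the WebUI itself, and triton is presumably included for the xformers memory-efficient attention path enabled in every pipeline above.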