kadirnar committed
Commit e7e6b8b
1 Parent(s): 0a04bf3

upload upscale model

diffusion_webui/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "1.6.2"
+__version__ = "1.8.0"
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py CHANGED
@@ -3,9 +3,11 @@ import gradio as gr
 import numpy as np
 import torch
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_canny_model_list,
     stable_inpiant_model_list,
@@ -27,11 +29,13 @@ class StableDiffusionControlNetInpaintCannyGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -39,7 +43,7 @@ class StableDiffusionControlNetInpaintCannyGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()
 
         return self.pipe
-
+
     def load_image(self, image_path):
         image = np.array(image_path)
         image = Image.fromarray(image)
@@ -76,10 +80,10 @@ class StableDiffusionControlNetInpaintCannyGenerator:
 
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         control_image = self.controlnet_canny_inpaint(image_path=image_path)
         pipe = self.load_model(
             stable_model_path=stable_model_path,
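
Review note: the edits to this file, and the matching edits to the five sibling controlnet_inpaint modules below, are formatting-only. The long pipeline import is broken into a parenthesized multi-line import, and the from_pretrained() call gains an outer pair of parentheses, apparently to satisfy black/isort line-length rules. A minimal sketch of why the extra parentheses are behavior-neutral, using a toy stand-in for from_pretrained():

    # Toy stand-in for the real diffusers loader, for illustration only.
    def from_pretrained(**kwargs):
        return dict(kwargs)

    pipe_a = from_pretrained(torch_dtype="float16", safety_checker=None)
    pipe_b = (
        from_pretrained(torch_dtype="float16", safety_checker=None)
    )
    assert pipe_a == pipe_b  # parentheses change layout, not the bound value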
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py CHANGED
@@ -2,10 +2,12 @@ import gradio as gr
 import numpy as np
 import torch
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 from transformers import pipeline
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_depth_model_list,
     stable_inpiant_model_list,
@@ -27,11 +29,13 @@ class StableDiffusionControlInpaintNetDepthGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -72,10 +76,10 @@ class StableDiffusionControlInpaintNetDepthGenerator:
     ):
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         control_image = self.controlnet_inpaint_depth(image_path=image_path)
 
         pipe = self.load_model(
@@ -92,7 +96,6 @@ class StableDiffusionControlInpaintNetDepthGenerator:
 
         output = pipe(
             prompt=prompt,
-
             image=normal_image,
             mask_image=mask_image,
             control_image=control_image,
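
The depth generator leans on the transformers depth-estimation pipeline imported above. A hedged sketch of the kind of preprocessing controlnet_inpaint_depth performs (the method body is not shown in this diff; the input path is illustrative):

    import numpy as np
    from PIL import Image
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation")
    image = Image.open("input.png").convert("RGB").resize((512, 512))
    depth = depth_estimator(image)["depth"]  # predicted depth as a PIL image
    control = np.array(depth)[:, :, None].repeat(3, axis=2)  # 3-channel control map
    control_image = Image.fromarray(control)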
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py CHANGED
@@ -3,8 +3,11 @@ import numpy as np
 import torch
 from controlnet_aux import HEDdetector
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
+from PIL import Image
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_hed_model_list,
     stable_inpiant_model_list,
@@ -13,7 +16,6 @@ from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
     get_scheduler_list,
 )
-from PIL import Image
 
 # https://github.com/mikonvergence/ControlNetInpaint
 
@@ -27,11 +29,13 @@ class StableDiffusionControlNetInpaintHedGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -45,7 +49,6 @@ class StableDiffusionControlNetInpaintHedGenerator:
         image = Image.fromarray(image)
         return image
 
-
     def controlnet_inpaint_hed(self, image_path: str):
         hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
         image = image_path["image"].convert("RGB").resize((512, 512))
@@ -70,10 +73,10 @@ class StableDiffusionControlNetInpaintHedGenerator:
     ):
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         control_image = self.controlnet_inpaint_hed(image_path=image_path)
 
         pipe = self.load_model(
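
For reference, the detector loaded in controlnet_inpaint_hed turns the input into a soft-edge control map. A minimal sketch using only the calls visible in the diff (the input path is illustrative):

    from controlnet_aux import HEDdetector
    from PIL import Image

    hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
    image = Image.open("input.png").convert("RGB").resize((512, 512))
    control_image = hed(image)  # soft edge map as a PIL image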
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py CHANGED
@@ -3,8 +3,11 @@ import numpy as np
 import torch
 from controlnet_aux import MLSDdetector
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
+from PIL import Image
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_mlsd_model_list,
     stable_inpiant_model_list,
@@ -13,7 +16,6 @@ from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
     get_scheduler_list,
 )
-from PIL import Image
 
 # https://github.com/mikonvergence/ControlNetInpaint
 
@@ -27,11 +29,13 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -39,13 +43,12 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()
 
         return self.pipe
-
+
     def load_image(self, image_path):
         image = np.array(image_path)
         image = Image.fromarray(image)
         return image
 
-
     def controlnet_inpaint_mlsd(self, image_path: str):
         mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
         image = image_path["image"].convert("RGB").resize((512, 512))
@@ -71,10 +74,10 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
 
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         control_image = self.controlnet_inpaint_mlsd(image_path=image_path)
 
         pipe = self.load_model(
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py CHANGED
@@ -3,9 +3,11 @@ import numpy as np
 import torch
 from controlnet_aux import OpenposeDetector
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_pose_model_list,
     stable_inpiant_model_list,
@@ -28,11 +30,13 @@ class StableDiffusionControlNetInpaintPoseGenerator:
             controlnet_model_path, torch_dtype=torch.float16
         )
 
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -40,12 +44,12 @@ class StableDiffusionControlNetInpaintPoseGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()
 
         return self.pipe
-
+
     def load_image(self, image_path):
         image = np.array(image_path)
         image = Image.fromarray(image)
         return image
-
+
     def controlnet_pose_inpaint(self, image_path: str):
         openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
 
@@ -71,10 +75,10 @@ class StableDiffusionControlNetInpaintPoseGenerator:
     ):
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         controlnet_image = self.controlnet_pose_inpaint(image_path=image_path)
 
         pipe = self.load_model(
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py CHANGED
@@ -3,9 +3,11 @@ import numpy as np
 import torch
 from controlnet_aux import HEDdetector
 from diffusers import ControlNetModel
-from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+    StableDiffusionControlNetInpaintPipeline,
+)
 from diffusion_webui.utils.model_list import (
     controlnet_scribble_model_list,
     stable_inpiant_model_list,
@@ -17,6 +19,7 @@ from diffusion_webui.utils.scheduler_list import (
 
 # https://github.com/mikonvergence/ControlNetInpaint
 
+
 class StableDiffusionControlNetInpaintScribbleGenerator:
     def __init__(self):
         self.pipe = None
@@ -27,11 +30,13 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
             controlnet_model_path, torch_dtype=torch.float16
         )
 
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            pretrained_model_name_or_path=stable_model_path,
-            controlnet=controlnet,
-            safety_checker=None,
-            torch_dtype=torch.float16,
+        self.pipe = (
+            StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                pretrained_model_name_or_path=stable_model_path,
+                controlnet=controlnet,
+                safety_checker=None,
+                torch_dtype=torch.float16,
+            )
         )
 
         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
@@ -39,7 +44,7 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()
 
         return self.pipe
-
+
     def load_image(self, image_path):
         image = np.array(image_path)
         image = Image.fromarray(image)
@@ -70,11 +75,13 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
     ):
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
-        controlnet_image = self.controlnet_inpaint_scribble(image_path=image_path)
+
+        controlnet_image = self.controlnet_inpaint_scribble(
+            image_path=image_path
+        )
 
         pipe = self.load_model(
             stable_model_path=stable_model_path,
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py CHANGED
@@ -200,11 +200,12 @@ class StableDiffusionControlNetInpaintSegGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()
 
         return self.pipe
-
+
     def load_image(self, image_path):
         image = np.array(image_path)
         image = Image.fromarray(image)
         return image
+
     def controlnet_seg_inpaint(self, image_path: str):
         image_processor = AutoImageProcessor.from_pretrained(
             "openmmlab/upernet-convnext-small"
@@ -252,10 +253,10 @@ class StableDiffusionControlNetInpaintSegGenerator:
 
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
+
         normal_image = self.load_image(image_path=normal_image)
         mask_image = self.load_image(image_path=mask_image)
-
+
         controlnet_image = self.controlnet_seg_inpaint(image_path=image_path)
 
         pipe = self.load_model(
diffusion_webui/helpers.py CHANGED
@@ -49,3 +49,6 @@ from diffusion_webui.diffusion_models.stable_diffusion.inpaint_app import (
 from diffusion_webui.diffusion_models.stable_diffusion.text2img_app import (
     StableDiffusionText2ImageGenerator,
 )
+from diffusion_webui.upscaler_models.codeformer_upscaler import (
+    CodeformerUpscalerGenerator,
+)
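
With this re-export in place, application code can import the new upscaler from the central helpers module alongside the other generators; a one-line sketch of the intended (hypothetical) usage site:

    from diffusion_webui.helpers import CodeformerUpscalerGenerator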
diffusion_webui/upscaler_models/codeformer_upscaler.py ADDED
@@ -0,0 +1,84 @@
+import gradio as gr
+from codeformer.app import inference_app
+
+
+class CodeformerUpscalerGenerator:
+    def __init__(self):
+        self.pipe = None
+
+    def generate_image(
+        self,
+        image_path: str,
+        background_enhance: bool,
+        face_upsample: bool,
+        upscale: int,
+        codeformer_fidelity: int,
+    ):
+        if self.pipe is None:
+            self.pipe = inference_app(
+                image=image_path,
+                background_enhance=background_enhance,
+                face_upsample=face_upsample,
+                upscale=upscale,
+                codeformer_fidelity=codeformer_fidelity,
+            )
+
+        return [self.pipe]
+
+    def app():
+        with gr.Blocks():
+            with gr.Row():
+                with gr.Column():
+                    codeformer_upscale_image_file = gr.Image(
+                        type="filepath", label="Image"
+                    ).style(height=260)
+
+                    with gr.Row():
+                        with gr.Column():
+                            codeformer_face_upsample = gr.Checkbox(
+                                label="Face Upsample",
+                                value=True,
+                            )
+                            codeformer_upscale = gr.Slider(
+                                label="Upscale",
+                                minimum=1,
+                                maximum=4,
+                                step=1,
+                                value=2,
+                            )
+                        with gr.Row():
+                            with gr.Column():
+                                codeformer_background_enhance = gr.Checkbox(
+                                    label="Background Enhance",
+                                    value=True,
+                                )
+                                codeformer_upscale_fidelity = gr.Slider(
+                                    label="Codeformer Fidelity",
+                                    minimum=0.1,
+                                    maximum=1.0,
+                                    step=0.1,
+                                    value=0.5,
+                                )
+
+                    codeformer_upscale_predict_button = gr.Button(
+                        value="Generator"
+                    )
+
+                with gr.Column():
+                    output_image = gr.Gallery(
+                        label="Generated images",
+                        show_label=False,
+                        elem_id="gallery",
+                    ).style(grid=(1, 2))
+
+            codeformer_upscale_predict_button.click(
+                fn=CodeformerUpscalerGenerator().generate_image,
+                inputs=[
+                    codeformer_upscale_image_file,
+                    codeformer_background_enhance,
+                    codeformer_face_upsample,
+                    codeformer_upscale,
+                    codeformer_upscale_fidelity,
+                ],
+                outputs=[output_image],
+            )
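
A minimal sketch of driving the new generator outside the Gradio UI, using only the signature added above (the input path, and the codeformer-pip package providing inference_app, are assumptions):

    from diffusion_webui.upscaler_models.codeformer_upscaler import (
        CodeformerUpscalerGenerator,
    )

    upscaler = CodeformerUpscalerGenerator()
    images = upscaler.generate_image(
        image_path="input.png",    # gr.Image(type="filepath") passes a file path
        background_enhance=True,
        face_upsample=True,
        upscale=2,                 # UI slider range is 1-4
        codeformer_fidelity=0.5,   # UI slider range is 0.1-1.0
    )

Note that generate_image caches its first result in self.pipe and returns it for every later call, so a fresh instance is needed per input image; also, codeformer_fidelity is annotated int but the UI passes floats (Python does not enforce the hint).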
diffusion_webui/utils/model_list.py CHANGED
@@ -29,8 +29,8 @@ controlnet_scribble_model_list = [
     "thibaud/controlnet-sd21-scribble-diffusers",
 ]
 stable_inpiant_model_list = [
-    "runwayml/stable-diffusion-inpainting",
     "stabilityai/stable-diffusion-2-inpainting",
+    "runwayml/stable-diffusion-inpainting",
 ]
 
 controlnet_mlsd_model_list = [
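
Swapping the order makes "stabilityai/stable-diffusion-2-inpainting" the first entry, which changes the default selection wherever the list feeds a dropdown. A hedged sketch of that pattern (the actual gr.Dropdown call sites are not part of this commit):

    import gradio as gr

    from diffusion_webui.utils.model_list import stable_inpiant_model_list

    model_dropdown = gr.Dropdown(
        choices=stable_inpiant_model_list,
        value=stable_inpiant_model_list[0],  # the first entry becomes the default
        label="Inpaint Model",
    )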