Upload folder using huggingface_hub
- inference.py +27 -7
- inference2.py +4 -1
- internals/pipelines/controlnets.py +265 -113
- internals/pipelines/upscaler.py +1 -1
- requirements.txt +2 -2
inference.py
CHANGED
@@ -22,6 +22,7 @@ from internals.util.avatar import Avatar
 from internals.util.cache import auto_clear_cuda_and_gc, clear_cuda, clear_cuda_and_gc
 from internals.util.commons import download_image, upload_image, upload_images
 from internals.util.config import (
+    get_is_sdxl,
     get_model_dir,
     num_return_sequences,
     set_configs_from_task,
@@ -185,8 +186,15 @@ def scribble(task: Task):
     )
     lora_patcher.patch()
 
+    image = download_image(task.get_imageUrl()).resize((width, height))
+    if get_is_sdxl():
+        # We use sketch in SDXL
+        image = ControlNet.pidinet_image(image)
+    else:
+        image = ControlNet.scribble_image(image)
+
     kwargs = {
-        "
+        "image": [image] * num_return_sequences,
         "seed": task.get_seed(),
         "num_inference_steps": task.get_steps(),
         "width": width,
@@ -305,19 +313,32 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     else:
         poses = [controlnet.detect_pose(task.get_imageUrl())] * num_return_sequences
 
-
-
-
-
+    if not get_is_sdxl():
+        # in normal pipeline we use depth + pose controlnet
+        depth = download_image(task.get_auxilary_imageUrl()).resize(
+            (task.get_width(), task.get_height())
+        )
+        depth = ControlNet.depth_image(depth)
+        images = [depth, poses[0]]
+
+        upload_image(depth, "crecoAI/{}_depth.png".format(task.get_taskId()))
+
+        kwargs = {
+            "control_guidance_end": [0.5, 1.0],
+        }
+    else:
+        images = poses[0]
+        kwargs = {}
 
     kwargs = {
         "prompt": prompt,
-        "image": 
+        "image": images,
         "seed": task.get_seed(),
        "num_inference_steps": task.get_steps(),
         "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
         "width": width,
         "height": height,
+        **kwargs,
         **task.cnp_kwargs(),
         **lora_patcher.kwargs(),
     }
@@ -336,7 +357,6 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     images, _ = high_res.apply(**kwargs)
 
     upload_image(poses[0], "crecoAI/{}_pose.png".format(task.get_taskId()))
-    upload_image(depth, "crecoAI/{}_depth.png".format(task.get_taskId()))
 
     generated_image_urls = upload_images(images, s3_outkey, task.get_taskId())
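Note on the kwargs layering in pose() above: the branch-specific dict (control_guidance_end for the depth + pose path) is spliced into the final call dict with **kwargs, and later keys in a dict literal override earlier ones. A minimal standalone sketch of that merge behavior, with placeholder values that are not from the diff:

branch_kwargs = {"control_guidance_end": [0.5, 1.0]}  # non-SDXL branch

call_kwargs = {
    "prompt": "a portrait",  # placeholder, not from the diff
    "width": 512,
    "height": 512,
    **branch_kwargs,  # same splice as **kwargs in pose()
}

# The spliced keys land in the final dict; anything listed after the splice
# (e.g. **task.cnp_kwargs() in pose()) could still override them.
assert call_kwargs["control_guidance_end"] == [0.5, 1.0]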
inference2.py
CHANGED
@@ -18,7 +18,7 @@ from internals.pipelines.replace_background import ReplaceBackground
 from internals.pipelines.safety_checker import SafetyChecker
 from internals.pipelines.upscaler import Upscaler
 from internals.util.avatar import Avatar
-from internals.util.cache import auto_clear_cuda_and_gc, clear_cuda
+from internals.util.cache import auto_clear_cuda_and_gc, clear_cuda, clear_cuda_and_gc
 from internals.util.commons import construct_default_s3_url, upload_image, upload_images
 from internals.util.config import (
     num_return_sequences,
@@ -218,6 +218,9 @@ def upscale_image(task: Task):
     )
 
     upload_image(BytesIO(out_img), output_key)
+
+    clear_cuda_and_gc()
+
     return {"generated_image_url": construct_default_s3_url(output_key)}
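The new clear_cuda_and_gc() call releases GPU memory after each upscale request. Its real implementation lives in internals/util/cache and is not shown in this diff; a plausible minimal sketch of such a helper:

import gc

import torch


def clear_cuda_and_gc():
    # Collect unreachable Python objects first so their CUDA tensors die,
    # then hand the cached allocator blocks back to the driver.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()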
internals/pipelines/controlnets.py
CHANGED
@@ -1,19 +1,26 @@
-from typing import List, Literal, Union
+from typing import AbstractSet, List, Literal, Optional, Union
 
 import cv2
 import numpy as np
 import torch
-from controlnet_aux import 
+from controlnet_aux import (
+    HEDdetector,
+    LineartDetector,
+    OpenposeDetector,
+    PidiNetDetector,
+)
 from diffusers import (
     ControlNetModel,
     DiffusionPipeline,
+    StableDiffusionAdapterPipeline,
+    StableDiffusionControlNetImg2ImgPipeline,
     StableDiffusionControlNetPipeline,
+    StableDiffusionXLAdapterPipeline,
     StableDiffusionXLControlNetPipeline,
+    T2IAdapter,
     UniPCMultistepScheduler,
 )
-from diffusers.pipelines.
-    MultiControlNetModel,
-)
+from diffusers.pipelines.controlnet import MultiControlNetModel
 from PIL import Image
 from pydash import has
 from torch.nn import Linear
@@ -24,9 +31,6 @@ import internals.util.image as ImageUtil
 from external.midas import apply_midas
 from internals.data.result import Result
 from internals.pipelines.commons import AbstractPipeline
-from internals.pipelines.tileUpscalePipeline import (
-    StableDiffusionControlNetImg2ImgPipeline,
-)
 from internals.util.cache import clear_cuda_and_gc
 from internals.util.commons import download_image
 from internals.util.config import (
@@ -39,16 +43,91 @@ from internals.util.config import (
 CONTROLNET_TYPES = Literal["pose", "canny", "scribble", "linearart", "tile_upscaler"]
 
 
+class StableDiffusionNetworkModelPipelineLoader:
+    """Loads the pipeline for network module, eg: controlnet or t2i.
+    Does not throw error in case of unsupported configurations, instead it returns None.
+    """
+
+    def __new__(
+        cls,
+        is_sdxl,
+        is_img2img,
+        network_model,
+        pipeline_type,
+        base_pipe: Optional[AbstractSet] = None,
+    ):
+        if is_sdxl and is_img2img:
+            # Does not matter pipeline type but tile upscale is not supported
+            print("Warning: Tile upscale is not supported on SDXL")
+            return None
+
+        if base_pipe is None:
+            pretrained = True
+            kwargs = {
+                "pretrained_model_name_or_path": get_model_dir(),
+                "torch_dtype": torch.float16,
+                "use_auth_token": get_hf_token(),
+                "cache_dir": get_hf_cache_dir(),
+            }
+        else:
+            pretrained = False
+            kwargs = {
+                **base_pipe.pipe.components,  # pyright: ignore
+            }
+
+        if is_sdxl and pipeline_type == "controlnet":
+            model = (
+                StableDiffusionXLControlNetPipeline.from_pretrained
+                if pretrained
+                else StableDiffusionXLControlNetPipeline
+            )
+            return model(controlnet=network_model, **kwargs).to("cuda")
+        if is_sdxl and pipeline_type == "t2i":
+            model = (
+                StableDiffusionXLAdapterPipeline.from_pretrained
+                if pretrained
+                else StableDiffusionXLAdapterPipeline
+            )
+            return model(adapter=network_model, **kwargs).to("cuda")
+        if is_img2img and pipeline_type == "controlnet":
+            model = (
+                StableDiffusionControlNetImg2ImgPipeline.from_pretrained
+                if pretrained
+                else StableDiffusionControlNetImg2ImgPipeline
+            )
+            return model(controlnet=network_model, **kwargs).to("cuda")
+        if pipeline_type == "controlnet":
+            model = (
+                StableDiffusionControlNetPipeline.from_pretrained
+                if pretrained
+                else StableDiffusionControlNetPipeline
+            )
+            return model(controlnet=network_model, **kwargs).to("cuda")
+        if pipeline_type == "t2i":
+            model = (
+                StableDiffusionAdapterPipeline.from_pretrained
+                if pretrained
+                else StableDiffusionAdapterPipeline
+            )
+            return model(adapter=network_model, **kwargs).to("cuda")
+
+        print(
+            f"Warning: Unsupported configuration {is_sdxl=}, {is_img2img=}, {pipeline_type=}"
+        )
+        return None
+
+
 class ControlNet(AbstractPipeline):
     __current_task_name = ""
     __loaded = False
-
-    __pipeline: AbstractPipeline
+    __pipe_type = None
 
     def init(self, pipeline: AbstractPipeline):
-        self.__pipeline = pipeline
+        setattr(self, "__pipeline", pipeline)
 
     def load_model(self, task_name: CONTROLNET_TYPES):
+        "Appropriately loads the network module, pipelines and cache it for reuse."
+
         config = self.__model_sdxl if get_is_sdxl() else self.__model_normal
         if self.__current_task_name == task_name:
             return
@@ -59,92 +138,116 @@ class ControlNet(AbstractPipeline):
         task_name = model  # pyright: ignore
         model = config[task_name]
 
+        pipeline_type = (
+            self.__model_sdxl_types[task_name]
+            if get_is_sdxl()
+            else self.__model_normal_types[task_name]
+        )
+
         if "," in model:
-            controlnets = []
-            for name in model_names:
-                cn = ControlNetModel.from_pretrained(
-                    name,
-                    torch_dtype=torch.float16,
-                    cache_dir=get_hf_cache_dir(),
-                ).to("cuda")
-                controlnets.append(cn)
-            controlnet = MultiControlNetModel(controlnets).to("cuda")
-        # Single controlnet
-        else:
-            controlnet = ControlNetModel.from_pretrained(
-                model,
-                torch_dtype=torch.float16,
-                cache_dir=get_hf_cache_dir(),
-            ).to("cuda")
-        self.__current_task_name = task_name
-        self.controlnet = controlnet
-
-        self.
-
-        if hasattr(self, "pipe"):
-            self.pipe.controlnet = controlnet
-        if hasattr(self, "pipe2"):
-            self.pipe2.controlnet = controlnet
+            model = [m.strip() for m in model.split(",")]
+
+        model = self.__load_network_model(model, pipeline_type)
+
+        self.__load_pipeline(model, pipeline_type)
+
+        self.network_model = model
+        self.__current_task_name = task_name
+
         clear_cuda_and_gc()
 
-    def 
-        "
-        if self.__loaded:
-            return
-
-        else:
-            pipe
-            else:
-                pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-                    get_model_dir(),
-                    controlnet=self.controlnet,
-                    torch_dtype=torch.float16,
-                    use_auth_token=get_hf_token(),
-                    cache_dir=get_hf_cache_dir(),
-                ).to("cuda")
-            # pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-            pipe.enable_model_cpu_offload()
-            pipe.enable_xformers_memory_efficient_attention()
-            self.pipe = pipe
-
-        # controlnet pipeline for canny and pose
-        pipe2 = 
-        )
-        pipe2
+    def __load_network_model(self, model_name, pipeline_type):
+        "Loads the network module, eg: ControlNet or T2I Adapters"
+
+        def load_controlnet(model):
+            return ControlNetModel.from_pretrained(
+                model,
+                torch_dtype=torch.float16,
+                cache_dir=get_hf_cache_dir(),
+            ).to("cuda")
+
+        def load_t2i(model):
+            return T2IAdapter.from_pretrained(
+                model,
+                torch_dtype=torch.float16,
+                varient="fp16",
+            ).to("cuda")
+
+        if type(model_name) == str:
+            if pipeline_type == "controlnet":
+                return load_controlnet(model_name)
+            if pipeline_type == "t2i":
+                return load_t2i(model_name)
+            raise Exception("Invalid pipeline type")
+        elif type(model_name) == list:
+            if pipeline_type == "controlnet":
+                cns = []
+                for model in model_name:
+                    cns.append(load_controlnet(model))
+                return MultiControlNetModel(cns).to("cuda")
+            elif pipeline_type == "t2i":
+                raise Exception("Multi T2I adapters are not supported")
+            raise Exception("Invalid pipeline type")
+
+    def __load_pipeline(self, network_model, pipeline_type):
+        "Load the base pipeline(s) (if not loaded already) based on pipeline type and attaches the network module to the pipeline"
+
+        def patch_pipe(pipe):
+            if not pipe:
+                # cases where the loader may return None
+                return None
+
+            if get_is_sdxl():
+                pipe.enable_vae_tiling()
+                pipe.enable_vae_slicing()
+                pipe.enable_xformers_memory_efficient_attention()
+            else:
+                pipe.enable_xformers_memory_efficient_attention()
+            return pipe
+
+        # If the pipeline type is changed we should reload all
+        # the pipelines
+        if not self.__loaded or self.__pipe_type != pipeline_type:
+            # controlnet pipeline for tile upscaler
+            pipe = StableDiffusionNetworkModelPipelineLoader(
+                is_sdxl=get_is_sdxl(),
+                is_img2img=True,
+                network_model=network_model,
+                pipeline_type=pipeline_type,
+                base_pipe=getattr(self, "__pipeline", None),
+            )
+            pipe = patch_pipe(pipe)
+            if pipe:
+                self.pipe = pipe
+
+            # controlnet pipeline for canny and pose
+            pipe2 = StableDiffusionNetworkModelPipelineLoader(
+                is_sdxl=get_is_sdxl(),
+                is_img2img=False,
+                network_model=network_model,
+                pipeline_type=pipeline_type,
+                base_pipe=getattr(self, "__pipeline", None),
+            )
+            pipe2 = patch_pipe(pipe2)
+            if pipe2:
+                self.pipe2 = pipe2
+
+            self.__loaded = True
+            self.__pipe_type = pipeline_type
+
+        # Set the network module in the pipeline
+        if pipeline_type == "controlnet":
+            if hasattr(self, "pipe"):
+                setattr(self.pipe, "controlnet", network_model)
+            if hasattr(self, "pipe2"):
+                setattr(self.pipe2, "controlnet", network_model)
+        elif pipeline_type == "t2i":
+            if hasattr(self, "pipe"):
+                setattr(self.pipe, "adapter", network_model)
+            if hasattr(self, "pipe2"):
+                setattr(self.pipe2, "adapter", network_model)
 
+        clear_cuda_and_gc()
 
     def process(self, **kwargs):
         if self.__current_task_name == "pose":
@@ -220,7 +323,6 @@ class ControlNet(AbstractPipeline):
             "num_inference_steps": num_inference_steps,
             "negative_prompt": negative_prompt[0],
             "guidance_scale": guidance_scale,
-            "control_guidance_end": [0.5, 1.0],
             "height": height,
             "width": width,
             **kwargs,
@@ -256,7 +358,7 @@ class ControlNet(AbstractPipeline):
         kwargs = {
             "image": condition_image,
             "prompt": prompt,
-            "
+            "control_image": condition_image,
             "num_inference_steps": num_inference_steps,
             "negative_prompt": negative_prompt,
             "height": condition_image.size[1],
@@ -270,7 +372,7 @@ class ControlNet(AbstractPipeline):
     @torch.inference_mode()
     def process_scribble(
         self,
-
+        image: List[Image.Image],
         prompt: Union[str, List[str]],
         negative_prompt: Union[str, List[str]],
         num_inference_steps: int,
@@ -285,21 +387,25 @@ class ControlNet(AbstractPipeline):
 
         torch.manual_seed(seed)
 
-
-
-
-
-
-
+        sdxl_args = (
+            {
+                "guidance_scale": 6,
+                "adapter_conditioning_scale": 0.6,
+                "adapter_conditioning_factor": 1.0,
+            }
+            if get_is_sdxl()
+            else {}
+        )
 
         kwargs = {
-            "image": 
+            "image": image,
             "prompt": prompt,
             "num_inference_steps": num_inference_steps,
             "negative_prompt": negative_prompt,
             "height": height,
             "width": width,
             "guidance_scale": guidance_scale,
+            **sdxl_args,
             **kwargs,
         }
         result = self.pipe2.__call__(**kwargs)
@@ -326,29 +432,35 @@ class ControlNet(AbstractPipeline):
         init_image = download_image(imageUrl).resize((width, height))
         condition_image = ControlNet.linearart_condition_image(init_image)
 
+        # we use t2i adapter and the conditioning scale should always be 0.8
+        sdxl_args = (
+            {
+                "guidance_scale": 6,
+                "adapter_conditioning_scale": 0.5,
+                "adapter_conditioning_factor": 0.9,
+            }
+            if get_is_sdxl()
+            else {}
+        )
+
         kwargs = {
-            "image": condition_image,
+            "image": [condition_image] * 4,
             "prompt": prompt,
             "num_inference_steps": num_inference_steps,
             "negative_prompt": negative_prompt,
             "height": height,
             "width": width,
             "guidance_scale": guidance_scale,
+            **sdxl_args,
             **kwargs,
         }
         result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
     def cleanup(self):
-
-
-
-        del self.pipe2.controlnet
-        if hasattr(self, "controlnet"):
-            del self.controlnet
-        self.__current_task_name = ""
-
-        clear_cuda_and_gc()
+        """Doesn't do anything considering new diffusers has itself a cleanup mechanism
+        after controlnet generation"""
+        pass
 
     def detect_pose(self, imageUrl: str) -> Image.Image:
         detector = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
@@ -356,7 +468,8 @@ class ControlNet(AbstractPipeline):
         image = detector.__call__(image)
         return image
 
-
+    @staticmethod
+    def scribble_image(image: Image.Image) -> Image.Image:
         processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
         image = processor.__call__(input_image=image, scribble=True)
         return image
@@ -369,12 +482,36 @@ class ControlNet(AbstractPipeline):
 
     @staticmethod
     def depth_image(image: Image.Image) -> Image.Image:
-
-
-
-
-
-
+        global midas, midas_transforms
+        if "midas" not in globals():
+            midas = torch.hub.load("intel-isl/MiDaS", "MiDaS").to("cuda")
+            midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
+        transform = midas_transforms.default_transform
+
+        cv_image = np.array(image)
+        img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
+
+        input_batch = transform(img).to("cuda")
+        with torch.no_grad():
+            prediction = midas(input_batch)
+
+        prediction = torch.nn.functional.interpolate(
+            prediction.unsqueeze(1),
+            size=img.shape[:2],
+            mode="bicubic",
+            align_corners=False,
+        ).squeeze()
+
+        output = prediction.cpu().numpy()
+        formatted = (output * 255 / np.max(output)).astype("uint8")
+        img = Image.fromarray(formatted)
+        return img
+
+    @staticmethod
+    def pidinet_image(image: Image.Image) -> Image.Image:
+        pidinet = PidiNetDetector.from_pretrained("lllyasviel/Annotators").to("cuda")
+        image = pidinet.__call__(input_image=image, apply_filter=True)
+        return image
 
     @staticmethod
     def canny_detect_edge(image: Image.Image) -> Image.Image:
@@ -407,10 +544,25 @@ class ControlNet(AbstractPipeline):
         "scribble": "lllyasviel/control_v11p_sd15_scribble",
         "tile_upscaler": "lllyasviel/control_v11f1e_sd15_tile",
     }
+    __model_normal_types = {
+        "pose": "controlnet",
+        "canny": "controlnet",
+        "linearart": "controlnet",
+        "scribble": "controlnet",
+        "tile_upscaler": "controlnet",
+    }
+
     __model_sdxl = {
         "pose": "thibaud/controlnet-openpose-sdxl-1.0",
         "canny": "diffusers/controlnet-canny-sdxl-1.0",
-        "linearart": "
-        "scribble": "
+        "linearart": "TencentARC/t2i-adapter-lineart-sdxl-1.0",
+        "scribble": "TencentARC/t2i-adapter-sketch-sdxl-1.0",
+        "tile_upscaler": None,
+    }
+    __model_sdxl_types = {
+        "pose": "controlnet",
+        "canny": "controlnet",
+        "linearart": "t2i",
+        "scribble": "t2i",
         "tile_upscaler": None,
     }
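StableDiffusionNetworkModelPipelineLoader above overrides __new__ rather than __init__, so "constructing" it returns a ready diffusers pipeline, or None for unsupported combinations (SDXL img2img, unknown pipeline types), which is why __load_pipeline guards with `if pipe:`. A self-contained illustration of that pattern:

class Loader:
    def __new__(cls, supported: bool):
        if not supported:
            return None  # unsupported configuration: no exception, just None
        return super().__new__(cls)


# When __new__ returns something that is not an instance of cls,
# __init__ is skipped entirely and the caller receives that value.
assert Loader(supported=False) is None
assert isinstance(Loader(supported=True), Loader)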
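The routing between ControlNet checkpoints and T2I adapters is driven by the parallel __model_* / __model_*_types dicts at the end of the file. A runnable condensation of the lookup that load_model() now performs, using the SDXL maps copied from the diff:

MODEL_SDXL = {
    "pose": "thibaud/controlnet-openpose-sdxl-1.0",
    "canny": "diffusers/controlnet-canny-sdxl-1.0",
    "linearart": "TencentARC/t2i-adapter-lineart-sdxl-1.0",
    "scribble": "TencentARC/t2i-adapter-sketch-sdxl-1.0",
    "tile_upscaler": None,
}
MODEL_SDXL_TYPES = {
    "pose": "controlnet",
    "canny": "controlnet",
    "linearart": "t2i",
    "scribble": "t2i",
    "tile_upscaler": None,
}


def resolve(task_name: str):
    # Mirrors load_model() on SDXL: which checkpoint to fetch and which
    # pipeline family (controlnet vs t2i adapter) should host it.
    return MODEL_SDXL[task_name], MODEL_SDXL_TYPES[task_name]


assert resolve("scribble") == ("TencentARC/t2i-adapter-sketch-sdxl-1.0", "t2i")
assert resolve("pose")[1] == "controlnet"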
internals/pipelines/upscaler.py
CHANGED
@@ -148,7 +148,7 @@ class Upscaler:
             model=model,
             half=False,
             gpu_id="0",
-            tile=
+            tile=128,
             tile_pad=10,
             pre_pad=0,
         )
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
 boto3==1.24.61
 triton==2.0.0
-diffusers==0.
+diffusers==0.23.0
 fastapi==0.87.0
 Pillow==9.3.0
 redis==4.3.4
@@ -9,7 +9,7 @@ transformers==4.34.1
 rembg==2.0.30
 gfpgan==1.3.8
 rembg==2.0.30
-controlnet-aux==0.0.
+controlnet-aux==0.0.7
 gfpgan>=1.3.4
 realesrgan==0.3.0
 compel==1.0.4
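The two pins move together: the new diffusers imports (T2IAdapter, StableDiffusionXLAdapterPipeline, StableDiffusionControlNetImg2ImgPipeline) and the controlnet_aux PidiNetDetector import both assume these newer releases. A quick environment check against the pins (illustrative snippet, not part of the repo):

from importlib.metadata import version

# Versions pinned in requirements.txt above.
assert version("diffusers") == "0.23.0"
assert version("controlnet-aux") == "0.0.7"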