Upload folder using huggingface_hub
- inference.py +82 -26
- internals/data/task.py +4 -0
- internals/pipelines/commons.py +11 -2
- internals/pipelines/controlnets.py +17 -0
- internals/pipelines/inpainter.py +10 -1
- internals/pipelines/object_remove.py +7 -0
- internals/pipelines/replace_background.py +12 -2
- internals/pipelines/safety_checker.py +3 -2
- internals/pipelines/sdxl_tile_upscale.py +14 -0
- internals/pipelines/upscaler.py +5 -1
- internals/util/config.py +22 -1
- internals/util/model_loader.py +9 -1
- internals/util/prompt.py +7 -6
- models/ultrasharp/model.py +6 -4
- models/ultrasharp/util.py +84 -3
- requirements.txt +2 -2
inference.py
CHANGED
@@ -38,8 +38,9 @@ from internals.util.commons import (
 )
 from internals.util.config import (
     get_is_sdxl,
+    get_low_gpu_mem,
     get_model_dir,
-    num_return_sequences,
+    get_num_return_sequences,
     set_configs_from_task,
     set_model_config,
     set_root_dir,
@@ -54,7 +55,7 @@ torch.backends.cuda.matmul.allow_tf32 = True
 
 auto_mode = False
 
-prompt_modifier = PromptModifier(num_of_sequences=num_return_sequences)
+prompt_modifier = PromptModifier(num_of_sequences=get_num_return_sequences())
 upscaler = Upscaler()
 pose_detector = PoseDetector()
 inpainter = InPainter()
@@ -128,7 +129,7 @@ def canny(task: Task):
         "negative_prompt": [
             f"monochrome, neon, x-ray, negative image, oversaturated, {task.get_negative_prompt()}"
         ]
-        * num_return_sequences,
+        * get_num_return_sequences(),
         **task.cnc_kwargs(),
         **lora_patcher.kwargs(),
     }
@@ -136,7 +137,8 @@ def canny(task: Task):
     if task.get_high_res_fix():
         kwargs = {
             "prompt": prompt,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -235,13 +237,13 @@ def scribble(task: Task):
     image = ControlNet.scribble_image(image)
 
     kwargs = {
-        "image": [image] * num_return_sequences,
+        "image": [image] * get_num_return_sequences(),
         "seed": task.get_seed(),
         "num_inference_steps": task.get_steps(),
         "width": width,
         "height": height,
         "prompt": prompt,
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()] * get_num_return_sequences(),
        **task.cns_kwargs(),
     }
     images, has_nsfw = controlnet.process(**kwargs)
@@ -249,7 +251,8 @@ def scribble(task: Task):
     if task.get_high_res_fix():
         kwargs = {
             "prompt": prompt,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -292,7 +295,7 @@ def linearart(task: Task):
         "width": width,
         "height": height,
         "prompt": prompt,
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()] * get_num_return_sequences(),
         **task.cnl_kwargs(),
     }
     images, has_nsfw = controlnet.process(**kwargs)
@@ -300,7 +303,8 @@ def linearart(task: Task):
     if task.get_high_res_fix():
         kwargs = {
             "prompt": prompt,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -342,7 +346,7 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
         pose = download_image(task.get_imageUrl()).resize(
             (task.get_width(), task.get_height())
         )
-        poses = [pose] * num_return_sequences
+        poses = [pose] * get_num_return_sequences()
     elif task.get_pose_coordinates():
         infered_pose = pose_detector.transform(
             image=task.get_imageUrl(),
@@ -350,9 +354,11 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
             width=task.get_width(),
             height=task.get_height(),
         )
-        poses = [infered_pose] * num_return_sequences
+        poses = [infered_pose] * get_num_return_sequences()
     else:
-        poses = [controlnet.detect_pose(task.get_imageUrl())] * num_return_sequences
+        poses = [
+            controlnet.detect_pose(task.get_imageUrl())
+        ] * get_num_return_sequences()
 
     if not get_is_sdxl():
         # in normal pipeline we use depth + pose controlnet
@@ -376,7 +382,7 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
         "image": images,
         "seed": task.get_seed(),
         "num_inference_steps": task.get_steps(),
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()] * get_num_return_sequences(),
         "width": width,
         "height": height,
         **kwargs,
@@ -388,7 +394,8 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     if task.get_high_res_fix():
         kwargs = {
             "prompt": prompt,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -439,8 +446,11 @@ def text2img(task: Task):
 
     if task.get_high_res_fix():
         kwargs = {
-            "prompt": params.prompt if params.prompt else [""] * num_return_sequences,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "prompt": params.prompt
+            if params.prompt
+            else [""] * get_num_return_sequences(),
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -486,7 +496,8 @@ def img2img(task: Task):
         "width": width,
         "height": height,
         "prompt": prompt,
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()]
+        * get_num_return_sequences(),
         **task.cnl_kwargs(),
         "adapter_conditioning_scale": 0.3,
     }
@@ -500,7 +511,8 @@ def img2img(task: Task):
     kwargs = {
         "prompt": prompt,
         "imageUrl": task.get_imageUrl(),
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()]
+        * get_num_return_sequences(),
         "num_inference_steps": task.get_steps(),
         "width": width,
         "height": height,
@@ -512,7 +524,8 @@ def img2img(task: Task):
     if task.get_high_res_fix():
         kwargs = {
             "prompt": prompt,
-            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()]
+            * get_num_return_sequences(),
             "images": images,
             "width": task.get_width(),
             "height": task.get_height(),
@@ -535,7 +548,12 @@ def img2img(task: Task):
 @update_db
 @slack.auto_send_alert
 def inpaint(task: Task):
-    prompt, _ = get_patched_prompt(task)
+    if task.get_type() == TaskType.OUTPAINT:
+        key = "_outpaint"
+        prompt = [img2text.process(task.get_imageUrl())] * num_return_sequences
+    else:
+        key = "_inpaint"
+        prompt, _ = get_patched_prompt(task)
 
     print({"prompts": prompt})
 
@@ -546,13 +564,13 @@ def inpaint(task: Task):
         "width": task.get_width(),
         "height": task.get_height(),
         "seed": task.get_seed(),
-        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "negative_prompt": [task.get_negative_prompt()] * get_num_return_sequences(),
         "num_inference_steps": task.get_steps(),
         **task.ip_kwargs(),
     }
     images = inpainter.process(**kwargs)
 
-    generated_image_urls = upload_images(images, "_inpaint", task.get_taskId())
+    generated_image_urls = upload_images(images, key, task.get_taskId())
 
     clear_cuda_and_gc()
 
@@ -566,7 +584,7 @@ def replace_bg(task: Task):
     if task.is_prompt_engineering():
         prompt = prompt_modifier.modify(prompt)
     else:
-        prompt = [prompt] * num_return_sequences
+        prompt = [prompt] * get_num_return_sequences()
 
     lora_patcher = lora_style.get_patcher(replace_background.pipe, task.get_style())
     lora_patcher.patch()
@@ -574,7 +592,7 @@ def replace_bg(task: Task):
     images, has_nsfw = replace_background.replace(
         image=task.get_imageUrl(),
         prompt=prompt,
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
+        negative_prompt=[task.get_negative_prompt()] * get_num_return_sequences(),
         seed=task.get_seed(),
         width=task.get_width(),
         height=task.get_height(),
@@ -749,11 +767,13 @@ def load_model_by_task(task_type: TaskType, model_id=-1):
     inpainter.init(text2img_pipe)
     controlnet.init(text2img_pipe)
 
-    if task_type == TaskType.INPAINT:
+    if task_type == TaskType.INPAINT or task_type == TaskType.OUTPAINT:
         inpainter.load()
         safety_checker.apply(inpainter)
     elif task_type == TaskType.REPLACE_BG:
-        replace_background.load(…)
+        replace_background.load(
+            upscaler=upscaler, base=text2img_pipe, high_res=high_res
+        )
     elif task_type == TaskType.RT_DRAW_SEG or task_type == TaskType.RT_DRAW_IMG:
         realtime_draw.load(text2img_pipe)
     elif task_type == TaskType.OBJECT_REMOVAL:
@@ -776,6 +796,28 @@ def load_model_by_task(task_type: TaskType, model_id=-1):
         controlnet.load_model("pose")
 
 
+def unload_model_by_task(task_type: TaskType):
+    if task_type == TaskType.INPAINT or task_type == TaskType.OUTPAINT:
+        inpainter.unload()
+    elif task_type == TaskType.REPLACE_BG:
+        replace_background.unload()
+    elif task_type == TaskType.OBJECT_REMOVAL:
+        object_removal.unload()
+    elif task_type == TaskType.TILE_UPSCALE:
+        if get_is_sdxl():
+            sdxl_tileupscaler.unload()
+        else:
+            controlnet.unload()
+    elif task_type == TaskType.CANNY:
+        controlnet.unload()
+    elif task_type == TaskType.SCRIBBLE:
+        controlnet.unload()
+    elif task_type == TaskType.LINEARART:
+        controlnet.unload()
+    elif task_type == TaskType.POSE:
+        controlnet.unload()
+
+
 def apply_safety_checkers():
     safety_checker.apply(text2img_pipe)
     safety_checker.apply(img2img_pipe)
@@ -801,6 +843,18 @@ def model_fn(model_dir):
     return
 
 
+def auto_unload_task(func):
+    def wrapper(*args, **kwargs):
+        result = func(*args, **kwargs)
+        if get_low_gpu_mem():
+            task = Task(args[0])
+            unload_model_by_task(task.get_type())  # pyright: ignore
+        return result
+
+    return wrapper
+
+
+@auto_unload_task
 @FailureHandler.clear
 def predict_fn(data, pipe):
     task = Task(data)
@@ -851,6 +905,8 @@ def predict_fn(data, pipe):
         return tile_upscale(task)
     elif task_type == TaskType.INPAINT:
         return inpaint(task)
+    elif task_type == TaskType.OUTPAINT:
+        return inpaint(task)
     elif task_type == TaskType.SCRIBBLE:
         return scribble(task)
     elif task_type == TaskType.LINEARART:
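Note on the new auto-unload flow: when the model config sets low_gpu_mem, the auto_unload_task decorator frees the task-specific pipelines right after predict_fn returns. A minimal self-contained sketch of the control flow (the Task, get_low_gpu_mem, and unload_model_by_task stubs below are simplified stand-ins for the real implementations):

    def get_low_gpu_mem():
        return True  # stand-in: the real value comes from inference.json

    def unload_model_by_task(task_type):
        print(f"unloading pipelines for {task_type}")  # stand-in

    class Task:  # stand-in for internals.data.task.Task
        def __init__(self, data):
            self._data = data

        def get_type(self):
            return self._data["task_type"]

    def auto_unload_task(func):
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if get_low_gpu_mem():
                task = Task(args[0])  # args[0] is the raw request payload
                unload_model_by_task(task.get_type())
            return result

        return wrapper

    @auto_unload_task
    def predict_fn(data, pipe):
        return {"handled": data["task_type"]}

    print(predict_fn({"task_type": "INPAINT"}, None))  # unloads after returning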
internals/data/task.py
CHANGED
@@ -23,6 +23,7 @@ class TaskType(Enum):
     PRELOAD_MODEL = "PRELOAD_MODEL"
     CUSTOM_ACTION = "CUSTOM_ACTION"
     SYSTEM_CMD = "SYSTEM_CMD"
+    OUTPAINT = "OUTPAINT"
 
 
 class ModelType(Enum):
@@ -140,6 +141,9 @@ class Task:
     def get_nsfw_threshold(self) -> float:
         return self.__data.get("nsfw_threshold", 0.03)
 
+    def get_num_return_sequences(self) -> int:
+        return self.__data.get("num_return_sequences", 4)
+
     def can_access_nsfw(self) -> bool:
         return self.__data.get("can_access_nsfw", False)
 
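The new OUTPAINT task type reuses the inpaint path, and num_return_sequences becomes a per-task field with a default of 4. For illustration, this is how a request payload maps onto the new getter (the dict literals are hypothetical payloads):

    payload = {"task_id": "t1", "prompt": "a red car"}
    assert payload.get("num_return_sequences", 4) == 4  # default: 4 images

    payload = {"task_id": "t2", "prompt": "a red car", "num_return_sequences": 2}
    assert payload.get("num_return_sequences", 4) == 2  # caller asked for 2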
internals/pipelines/commons.py
CHANGED
@@ -12,7 +12,12 @@ from diffusers import (
 from internals.data.result import Result
 from internals.pipelines.twoStepPipeline import two_step_pipeline
 from internals.util.commons import disable_safety_checker, download_image
-from internals.util.config import get_hf_token, get_is_sdxl, num_return_sequences
+from internals.util.config import (
+    get_base_model_variant,
+    get_hf_token,
+    get_is_sdxl,
+    num_return_sequences,
+)
 
 
 class AbstractPipeline:
@@ -41,6 +46,7 @@ class Text2Img(AbstractPipeline):
             torch_dtype=torch.float16,
             use_auth_token=get_hf_token(),
             use_safetensors=True,
+            variant=get_base_model_variant(),
         )
         pipe.vae = vae
         pipe.to("cuda")
@@ -104,18 +110,20 @@
             print("Warning: Two step pipeline is not supported on SDXL")
             kwargs = {
                 "prompt": modified_prompt,
+                **kwargs,
             }
         else:
             kwargs = {
                 "prompt": prompt,
                 "modified_prompts": modified_prompt,
                 "iteration": iteration,
+                **kwargs,
             }
 
         kwargs = {
             "height": height,
             "width": width,
-            "negative_prompt": [negative_prompt or ""] * num_return_sequences,
+            "negative_prompt": [negative_prompt or ""] * get_num_return_sequences(),
             "num_inference_steps": num_inference_steps,
             **kwargs,
         }
@@ -136,6 +144,7 @@ class Img2Img(AbstractPipeline):
             model_dir,
             torch_dtype=torch.float16,
             use_auth_token=get_hf_token(),
+            variant=get_base_model_variant(),
             use_safetensors=True,
         ).to("cuda")
         else:
internals/pipelines/controlnets.py
CHANGED
@@ -126,6 +126,23 @@ class ControlNet(AbstractPipeline):
     def init(self, pipeline: AbstractPipeline):
         setattr(self, "__pipeline", pipeline)
 
+    def unload(self):
+        "Unloads the network module, pipelines and clears the cache."
+
+        if not self.__loaded:
+            return
+
+        self.__loaded = False
+        self.__pipe_type = None
+        self.__current_task_name = ""
+
+        if hasattr(self, "pipe"):
+            delattr(self, "pipe")
+        if hasattr(self, "pipe2"):
+            delattr(self, "pipe2")
+
+        clear_cuda_and_gc()
+
     def load_model(self, task_name: CONTROLNET_TYPES):
         "Appropriately loads the network module, pipelines and cache it for reuse."
 
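ControlNet.unload() mirrors load_model(): it resets the bookkeeping flags, drops the cached pipe/pipe2 attributes with delattr, and then clears GPU memory. A sketch of what clear_cuda_and_gc presumably does (the actual helper lives in internals.util.cache and is not shown in this diff):

    import gc

    import torch

    def clear_cuda_and_gc():
        # Drop unreachable Python objects first so their CUDA tensors die,
        # then release the cached CUDA blocks back to the driver.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()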
internals/pipelines/inpainter.py
CHANGED
@@ -4,12 +4,14 @@ import torch
 from diffusers import StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline
 
 from internals.pipelines.commons import AbstractPipeline
+from internals.util.cache import clear_cuda_and_gc
 from internals.util.commons import disable_safety_checker, download_image
 from internals.util.config import (
+    get_base_inpaint_model_variant,
     get_hf_cache_dir,
     get_hf_token,
-    get_is_sdxl,
     get_inpaint_model_path,
+    get_is_sdxl,
     get_model_dir,
 )
 
@@ -35,6 +37,7 @@ class InPainter(AbstractPipeline):
             torch_dtype=torch.float16,
             cache_dir=get_hf_cache_dir(),
             use_auth_token=get_hf_token(),
+            variant=get_base_inpaint_model_variant(),
         ).to("cuda")
         else:
             self.pipe = StableDiffusionInpaintPipeline.from_pretrained(
@@ -69,6 +72,11 @@ class InPainter(AbstractPipeline):
         self.pipe.enable_vae_slicing()
         self.pipe.enable_xformers_memory_efficient_attention()
 
+    def unload(self):
+        self.__loaded = False
+        self.pipe = None
+        clear_cuda_and_gc()
+
     @torch.inference_mode()
     def process(
         self,
@@ -95,6 +103,7 @@ class InPainter(AbstractPipeline):
             "width": width,
             "negative_prompt": negative_prompt,
             "num_inference_steps": num_inference_steps,
+            "strength": 1.0,
             **kwargs,
         }
         return self.pipe.__call__(**kwargs).images
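The variant kwarg is standard diffusers from_pretrained() API: variant="fp16" selects the *.fp16.safetensors weight files instead of the full-precision ones. A usage sketch (the model id is illustrative; in this repo the variant string comes from inference.json via get_base_inpaint_model_variant()):

    import torch
    from diffusers import StableDiffusionXLInpaintPipeline

    pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
        "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",  # illustrative
        torch_dtype=torch.float16,
        variant="fp16",  # load the *.fp16.safetensors weights
    ).to("cuda")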
internals/pipelines/object_remove.py
CHANGED
@@ -10,6 +10,7 @@ from omegaconf import OmegaConf
 from PIL import Image
 from torch.utils.data._utils.collate import default_collate
 
+from internals.util.cache import clear_cuda_and_gc
 from internals.util.commons import download_file, download_image
 from internals.util.config import get_root_dir
 from saicinpainting.evaluation.utils import move_to_device
@@ -42,6 +43,12 @@ class ObjectRemoval:
 
         self.__loaded = True
 
+    def unload(self):
+        self.__loaded = False
+        self.model = None
+
+        clear_cuda_and_gc()
+
     @torch.no_grad()
     def process(
         self,
internals/pipelines/replace_background.py
CHANGED
@@ -6,21 +6,22 @@ from cv2 import inpaint
 from diffusers import (
     ControlNetModel,
     StableDiffusionControlNetInpaintPipeline,
-    StableDiffusionInpaintPipeline,
     StableDiffusionControlNetPipeline,
+    StableDiffusionInpaintPipeline,
     UniPCMultistepScheduler,
 )
 from PIL import Image, ImageFilter, ImageOps
-from internals.data.task import ModelType
 
 import internals.util.image as ImageUtil
 from internals.data.result import Result
+from internals.data.task import ModelType
 from internals.pipelines.commons import AbstractPipeline
 from internals.pipelines.controlnets import ControlNet
 from internals.pipelines.high_res import HighRes
 from internals.pipelines.inpainter import InPainter
 from internals.pipelines.remove_background import RemoveBackgroundV2
 from internals.pipelines.upscaler import Upscaler
+from internals.util.cache import clear_cuda_and_gc
 from internals.util.commons import download_image
 from internals.util.config import (
     get_hf_cache_dir,
@@ -82,6 +83,15 @@ class ReplaceBackground(AbstractPipeline):
 
         self.__loaded = True
 
+    def unload(self):
+        self.__loaded = False
+        self.pipe = None
+        self.high_res = None
+        self.upscaler = None
+        self.remove_background = None
+
+        clear_cuda_and_gc()
+
     @torch.inference_mode()
     def replace(
         self,
internals/pipelines/safety_checker.py
CHANGED
@@ -31,9 +31,10 @@ class SafetyChecker:
         self.__loaded = True
 
     def apply(self, pipeline: AbstractPipeline):
-        self.load()
-
         model = self.model if not get_nsfw_access() else None
+        if model:
+            self.load()
+
         if not pipeline:
             return
         if hasattr(pipeline, "pipe"):
internals/pipelines/sdxl_tile_upscale.py
CHANGED
@@ -10,6 +10,7 @@ from internals.pipelines.commons import AbstractPipeline, Text2Img
 from internals.pipelines.controlnets import ControlNet
 from internals.pipelines.demofusion_sdxl import DemoFusionSDXLControlNetPipeline
 from internals.pipelines.high_res import HighRes
+from internals.util.cache import clear_cuda_and_gc
 from internals.util.commons import download_image
 from internals.util.config import get_base_dimension
 
@@ -17,7 +18,11 @@ controlnet = ControlNet()
 
 
 class SDXLTileUpscaler(AbstractPipeline):
+    __loaded = False
+
     def create(self, high_res: HighRes, pipeline: Text2Img, model_id: int):
+        if self.__loaded:
+            return
         # temporal hack for upscale model till multicontrolnet support is added
         model = (
             "thibaud/controlnet-openpose-sdxl-1.0"
@@ -38,6 +43,15 @@ class SDXLTileUpscaler(AbstractPipeline):
 
         self.pipe = pipe
 
+        self.__loaded = True
+
+    def unload(self):
+        self.__loaded = False
+        self.pipe = None
+        self.high_res = None
+
+        clear_cuda_and_gc()
+
     def process(
         self,
         prompt: str,
internals/pipelines/upscaler.py
CHANGED
@@ -139,7 +139,11 @@ class Upscaler:
         os.chdir(str(Path.home() / ".cache"))
         if scale == 4:
             print("Using 4x-Ultrasharp")
-            upsampler = Ultrasharp(self.__model_path_4x_ultrasharp)
+            upsampler = Ultrasharp(
+                model_path=self.__model_path_4x_ultrasharp,
+                tile=320,
+                tile_pad=10,
+            )
         else:
             print("Using RealESRGANer")
             upsampler = RealESRGANer(
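With the new constructor arguments, the 4x-UltraSharp path now runs tiled, which bounds peak VRAM on large inputs. A usage sketch (file paths are illustrative; enhance() returns an (image, None) tuple as shown in models/ultrasharp/model.py):

    from PIL import Image

    from models.ultrasharp.model import Ultrasharp

    upsampler = Ultrasharp(
        model_path="4x-UltraSharp.pth",  # illustrative path
        tile=320,      # process the image in 320x320 tiles
        tile_pad=10,   # 10px overlap hides seams between tiles
    )
    img = Image.open("input.png").convert("RGB")
    out, _ = upsampler.enhance(img)  # out is an HxWx3 uint8 array, 4x larger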
internals/util/config.py
CHANGED
@@ -45,7 +45,7 @@ def set_model_config(config: ModelConfig):
 
 
 def set_configs_from_task(task: Task):
-    global env, nsfw_threshold, nsfw_access, access_token, base_dimension
+    global env, nsfw_threshold, nsfw_access, access_token, base_dimension, num_return_sequences
     name = task.get_queue_name()
     if name.startswith("gamma"):
         env = "gamma"
@@ -55,6 +55,7 @@ def set_configs_from_task(task: Task):
     nsfw_access = task.can_access_nsfw()
     access_token = task.get_access_token()
     base_dimension = task.get_base_dimension()
+    num_return_sequences = task.get_num_return_sequences()
 
 
 def get_model_dir():
@@ -84,6 +85,11 @@ def get_root_dir():
     return root_dir
 
 
+def get_num_return_sequences():
+    global num_return_sequences
+    return num_return_sequences
+
+
 def get_environment():
     global env
     return env
@@ -104,6 +110,21 @@ def get_hf_token():
     return hf_token
 
 
+def get_low_gpu_mem():
+    global model_config
+    return model_config.low_gpu_mem  # pyright: ignore
+
+
+def get_base_model_variant():
+    global model_config
+    return model_config.get_base_model_variant  # pyright: ignore
+
+
+def get_base_inpaint_model_variant():
+    global model_config
+    return model_config.base_inpaint_model_variant  # pyright: ignore
+
+
 def api_headers():
     return {
         "Access-Token": access_token,
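config.py keeps per-request settings in module-level globals: set_configs_from_task writes them at the start of each request, and the get_* helpers read them back from anywhere in the process. A minimal sketch of the pattern:

    num_return_sequences = 4  # module-level default

    def set_num_return_sequences(n):
        global num_return_sequences  # rebind the module global
        num_return_sequences = n

    def get_num_return_sequences():
        return num_return_sequences

    set_num_return_sequences(2)
    assert get_num_return_sequences() == 2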
internals/util/model_loader.py
CHANGED
@@ -16,6 +16,9 @@ class ModelConfig:
     base_inpaint_model_path: str
     is_sdxl: bool = False
     base_dimension: int = 512
+    low_gpu_mem: bool = False
+    base_model_variant: Optional[str] = None
+    base_inpaint_model_variant: Optional[str] = None
 
 
 def load_model_from_config(path):
@@ -24,14 +27,19 @@ def load_model_from_config(path):
     with open(path + "/inference.json", "r") as f:
         config = json.loads(f.read())
         model_path = config.get("model_path", path)
-        inpaint_model_path = config.get("inpaint_model_path", path)
+        inpaint_model_path = config.get("inpaint_model_path", model_path)
         is_sdxl = config.get("is_sdxl", False)
         base_dimension = config.get("base_dimension", 512)
+        base_model_variant = config.get("base_model_variant", None)
+        base_inpaint_model_variant = config.get("base_inpaint_model_variant", None)
 
         m_config.base_model_path = model_path
         m_config.base_inpaint_model_path = inpaint_model_path
         m_config.is_sdxl = is_sdxl
         m_config.base_dimension = base_dimension
+        m_config.low_gpu_mem = config.get("low_gpu_mem", False)
+        m_config.base_model_variant = base_model_variant
+        m_config.base_inpaint_model_variant = base_inpaint_model_variant
 
         #
         # if config.get("model_type") == "huggingface":
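load_model_from_config reads the three new keys from inference.json with safe defaults, so existing model folders keep working unchanged. A hypothetical config that exercises them (the values are examples only):

    import json

    sample = json.loads("""
    {
        "model_path": "stabilityai/stable-diffusion-xl-base-1.0",
        "is_sdxl": true,
        "base_dimension": 1024,
        "low_gpu_mem": true,
        "base_model_variant": "fp16",
        "base_inpaint_model_variant": "fp16"
    }
    """)
    # inpaint_model_path now falls back to model_path when absent:
    assert sample.get("inpaint_model_path", sample["model_path"]) == sample["model_path"]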
internals/util/prompt.py
CHANGED
@@ -7,7 +7,7 @@ from internals.pipelines.img_to_text import Image2Text
 from internals.pipelines.prompt_modifier import PromptModifier
 from internals.util.anomaly import remove_colors
 from internals.util.avatar import Avatar
-from internals.util.config import num_return_sequences
+from internals.util.config import get_num_return_sequences
 from internals.util.lora_style import LoraStyle
 
 
@@ -29,9 +29,9 @@ def get_patched_prompt(
     if task.is_prompt_engineering():
         prompt = prompt_modifier.modify(prompt)
     else:
-        prompt = [prompt] * num_return_sequences
+        prompt = [prompt] * get_num_return_sequences()
 
-    ori_prompt = [task.get_prompt()] * num_return_sequences
+    ori_prompt = [task.get_prompt()] * get_num_return_sequences()
 
     class_name = None
     add_style_and_character(ori_prompt, class_name)
@@ -60,7 +60,7 @@ def get_patched_prompt_text2img(
     if task.is_prompt_engineering():
         mod_prompt = prompt_modifier.modify(task.get_prompt())
     else:
-        mod_prompt = [task.get_prompt()] * num_return_sequences
+        mod_prompt = [task.get_prompt()] * get_num_return_sequences()
 
     prompt, prompt_left, prompt_right = [], [], []
     for i in range(len(mod_prompt)):
@@ -82,11 +82,12 @@ def get_patched_prompt_text2img(
     if task.is_prompt_engineering():
         mod_prompt = prompt_modifier.modify(task.get_prompt())
     else:
-        mod_prompt = [task.get_prompt()] * num_return_sequences
+        mod_prompt = [task.get_prompt()] * get_num_return_sequences()
     mod_prompt = [add_style_and_character(mp) for mp in mod_prompt]
 
     params = Text2Img.Params(
-        prompt=[add_style_and_character(task.get_prompt())] * num_return_sequences,
+        prompt=[add_style_and_character(task.get_prompt())]
+        * get_num_return_sequences(),
         modified_prompt=mod_prompt,
     )
 
models/ultrasharp/model.py
CHANGED
@@ -3,12 +3,14 @@ from typing import List
 import torch
 
 import models.ultrasharp.arch as arch
-from models.ultrasharp.util import infer_params, …
+from models.ultrasharp.util import infer_params, upscale
 
 
 class Ultrasharp:
-    def __init__(self, filename):
-        self.filename = filename
+    def __init__(self, model_path, tile_pad=0, tile=0):
+        self.filename = model_path
+        self.tile_pad = tile_pad
+        self.tile = tile
 
     def enhance(self, img, outscale=4):
         state_dict = torch.load(self.filename, map_location="cpu")
@@ -23,5 +25,5 @@ class Ultrasharp:
 
         model.to("cuda")
 
-        img = …
+        img = upscale(model, img, self.tile_pad, self.tile)
         return img, None
models/ultrasharp/util.py
CHANGED
@@ -1,3 +1,5 @@
+import math
+
 import numpy as np
 import torch
 
@@ -32,14 +34,93 @@ def infer_params(state_dict):
     return in_nc, out_nc, nf, nb, plus, scale
 
 
-def …(model, img):
+def tile_process(model, img, tile_pad, tile_size, scale=4):
+    """It will first crop input images to tiles, and then process each tile.
+    Finally, all the processed tiles are merged into one images.
+
+    Modified from: https://github.com/ata4/esrgan-launcher
+    """
+    batch, channel, height, width = img.shape
+    output_height = height * scale
+    output_width = width * scale
+    output_shape = (batch, channel, output_height, output_width)
+
+    # start with black image
+    output = img.new_zeros(output_shape)
+    tiles_x = math.ceil(width / tile_size)
+    tiles_y = math.ceil(height / tile_size)
+
+    # loop over all tiles
+    for y in range(tiles_y):
+        for x in range(tiles_x):
+            # extract tile from input image
+            ofs_x = x * tile_size
+            ofs_y = y * tile_size
+            # input tile area on total image
+            input_start_x = ofs_x
+            input_end_x = min(ofs_x + tile_size, width)
+            input_start_y = ofs_y
+            input_end_y = min(ofs_y + tile_size, height)
+
+            # input tile area on total image with padding
+            input_start_x_pad = max(input_start_x - tile_pad, 0)
+            input_end_x_pad = min(input_end_x + tile_pad, width)
+            input_start_y_pad = max(input_start_y - tile_pad, 0)
+            input_end_y_pad = min(input_end_y + tile_pad, height)
+
+            # input tile dimensions
+            input_tile_width = input_end_x - input_start_x
+            input_tile_height = input_end_y - input_start_y
+            tile_idx = y * tiles_x + x + 1
+            input_tile = img[
+                :,
+                :,
+                input_start_y_pad:input_end_y_pad,
+                input_start_x_pad:input_end_x_pad,
+            ]
+
+            # upscale tile
+            try:
+                with torch.no_grad():
+                    output_tile = model(input_tile)
+            except RuntimeError as error:
+                print("Error", error)
+            print(f"\tTile {tile_idx}/{tiles_x * tiles_y}")
+
+            # output tile area on total image
+            output_start_x = input_start_x * scale
+            output_end_x = input_end_x * scale
+            output_start_y = input_start_y * scale
+            output_end_y = input_end_y * scale
+
+            # output tile area without padding
+            output_start_x_tile = (input_start_x - input_start_x_pad) * scale
+            output_end_x_tile = output_start_x_tile + input_tile_width * scale
+            output_start_y_tile = (input_start_y - input_start_y_pad) * scale
+            output_end_y_tile = output_start_y_tile + input_tile_height * scale
+
+            # put tile into output image
+            output[
+                :, :, output_start_y:output_end_y, output_start_x:output_end_x
+            ] = output_tile[
+                :,
+                :,
+                output_start_y_tile:output_end_y_tile,
+                output_start_x_tile:output_end_x_tile,
+            ]
+
+    return output
+
+
+def upscale(model, img, tile_pad, tile_size):
     img = np.array(img)
     img = img[:, :, ::-1]
     img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
     img = torch.from_numpy(img).float()
     img = img.unsqueeze(0).to("cuda")
-    with torch.no_grad():
-        output = model(img)
+
+    output = tile_process(model, img, tile_pad, tile_size, scale=4)
+
     output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
     output = 255.0 * np.moveaxis(output, 0, 2)
     output = output.astype(np.uint8)
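tile_process upscales tile by tile: each tile is cropped with tile_pad extra pixels of context, run through the model, and only the unpadded center is copied into the output canvas. Worked numbers for a 768x512 input with tile_size=320, tile_pad=10, scale=4:

    import math

    width, height, tile_size, tile_pad, scale = 768, 512, 320, 10, 4
    tiles_x = math.ceil(width / tile_size)   # 3
    tiles_y = math.ceil(height / tile_size)  # 2, so 6 tiles total

    # Second tile of the first row (x=1, y=0):
    input_start_x, input_end_x = 320, min(320 + tile_size, width)  # 320..640
    input_start_x_pad = max(input_start_x - tile_pad, 0)           # 310
    input_end_x_pad = min(input_end_x + tile_pad, width)           # 650
    # The model sees the padded 340px-wide strip; only the central 320px
    # (1280px after 4x scaling) lands in the 3072x2048 output canvas.
    print(tiles_x * tiles_y, input_start_x_pad, input_end_x_pad)   # 6 310 650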
requirements.txt
CHANGED
@@ -1,11 +1,11 @@
 boto3==1.24.61
 triton==2.0.0
-diffusers==0.…
+diffusers==0.25.0
 fastapi==0.87.0
 Pillow==9.3.0
 redis==4.3.4
 requests==2.28.1
-transformers==4.…
+transformers==4.36.2
 rembg==2.0.30
 gfpgan==1.3.8
 rembg==2.0.30