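"""ComfyUI node: Anime Face Segmentor.

Wraps the Anime-Face-Segmentation model
(https://github.com/siyeong0/Anime-Face-Segmentation) as a ControlNet
auxiliary preprocessor, optionally removing the background with ABG and
returning the character mask alongside the segmented image.
"""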
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management
import torch
from einops import rearrange

class AnimeFace_SemSegPreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        # This preprocessor is only trained on 512x resolution
        # https://github.com/siyeong0/Anime-Face-Segmentation/blob/main/predict.py#L25
        return define_preprocessor_inputs(
            remove_background_using_abg=INPUT.BOOLEAN(True),
            resolution=INPUT.RESOLUTION(default=512, min=512, max=512)
        )

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("IMAGE", "ABG_CHARACTER_MASK (MASK)")
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    def execute(self, image, remove_background_using_abg=True, resolution=512, **kwargs):
        from custom_controlnet_aux.anime_face_segment import AnimeFaceSegmentor

        model = AnimeFaceSegmentor.from_pretrained().to(model_management.get_torch_device())
        if remove_background_using_abg:
            # With background removal the annotator returns an RGBA batch:
            # the first three channels are the image, the alpha channel is
            # the ABG character mask.
            out_image_with_mask = common_annotator_call(model, image, resolution=resolution, remove_background=True)
            out_image = out_image_with_mask[..., :3]
            mask = out_image_with_mask[..., 3:]
            # Move the mask channel to dim 1: (N, H, W, 1) -> (N, 1, H, W)
            mask = rearrange(mask, "n h w c -> n c h w")
        else:
            # No ABG pass: return the segmentation map with an all-ones
            # mask of matching size.
            out_image = common_annotator_call(model, image, resolution=resolution, remove_background=False)
            N, H, W, C = out_image.shape
            mask = torch.ones(N, C, H, W)
        # Drop the model reference so VRAM can be reclaimed between runs.
        del model
        return (out_image, mask)

NODE_CLASS_MAPPINGS = {
    "AnimeFace_SemSegPreprocessor": AnimeFace_SemSegPreprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "AnimeFace_SemSegPreprocessor": "Anime Face Segmentor"
}
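
# Usage sketch (an assumption, not part of the node API): inside a running
# ComfyUI instance the graph executor drives this node; calling it by hand
# is roughly equivalent to the following, where `image_batch` is a
# hypothetical IMAGE tensor of shape (N, H, W, 3) with floats in [0, 1].
# (The relative import above means this module only loads inside ComfyUI,
# so the sketch is left as a comment rather than a __main__ block.)
#
#   node = AnimeFace_SemSegPreprocessor()
#   seg_image, abg_mask = node.execute(image_batch,
#                                      remove_background_using_abg=True,
#                                      resolution=512)
#   # seg_image: (N, H, W, 3) segmentation; abg_mask: (N, 1, H, W) character mask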