from modules.utils import *
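# NOTE: the wildcard import above is assumed to provide everything this module references:
# PIL's Image, numpy as np, cv2, torch, random, the @prompts decorator, get_new_image_name,
# seed_everything, and the diffusers classes ControlNetModel,
# StableDiffusionControlNetPipeline, and UniPCMultistepScheduler.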

class Image2Canny:
    def __init__(self, device, pretrained_model_dir):
        # device and pretrained_model_dir are unused here; the signature is kept
        # consistent with the other tools so they can be constructed uniformly.
        print("Initializing Image2Canny")
        # Hysteresis thresholds for cv2.Canny (weak / strong edge cut-offs).
        self.low_threshold = 100
        self.high_threshold = 200
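    # NOTE: @prompts (from modules.utils) presumably attaches the given name and
    # description to the decorated method so the surrounding agent framework can
    # select the right tool and know its expected input format; this reading of
    # its behaviour is an assumption based on how it is used in this module.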
    @prompts(name="Edge Detection On Image",
             description="useful when you want to detect the edge of the image. "
                         "like: detect the edges of this image, or canny detection on image, "
                         "or perform edge detection on this image, or detect the canny image of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        # `inputs` is the path of the image to run edge detection on.
        image = Image.open(inputs)
        image = np.array(image)
        canny = cv2.Canny(image, self.low_threshold, self.high_threshold)
        # Replicate the single-channel edge map into three channels so it can be
        # saved and consumed as an ordinary RGB image.
        canny = canny[:, :, None]
        canny = np.concatenate([canny, canny, canny], axis=2)
        canny = Image.fromarray(canny)
        updated_image_path = get_new_image_name(inputs, func_name="edge")
        canny.save(updated_image_path)
        print(f"\nProcessed Image2Canny, Input Image: {inputs}, Output Canny: {updated_image_path}")
        return updated_image_path


class CannyText2Image:
    def __init__(self, device, pretrained_model_dir):
        print(f"Initializing CannyText2Image to {device}")
        # Use half precision on GPU to save memory; fall back to float32 on CPU.
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained(f"{pretrained_model_dir}/sd-controlnet-canny",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            f"{pretrained_model_dir}/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
            torch_dtype=self.torch_dtype)
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        # Placeholder seed; a fresh random seed is drawn on every inference call.
        self.seed = -1
        # Quality boosters appended to every prompt, plus a standard negative prompt.
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                        'fewer digits, cropped, worst quality, low quality'
    @prompts(name="Generate Image Condition On Canny Image",
             description="useful when you want to generate a new real image from both the user description and a canny image."
                         " like: generate a real image of an object or something from this canny image,"
                         " or generate a new real image of an object or something from this edge image. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description. ")
    def inference(self, inputs):
        # Inputs arrive as "image_path, description"; everything after the first
        # comma is treated as the text instruction.
        image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
        image = Image.open(image_path)
        # Fix a fresh random seed for this generation.
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = instruct_text + ', ' + self.a_prompt
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="canny2image")
        image.save(updated_image_path)
        print(f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path
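

if __name__ == "__main__":
    # Minimal sketch of how the two tools chain together; the device string,
    # checkpoint directory, and input image path below are assumptions, not
    # values taken from the rest of the project.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    pretrained_model_dir = "checkpoints"  # assumed to hold sd-controlnet-canny and stable-diffusion-v1-5
    edge_tool = Image2Canny(device, pretrained_model_dir)
    canny_path = edge_tool.inference("image/example.png")  # hypothetical input image
    gen_tool = CannyText2Image(device, pretrained_model_dir)
    # CannyText2Image expects a single "image_path, description" string.
    result_path = gen_tool.inference(f"{canny_path}, a red sports car on a mountain road")
    print(result_path)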