import numpy as np
import torch
from transformers.tools.base import Tool, get_default_device
from transformers.utils import (
    is_accelerate_available,
    is_diffusers_available,
    is_opencv_available,
    is_vision_available,
)

if is_vision_available():
    from PIL import Image

if is_diffusers_available():
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

if is_opencv_available():
    import cv2

IMAGE_TRANSFORMATION_DESCRIPTION = (
    "This is a tool that transforms an image according to a prompt. It takes two inputs: `image`, which should be "
    "the image to transform, and `prompt`, which should be the prompt to use to change it. It returns the "
    "modified image."
)


class ImageTransformationTool(Tool):
    default_stable_diffusion_checkpoint = "runwayml/stable-diffusion-v1-5"
    default_controlnet_checkpoint = "lllyasviel/sd-controlnet-canny"
    description = IMAGE_TRANSFORMATION_DESCRIPTION
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, device=None, controlnet=None, stable_diffusion=None, **hub_kwargs) -> None:
        if not is_accelerate_available():
            raise ImportError("Accelerate should be installed in order to use tools.")
        if not is_diffusers_available():
            raise ImportError("Diffusers should be installed in order to use the StableDiffusionTool.")
        if not is_vision_available():
            raise ImportError("Pillow should be installed in order to use the StableDiffusionTool.")
        if not is_opencv_available():
            raise ImportError("opencv should be installed in order to use the StableDiffusionTool.")

        super().__init__()

        # Fall back to the default checkpoints when none are provided.
        if controlnet is None:
            controlnet = self.default_controlnet_checkpoint
        self.controlnet_checkpoint = controlnet

        if stable_diffusion is None:
            stable_diffusion = self.default_stable_diffusion_checkpoint
        self.stable_diffusion_checkpoint = stable_diffusion

        self.device = device
        self.hub_kwargs = hub_kwargs

    def setup(self):
        if self.device is None:
            self.device = get_default_device()

        # Load the ControlNet conditioning model and the Stable Diffusion pipeline built on top of it.
        self.controlnet = ControlNetModel.from_pretrained(self.controlnet_checkpoint)
        self.pipeline = StableDiffusionControlNetPipeline.from_pretrained(
            self.stable_diffusion_checkpoint, controlnet=self.controlnet
        )
        self.pipeline.scheduler = UniPCMultistepScheduler.from_config(self.pipeline.scheduler.config)
        self.pipeline.enable_model_cpu_offload()

        self.is_initialized = True

    def __call__(self, image, prompt):
        if not self.is_initialized:
            self.setup()

        # Prepend a base style prompt; the ", " separator keeps it from running into the user prompt.
        initial_prompt = "super-hero character, best quality, extremely detailed"
        prompt = initial_prompt + ", " + prompt

        # Build the Canny edge map that conditions the ControlNet.
        low_threshold = 100
        high_threshold = 200

        image = np.array(image)
        image = cv2.Canny(image, low_threshold, high_threshold)
        image = image[:, :, None]
        image = np.concatenate([image, image, image], axis=2)
        canny_image = Image.fromarray(image)

        # Fixed seed for reproducible outputs.
        generator = torch.Generator(device="cpu").manual_seed(2)

        return self.pipeline(
            prompt,
            canny_image,
            negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
            num_inference_steps=20,
            generator=generator,
        ).images[0]
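

# A minimal usage sketch, not part of the original tool: it assumes the default checkpoints above can be
# downloaded from the Hugging Face Hub and that "input.png" is a hypothetical local image path.
if __name__ == "__main__":
    tool = ImageTransformationTool()
    source = Image.open("input.png")  # hypothetical example input
    result = tool(source, "a futuristic city at night")
    result.save("output.png")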