from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import requests
import torch
from PIL import Image
from io import BytesIO

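# Download the example sketch used as the conditioning image and resize it to 512x512.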
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))

path = "runwayml/stable-diffusion-v1-5"

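# Toggle this to compare eager execution (False) against torch.compile (True).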
run_compile = False

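# Load the Canny ControlNet and the Stable Diffusion v1.5 pipeline in half precision.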
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    path, controlnet=controlnet, torch_dtype=torch.float16
)

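# Move the pipeline to the GPU and switch the UNet and ControlNet to the
# channels-last memory format, which typically speeds up convolutions.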
pipe = pipe.to("cuda:0")
pipe.unet.to(memory_format=torch.channels_last)
pipe.controlnet.to(memory_format=torch.channels_last)

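# "reduce-overhead" mode uses CUDA graphs to cut kernel-launch overhead;
# fullgraph=True raises an error if the model cannot be captured as a single graph.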
if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

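# Run a few iterations: with compilation enabled, the first call pays the
# compilation cost, so later iterations reflect steady-state latency.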
for _ in range(3):
    image = pipe(prompt=prompt, image=init_image).images[0]
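
# Illustrative addition (not part of the original snippet): time one extra call after
# the warmup loop. torch.cuda.synchronize() is needed because GPU work is asynchronous.
import time

torch.cuda.synchronize()
start = time.perf_counter()
image = pipe(prompt=prompt, image=init_image).images[0]
torch.cuda.synchronize()
print(f"Post-warmup latency: {time.perf_counter() - start:.2f} s")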