```py
    num_inference_steps=4,
    guidance_scale=1,
    strength=0.6,
    generator=generator
).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```

You can get different results based on your prompt and the image you provide. To get the best results, we recommend trying different values for the `num_inference_steps`, `strength`, and `guidance_scale` parameters and choosing the best one.
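For example, here is a minimal sketch of such a sweep. It assumes the image-to-image `pipe`, `init_image`, and `prompt` from the example above are already defined, and the candidate values are only illustrative:

```py
import torch
from diffusers.utils import make_image_grid

# Illustrative grid search over a few candidate values; pick the combination you like best.
images = []
for strength in [0.5, 0.6, 0.7]:
    for guidance_scale in [1.0, 1.5]:
        generator = torch.manual_seed(0)  # fixed seed so only the parameters change
        images.append(
            pipe(
                prompt,
                image=init_image,
                num_inference_steps=4,
                guidance_scale=guidance_scale,
                strength=strength,
                generator=generator,
            ).images[0]
        )
make_image_grid(images, rows=3, cols=2)
```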
## Combine with styled LoRAs

LCM-LoRA can be combined with other LoRAs to generate styled images in very few steps (4-8). In the following example, we'll use the LCM-LoRA with the papercut LoRA. To learn more about how to combine LoRAs, refer to this guide.

```py
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LoRAs
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")

# Combine LoRAs
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])

prompt = "papercut, a cute fox"
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
image
```
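The balance between the two LoRAs is set by `adapter_weights`. If the style overpowers the result (or is too weak), you can re-run `set_adapters` with different weights; the values below are only illustrative:

```py
# Illustrative: reduce the papercut style's influence while keeping the LCM-LoRA at full strength
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.5])

generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
image
```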
## ControlNet/T2I-Adapter

Let's look at how we can perform inference with ControlNet/T2I-Adapter and LCM-LoRA.

### ControlNet

For this example, we'll use the SD-v1-5 model and the LCM-LoRA for SD-v1-5 with the canny ControlNet.

```py
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# prepare the canny conditioning image
image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
    variant="fp16"
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

generator = torch.manual_seed(0)
image = pipe(
    "the mona lisa",
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.8,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

The inference parameters in this example might not work for all cases, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the best one.
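As a starting point, a minimal sketch of such a comparison might look like this (the candidate values are illustrative only, and `pipe` and `canny_image` are assumed from the example above):

```py
# Illustrative sweep over the ControlNet conditioning strength with a fixed seed
images = [canny_image]
for scale in [0.6, 0.8, 1.0]:
    generator = torch.manual_seed(0)
    images.append(
        pipe(
            "the mona lisa",
            image=canny_image,
            num_inference_steps=4,
            guidance_scale=1.5,
            controlnet_conditioning_scale=scale,
            cross_attention_kwargs={"scale": 1},
            generator=generator,
        ).images[0]
    )
make_image_grid(images, rows=1, cols=4)
```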
### T2I-Adapter

This example shows how to use the LCM-LoRA with the Canny T2I-Adapter and SDXL.

```py
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# Prepare image
# Detect the canny map in low resolution to avoid high-frequency details
image = load_image(
    "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg"
).resize((384, 384))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1024))

# load adapter
```
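The rest of this example is not shown above. As a rough sketch, loading the adapter and running inference might look like the following; the `TencentARC/t2i-adapter-canny-sdxl-1.0` adapter, the prompt, and the parameter values are assumptions for illustration, not settings confirmed by the original example:

```py
# Illustrative continuation: assumed repositories, prompt, and parameter values
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-canny-sdxl-1.0",
    torch_dtype=torch.float16,
)

pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "a cinematic photo of a lighthouse on a cliff, golden hour"
generator = torch.manual_seed(0)
image = pipe(
    prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.8,
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```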