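The snippets below assume the Stable Diffusion Mega community pipeline has already been loaded and that a small download_image helper is available. A minimal setup sketch (the model id, dtype, and device here are illustrative and may differ from the setup earlier in the document):
import requests
import torch
from io import BytesIO
from PIL import Image
from diffusers import DiffusionPipeline

def download_image(url):
    # fetch an image over HTTP and return it as an RGB PIL image
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")

# illustrative setup: load the "stable_diffusion_mega" community pipeline
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()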
### Text-to-Image
images = pipe.text2img("An astronaut riding a horse").images
### Image-to-Image
init_image = download_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
prompt = "A fantasy landscape, trending on artstation"
images = pipe.img2img(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
### Inpainting
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
prompt = "a cat sitting on a bench"
images = pipe.inpaint(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75).images
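The strength argument controls how far the initial image is pushed toward the prompt: values close to 0.0 keep the input sketch largely intact, while values close to 1.0 let the prompt dominate.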
As shown above, this single pipeline can run text-to-image, image-to-image, and inpainting.
Long Prompt Weighting Stable Diffusion |
The pipeline lets you use prompts longer than the 77-token limit of the CLIP text encoder, and you can increase a word's weight with "()" or decrease it with "[]".
The pipeline also lets you use the main Stable Diffusion use cases from a single class.
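For example, weights can be written directly in the prompt string, either with bare parentheses/brackets or with an explicit factor such as "(word:1.3)"; the prompt below is purely illustrative:
# illustrative weighted prompt: emphasize "masterpiece", de-emphasize "blurry"
prompt = "a (masterpiece:1.3) portrait of a red fox, [blurry], detailed fur"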
pytorch |
from diffusers import DiffusionPipeline |
import torch |
pipe = DiffusionPipeline.from_pretrained(
    "hakurei/waifu-diffusion", custom_pipeline="lpw_stable_diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda") |
prompt = "best_quality (1girl:1.3) bow bride brown_hair closed_mouth frilled_bow frilled_hair_tubes frills (full_body:1.3) fox_ear hair_bow hair_tubes happy hood japanese_clothes kimono long_sleeves red_bow smile solo tabi uchikake white_kimono wide_sleeves cherry_blossoms" |
neg_prompt = "lowres, bad_anatomy, error_body, error_hair, error_arm, error_hands, bad_hands, error_fingers, bad_fingers, missing_fingers, error_legs, bad_legs, multiple_legs, missing_legs, error_lighting, error_shadow, error_reflection, text, error, extra_digit, fewer_digits, cropped, worst_quality, low_quality, normal_quality, jpeg_artifacts, signature, watermark, username, blurry" |
pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0] |
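The call returns PIL images, so the result can be assigned and saved; the filename below is just an example:
image = pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0]
image.save("lpw_text2img.png")  # example output path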
onnxruntime |
from diffusers import DiffusionPipeline |
import torch |
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="lpw_stable_diffusion_onnx",
    revision="onnx",
    provider="CUDAExecutionProvider",
)
prompt = "a photo of an astronaut riding a horse on mars, best quality" |
neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" |
pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0] |
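Note that CUDAExecutionProvider requires the onnxruntime-gpu package; on a CPU-only machine you can pass provider="CPUExecutionProvider" instead.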
If you see the warning "Token indices sequence length is longer than the specified maximum sequence length for this model ( *** > 77 ). Running this sequence through the model will result in indexing errors", do not worry, it is normal for this pipeline.
Speech to Image |
The following code can generate an image from an audio sample using the pre-trained OpenAI whisper-small model and Stable Diffusion.
import torch |
import matplotlib.pyplot as plt |
from datasets import load_dataset |
from diffusers import DiffusionPipeline |
from transformers import (
    WhisperForConditionalGeneration,
    WhisperProcessor,
)
device = "cuda" if torch.cuda.is_available() else "cpu" |
# load a short dummy speech sample (a small subset of LibriSpeech ASR)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = ds[3]

text = audio_sample["text"].lower()
speech_data = audio_sample["audio"]["array"]

# Whisper model and processor used to transcribe the audio into a prompt
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",