# Stable Diffusion v1.5 text-to-image demo script
# (exported from a HuggingFace Space; page-status lines removed)
# Setup: import dependencies, pick the best available torch device, then load
# the CLIP tokenizer and the Stable Diffusion v1.5 checkpoint from ../data.
import model_loader
import pipeline
from PIL import Image
from transformers import CLIPTokenizer
import torch

DEVICE = "cpu"
ALLOW_CUDA = True   # prefer CUDA when present
ALLOW_MPS = False   # Apple-Silicon (MPS) backend opt-in, disabled by default

if ALLOW_CUDA and torch.cuda.is_available():
    DEVICE = "cuda"
elif ALLOW_MPS and torch.backends.mps.is_available():
    # torch.has_mps is deprecated; torch.backends.mps.is_available() is the
    # supported check and is equivalent here.
    DEVICE = "mps"
print(f"Using device: {DEVICE}")

# Tokenizer vocab/merges and the model checkpoint are expected in ../data
# relative to the working directory.
tokenizer = CLIPTokenizer(r"../data/vocab.json", merges_file="../data/merges.txt")
model_file = "../data/v1-5-pruned-emaonly.ckpt"
models = model_loader.preload_models_from_standard_weights(model_file, DEVICE)
## TEXT TO IMAGE
# Generation settings for the text-to-image run.
prompt = "A playful dog running through a field of flowers, bathed in golden sunlight."
uncond_prompt = ""  # Also known as negative prompt
do_cfg = True       # enable classifier-free guidance
cfg_scale = 8       # guidance strength; min: 1, max: 14

## SAMPLER
sampler = "ddpm"          # sampler name passed to pipeline.generate
num_inference_steps = 50  # number of denoising steps
seed = 42                 # fixed seed for reproducible output
# Run the diffusion pipeline (pure text-to-image: no init image) and save the
# result to disk as a PNG.
output_image = pipeline.generate(
    prompt=prompt,
    uncond_prompt=uncond_prompt,
    input_image=None,  # No input image provided (text-to-image mode)
    strength=0.5,      # Strength applies to image-to-image only; unused here
    do_cfg=do_cfg,
    cfg_scale=cfg_scale,
    sampler_name=sampler,
    n_inference_steps=num_inference_steps,
    seed=seed,
    models=models,
    device=DEVICE,
    idle_device="cpu",  # presumably where inactive submodels are parked — see pipeline.generate
    tokenizer=tokenizer,
)

# Save the output image; pipeline.generate returns an array accepted by
# Image.fromarray (NOTE(review): assumed uint8 HxWxC — confirm in pipeline).
output_image_path = "output_image.png"
Image.fromarray(output_image).save(output_image_path)
print("Image saved successfully at:", output_image_path)