#!/usr/bin/env python3
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, AutoencoderKL
import time
from pytorch_lightning import seed_everything
import os
from huggingface_hub import HfApi
# from compel import Compel
import torch
import sys
from pathlib import Path
import requests
from PIL import Image
from io import BytesIO
import xformers
# --- run configuration --------------------------------------------------------
# Toggle between the two load paths below; the refiner stage is optional.
# use_refiner = bool(int(sys.argv[1]))
use_refiner = False
use_diffusers = True

# Model repos: SDXL 1.0 base + refiner, and the fp16-safe VAE replacement.
path = "stabilityai/stable-diffusion-xl-base-1.0"
refiner_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
vae_path = "madebyollin/sdxl-vae-fp16-fix"

# Hub client used at the end of the script to upload the rendered image.
api = HfApi()

# Time the VAE/pipeline loading; elapsed time is printed after the base
# pipeline is constructed below.
start_time = time.time()
vae = AutoencoderKL.from_pretrained(vae_path, torch_dtype=torch.float16)
if use_diffusers:
    # Load the SDXL base pipeline from the local Hub cache (fp16 weights),
    # swapping in the fp16-safe VAE created above.
    # fix: removed leftover `import ipdb; ipdb.set_trace()` — it halted the
    # script unconditionally and depended on an undeclared package.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        path,
        torch_dtype=torch.float16,
        vae=vae,
        variant="fp16",
        use_safetensors=True,
        local_files_only=True,
        add_watermarker=False,
    )
    # pipe.enable_xformers_memory_efficient_attention()  # optional VRAM saver
    print(time.time() - start_time)  # base-pipeline load time (seconds)
    pipe.to("cuda")

    if use_refiner:
        start_time = time.time()
        refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
            refiner_path,
            vae=vae,
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16",
        )
        print(time.time() - start_time)  # refiner load time (seconds)
        refiner.to("cuda")
        # refiner.enable_sequential_cpu_offload()  # trade speed for VRAM if needed
else:
    # Fallback path: pull the 0.9 checkpoints directly from single
    # .safetensors files on the Hub (network I/O on first run).
    start_time = time.time()
    pipe = StableDiffusionXLPipeline.from_single_file(
        "https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/blob/main/sd_xl_base_0.9.safetensors",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    print(time.time() - start_time)
    pipe.to("cuda")

    if use_refiner:
        start_time = time.time()
        refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
            "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9/blob/main/sd_xl_refiner_0.9.safetensors",
            torch_dtype=torch.float16,
            use_safetensors=True,
        )
        print(time.time() - start_time)
        refiner.to("cuda")
prompt = "An astronaut riding a green horse on Mars"
steps = 20
seed = 0
seed_everything(seed)  # make sampling deterministic across runs

start_time = time.time()
# When the refiner is active, hand over latents instead of a decoded PIL image
# so the refiner can continue denoising from them.
image = pipe(
    prompt=prompt,
    num_inference_steps=steps,
    output_type="latent" if use_refiner else "pil",
).images[0]
print(time.time() - start_time)  # generation time (seconds)

if use_refiner:
    image = refiner(prompt=prompt, num_inference_steps=steps - 10, image=image).images[0]

file_name = "aaa"  # fix: was f"aaa" — f-string with no placeholders (ruff F541)
path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
# fix: PIL's save() raises FileNotFoundError if the target directory is missing.
os.makedirs(os.path.dirname(path), exist_ok=True)
image.save(path)
# Upload the rendered image to the shared dataset repo and print its URL.
api.upload_file(
    path_or_fileobj=path,
    # fix: use basename instead of split("/") so Windows paths (backslash
    # separators produced by os.path.join) still yield the bare file name.
    path_in_repo=os.path.basename(path),
    repo_id="patrickvonplaten/images",
    repo_type="dataset",
)
print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")