# SDXL "xmasize" LoRA sample-image generation script (base + refiner pipelines).
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
import torch
import os
#////////////////////////////////////////////////////////////////
# Generation settings shared by every prompt below.
guidance_scale=7          # classifier-free guidance strength
steps=20                  # inference steps for both base and refiner passes
width=1024                # SDXL native resolution
height=1024
base_model_id_str = "SDXL-xmasize-Lora-Images"  # output directory / filename tag
prompt_prefix = ""
prompt_suffix = " xmasize, Very detailed, clean, high quality, sharp image"  # trigger word + quality boosters
neg_prompt = "text, watermark, grainy, blurry, unfocused, nsfw, naked, nude, noisy image, deformed, distorted, pixelated"
#////////////////////////////////////////////////////////////////
# Pipelines are populated by load() and read by generate().
base = None
refiner = None
#////////////////////////////////////////////////////////////////
def load():
    """Load the SDXL base (with xmasize LoRA) and refiner pipelines onto CUDA.

    Populates the module-level ``base`` and ``refiner`` globals.
    Side effects: downloads model weights on first run, allocates GPU memory.
    """
    global base, refiner
    # fp16-fix VAE avoids NaN/black-image artifacts when decoding in float16.
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    base = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        vae=vae,  # BUG FIX: vae was loaded but never passed to the pipeline
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    base.to("cuda")
    base.load_lora_weights("Norod78/SDXL-xmasize-Lora")
    base.enable_xformers_memory_efficient_attention()
    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        text_encoder_2=base.text_encoder_2,  # share encoder to save VRAM
        vae=base.vae,                        # now the fp16-fix VAE as well
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
    refiner.to("cuda")
    refiner.enable_xformers_memory_efficient_attention()
def generate(prompt, file_prefix ,samples = 2, seed = 7777):
    """Run the base->refiner SDXL chain for *prompt* and save JPEGs.

    Args:
        prompt: core prompt text; prefix/suffix constants are appended.
        file_prefix: filename stem for the saved images.
        samples: number of images generated in one batch.
        seed: manual torch seed, reset before each pipeline pass
            so runs are reproducible.
    """
    global base, refiner
    full_prompt = prompt_prefix + prompt + prompt_suffix
    batch_prompts = [full_prompt] * samples
    batch_negatives = [neg_prompt] * samples

    # Base pass: emit latents only; the refiner consumes them directly.
    torch.manual_seed(seed)
    latents = base(
        batch_prompts,
        negative_prompt=batch_negatives,
        num_inference_steps=steps,
        guidance_scale=guidance_scale,
        height=height,
        width=width,
        output_type="latent",
    )["images"]

    # Refiner pass: same seed so noise scheduling stays reproducible.
    torch.manual_seed(seed)
    images = refiner(
        batch_prompts,
        negative_prompt=batch_negatives,
        num_inference_steps=steps,
        image=latents,
    )["images"]

    for idx, image in enumerate(images):
        image.save(f"{base_model_id_str}/{file_prefix}-{idx}-{seed}--{width}x{height}--{guidance_scale}--{base_model_id_str}.jpg")
def main():
    """Create the output directory, load the pipelines, and render all samples."""
    # BUG FIX: os.mkdir raised FileExistsError on re-runs; makedirs with
    # exist_ok=True is idempotent. Created before load() so a filesystem
    # failure surfaces before the expensive model download/initialization.
    os.makedirs(base_model_id_str, exist_ok=True)
    load()
    generate("A livingroom", "01_LivingRoom")
    generate("A nice town", "02_NiceTown")
    generate("A scene in \"The Minions\" movie", "03_MinionsMovie")
    generate("Wonderwoman", "04_Wonderwoman")
    generate("Marge Simpson", "05_MargeSimpson")
    generate("A beautiful woman", "06_BeautifulWoman")
    generate("A magical landscape", "07_MagicalLandscape")
    generate("Cute dog", "08_CuteDog")
    generate("An oil on canvas portrait of Snoop Dogg, Mark Ryden", "09_SnoopDog")
    generate("A flemish baroque painting of Kermit from the muppet show", "10_KermitFlemishBaroque")
    generate("Gal Gadot in Avatar", "11_GalGadotAvatar")
    generate("Ninja turtles, Naoto Hattori", "12_TMNT")
    generate("A socially awkward potato", "13_AwkwardPotato")
    generate("Pikachu as Rick and morty, Eric Wallis", "14_PikachuRnM")
    generate("The girl with pearl earing", "15_PearlEaring")
    generate("American Gothic ", "16_AmericanGothic")
    generate("Miss. Piggy as the Mona Lisa", "17_MsPiggyMonaLisa")
    generate("Rick Sanchez from the TV show \"Rick and Morty\"", "18_RickSanchez")
    generate("A paiting of Southpark with rainbow", "19_Southpark")
    generate("An oil painting of Phineas and Pherb hamering on a new machine, Eric Wallis", "20_PhineasPherb")
    generate("Bender, Saturno Butto", "21_Bender")
    generate("A psychedelic image of Bojack Horseman", "22_Bojack")
    generate("A movie poster for Gravity Falls Cthulhu stories", "23_GravityFalls")
    generate("A vibrant oil painting portrait of She-Ra", "24_Shira", 2, 512)
# Standard script entry guard: run the full generation batch when executed directly.
if __name__ == '__main__':
    main()