# SD_txt2img / model.py — Stable Diffusion text-to-image wrapper
# (Hugging Face Space file; author: sdart, commit 5852deb, "update")
from __future__ import annotations
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
from diffusers import DPMSolverMultistepScheduler
import torch
import PIL.Image
import numpy as np
import datetime
# Report the runtime environment once at import time and pick the device.
print(f"Is CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    # e.g. "Tesla T4" on the target Space hardware.
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
    device = "cuda"
else:
    # Fall back to CPU so importing this module does not crash on CUDA-less
    # machines (get_device_name() raises when no CUDA device is present).
    # NOTE(review): the pipeline below loads fp16 weights — CPU inference
    # with float16 may be slow or unsupported; confirm before relying on it.
    device = "cpu"
class Model:
    """Thin wrapper around a Stable Diffusion text-to-image pipeline.

    Loads a pretrained pipeline in float16 onto the module-level `device`,
    swaps in the DPM-Solver multistep scheduler, and exposes `process()` to
    generate images from a prompt / negative-prompt pair.
    """

    def __init__(self, modelID: str):
        """Load the pretrained pipeline identified by `modelID`.

        Args:
            modelID: Hugging Face model id or local path accepted by
                `StableDiffusionPipeline.from_pretrained`
                (e.g. "runwayml/stable-diffusion-v1-5").
        """
        self.modelID = modelID
        # fp16 weights to fit the pipeline in limited GPU memory.
        self.pipe = StableDiffusionPipeline.from_pretrained(
            modelID, torch_dtype=torch.float16
        )
        self.pipe = self.pipe.to(device)
        # Replace the default scheduler with DPM-Solver multistep, which
        # produces good samples in ~20 inference steps.
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            self.pipe.scheduler.config
        )
        # Requires the xformers package; reduces attention memory usage.
        self.pipe.enable_xformers_memory_efficient_attention()

    def process(self,
                prompt: str,
                negative_prompt: str,
                guidance_scale: float = 7,
                num_images: int = 1,
                num_steps: int = 20,
                ) -> list:
        """Generate images for `prompt`, steering away from `negative_prompt`.

        Args:
            prompt: Text description of the desired image.
            negative_prompt: Concepts to avoid (e.g. "deformed, disfigured").
            guidance_scale: Classifier-free guidance strength.
            num_images: Number of images to generate for the prompt.
            num_steps: Number of denoising steps.

        Returns:
            List of PIL images produced by the pipeline.
        """
        # Fresh random seed every call so identical prompts still vary.
        seed = np.random.randint(0, np.iinfo(np.int32).max)
        generator = torch.Generator(device).manual_seed(seed)
        # Log the request to the Space console for debugging.
        print(datetime.datetime.now())
        print(self.modelID)
        print(prompt)
        print(negative_prompt)
        # inference_mode disables autograd bookkeeping for speed/memory.
        with torch.inference_mode():
            images = self.pipe(prompt=prompt,
                               negative_prompt=negative_prompt,
                               guidance_scale=guidance_scale,
                               num_images_per_prompt=num_images,
                               num_inference_steps=num_steps,
                               generator=generator).images
        return images
# image = pipeline(prompt=prompt,
# negative_prompt = n_prompt,
# num_inference_steps = 2,
# guidance_scale = 7).images