from __future__ import annotations

import datetime

import numpy as np
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Report whether a CUDA GPU is visible before loading the model.
print(f"Is CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")

# The pipeline is loaded in float16 and uses xformers attention, so a CUDA device is required.
device = "cuda"

class Model:
    """Thin wrapper around a Stable Diffusion text-to-image pipeline."""

    def __init__(self, modelID):
        self.modelID = modelID
        # Load the checkpoint in half precision and move it to the GPU.
        self.pipe = StableDiffusionPipeline.from_pretrained(modelID, torch_dtype=torch.float16)
        self.pipe = self.pipe.to(device)
        # Swap in the DPM-Solver++ multistep scheduler for faster sampling.
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
        # Reduce attention memory usage (requires the xformers package).
        self.pipe.enable_xformers_memory_efficient_attention()

    def process(self,
                prompt: str,
                negative_prompt: str,
                guidance_scale: float = 7,
                num_images: int = 1,
                num_steps: int = 20,
                ):
        # Draw a fresh random seed for each call so repeated prompts produce different images.
        seed = np.random.randint(0, np.iinfo(np.int32).max)
        generator = torch.Generator(device).manual_seed(seed)
        # Log basic metadata about this generation run.
        now = datetime.datetime.now()
        print(now)
        print(self.modelID)
        print(prompt)
        print(negative_prompt)
        with torch.inference_mode():
            images = self.pipe(prompt=prompt,
                               negative_prompt=negative_prompt,
                               guidance_scale=guidance_scale,
                               num_images_per_prompt=num_images,
                               num_inference_steps=num_steps,
                               generator=generator).images
        return images
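

# Minimal usage sketch: assumes a CUDA GPU is present and uses
# "runwayml/stable-diffusion-v1-5" as a placeholder checkpoint ID; substitute
# whichever Stable Diffusion checkpoint you actually use.
if __name__ == "__main__":
    model = Model("runwayml/stable-diffusion-v1-5")
    results = model.process(
        prompt="a watercolor painting of a lighthouse at dawn",
        negative_prompt="blurry, low quality",
        guidance_scale=7,
        num_images=1,
        num_steps=20,
    )
    # The pipeline returns PIL images; save each one to disk.
    for i, image in enumerate(results):
        image.save(f"output_{i}.png")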