# NOTE(review): removed web-viewer extraction residue (file-size line, commit
# hashes, gutter line numbers) that was not part of the source and would break
# the module at import time.
import torch
from DeepCache import DeepCacheSDHelper
from diffusers import AutoPipelineForImage2Image, StableDiffusionXLPipeline
from PIL.Image import Image
from torch import Generator

from pipelines.models import TextToImageRequest
# NOTE: a previous single-pipeline implementation (base SDXL only, 20 steps,
# no refiner) was kept here inside a commented-out triple-quoted string. It
# was superseded by the base+refiner implementation below and has been
# removed; recover it from version control if needed.
def load_pipeline() -> dict:
    """Load the SDXL base pipeline and the SDXL refiner onto CUDA in fp16,
    enable DeepCache feature caching on both, warm them up, and return them.

    Returns:
        dict with keys:
            'base_pipeline': StableDiffusionXLPipeline (text-to-image, fp16, CUDA)
            'refiner': AutoPipelineForImage2Image (SDXL refiner, fp16, CUDA)
    """
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "stablediffusionapi/newdream-sdxl-20",
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    ).to("cuda")
    refiner = AutoPipelineForImage2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        use_safetensors=True,
        torch_dtype=torch.float16,
        variant="fp16",
    ).to("cuda")

    # Enable DeepCache on both pipelines: reuse cached U-Net features every
    # 3 denoising steps (branch 0) to reduce per-image latency. The helper
    # installs hooks on the pipeline, so it need not be kept alive here.
    for pipe in (pipeline, refiner):
        helper = DeepCacheSDHelper(pipe=pipe)
        helper.set_params(cache_interval=3, cache_branch_id=0)
        helper.enable()

    # Warm-up passes so CUDA kernel selection/compilation happens before the
    # first real request is served.
    for _ in range(5):
        pipeline(prompt="")

    return {
        "base_pipeline": pipeline,
        "refiner": refiner,
    }
def infer(request: TextToImageRequest, pipeline_dict: dict) -> Image:
    """Generate an image for *request* using the base+refiner SDXL ensemble.

    The base pipeline denoises the first 80% of the schedule and emits
    latents; the refiner resumes at the same point and finishes the
    remaining 20%, returning the final PIL image.
    """
    base = pipeline_dict['base_pipeline']
    refiner = pipeline_dict['refiner']

    # Seeded generator for reproducibility; None lets the pipeline pick.
    generator = None
    if request.seed is not None:
        generator = Generator(base.device).manual_seed(request.seed)

    # Base pass: stop early (denoising_end=0.8) and hand off raw latents.
    latents = base(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=27,
        denoising_end=0.8,
        output_type='latent',
    ).images

    # Refiner pass: resume from the base latents at denoising_start=0.8.
    return refiner(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=27,
        denoising_start=0.8,
        image=latents,
    ).images[0]
|