# edge_max-v4/src/pipeline.py
import torch
from PIL.Image import Image
from diffusers import AutoPipelineForImage2Image, StableDiffusionXLPipeline
from pipelines.models import TextToImageRequest
from torch import Generator
from DeepCache import DeepCacheSDHelper
# Previous single-pipeline implementation, kept disabled for reference.
'''
def load_pipeline() -> StableDiffusionXLPipeline:
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "stablediffusionapi/newdream-sdxl-20",
        torch_dtype=torch.float16,
        local_files_only=True,
    ).to("cuda")

    pipeline(prompt="")

    return pipeline


def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    if request.seed is None:
        generator = None
    else:
        generator = Generator(pipeline.device).manual_seed(request.seed)

    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=20,
    ).images[0]
'''
def load_pipeline() -> dict:
    # Base SDXL text-to-image pipeline.
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "stablediffusionapi/newdream-sdxl-20",
        torch_dtype=torch.float16,
        #local_files_only=True,
        use_safetensors=True,
        variant='fp16',
    ).to("cuda")

    # SDXL refiner, run as an image-to-image stage on the base pipeline's latents.
    refiner = AutoPipelineForImage2Image.from_pretrained(
        'stabilityai/stable-diffusion-xl-refiner-1.0',
        use_safetensors=True,
        torch_dtype=torch.float16,
        variant='fp16',
    ).to('cuda')

    # DeepCache: recompute full UNet features only every `cache_interval` steps
    # and reuse cached features in between, trading a little quality for speed.
    helper = DeepCacheSDHelper(pipe=pipeline)
    helper.set_params(cache_interval=3, cache_branch_id=0)
    helper.enable()

    refiner_helper = DeepCacheSDHelper(pipe=refiner)
    refiner_helper.set_params(cache_interval=3, cache_branch_id=0)
    refiner_helper.enable()

    # Warm up the base pipeline so first-request latency is not spent on
    # CUDA initialization and kernel selection.
    for _ in range(5):
        pipeline(prompt="")

    pipeline_dict = {
        'base_pipeline': pipeline,
        'refiner': refiner,
    }

    return pipeline_dict
def infer(request: TextToImageRequest, pipeline_dict: dict) -> Image:
    if request.seed is None:
        generator = None
    else:
        generator = Generator(pipeline_dict['base_pipeline'].device).manual_seed(request.seed)

    # The base pipeline handles the first 80% of denoising and returns latents
    # so the refiner can pick up where it left off.
    image = pipeline_dict['base_pipeline'](
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=27,
        denoising_end=0.8,
        output_type='latent',
    ).images

    # The refiner finishes the remaining 20% of denoising on the base latents.
    return pipeline_dict['refiner'](
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=27,
        denoising_start=0.8,
        image=image,
    ).images[0]
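

# Minimal local smoke-test sketch, not part of the serving entry point. It
# assumes TextToImageRequest accepts the fields read by infer() above
# (prompt, negative_prompt, width, height, seed) as keyword arguments; adjust
# to the actual model definition in pipelines.models if its constructor differs.
if __name__ == "__main__":
    pipelines = load_pipeline()
    sample_request = TextToImageRequest(
        prompt="a lighthouse at dawn, volumetric light",
        negative_prompt=None,
        width=1024,
        height=1024,
        seed=42,
    )
    result = infer(sample_request, pipelines)
    result.save("sample.png")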