import torch
from PIL.Image import Image
from torch import Generator

import oneflow as flow  # noqa: F401  (not referenced directly below)
from diffusers import DDIMScheduler
from onediff.infer_compiler import oneflow_compile
from onediffx import compile_pipe, load_pipe, save_pipe
from onediffx.deep_cache import StableDiffusionXLPipeline

from loss import SchedulerWrapper
from pipelines.models import TextToImageRequest

|
def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
    if pipeline is None:
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "./models/newdream-sdxl-20",
            torch_dtype=torch.float16,
            local_files_only=True,
        )

    pipeline.to("cuda")

    # Swap in a DDIM scheduler and wrap it with SchedulerWrapper (which provides prepare_loss()).
    pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))

    # Compile the full pipeline with onediffx, then compile the UNet with the OneFlow backend.
    pipeline = compile_pipe(pipeline)
    pipeline.unet = oneflow_compile(pipeline.unet)

    # Restore previously saved compiled graphs instead of recompiling from scratch.
    load_pipe(pipeline, dir="cached_pipe")

    # Warm-up passes so compilation and DeepCache setup happen before the first real request.
    for _ in range(4):
        pipeline(
            prompt="make submissions great again",
            cache_interval=1,
            cache_layer_id=0,
            cache_block_id=0,
            num_inference_steps=20,
        )

    pipeline.scheduler.prepare_loss()

    return pipeline
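

# The compiled-graph cache read by load_pipe() above is typically produced with save_pipe()
# (imported but not called in this module). A minimal sketch, assuming the onediffx
# save/load API mirrors the load_pipe(...) call above; the helper name is hypothetical:
def save_compiled_pipe(pipeline: StableDiffusionXLPipeline, cache_dir: str = "cached_pipe") -> None:
    # Persist the compiled pipeline so later runs can call load_pipe(pipeline, dir=cache_dir).
    save_pipe(pipeline, dir=cache_dir)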
|
|
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    # Use a seeded generator for reproducible outputs; fall back to random sampling otherwise.
    if request.seed is None:
        generator = None
    else:
        generator = Generator(pipeline.device).manual_seed(request.seed)

    # DeepCache arguments mirror the warm-up call in load_pipeline().
    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=15,
        cache_interval=1,
        cache_layer_id=0,
        cache_block_id=0,
    ).images[0]
|
|
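# Hypothetical local smoke test (not one of the serving entry points). It assumes
# TextToImageRequest can be constructed with the same fields that infer() reads
# (prompt, negative_prompt, width, height, seed); adjust to the actual model if needed.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor painting of a lighthouse at dawn",
        negative_prompt=None,
        width=1024,
        height=1024,
        seed=42,
    )
    infer(request, pipe).save("output.png")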