from typing import Dict, Optional

import gradio as gr
import openvino.runtime as ov
from huggingface_hub import snapshot_download
from optimum.intel import OVLatentConsistencyModelPipeline
from optimum.intel.openvino.modeling_diffusion import OVBaseModel, OVModelVaeDecoder

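# INT8 LCM Dreamshaper v7 checkpoint exported to OpenVINO IR and quantized with NNCF.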
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov" |
|
|
|
|
|
|
|
|
|
|
|
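# Load the LCM pipeline with compile=False so the VAE decoder can be replaced and the
# inputs reshaped to static dimensions before compilation.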
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, ov_config={"CACHE_DIR": ""})

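# Static shapes the pipeline is reshaped to before compilation.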
batch_size, num_images, height, width = 1, 1, 1024, 512


class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    def __init__(
        self, model: ov.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
    ):
        # Bypass OVModelVaeDecoder.__init__ and call the shared model-part initializer
        # directly, so the decoder can be built from a standalone OpenVINO IR file.
        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)


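# Swap the full VAE decoder for the lightweight TAESD decoder from deinferno/taesd-openvino
# to speed up latent decoding.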
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipeline.vae_decoder = CustomOVModelVaeDecoder(
    model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
    parent_model=pipeline,
    model_dir=taesd_dir,
)

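# Fix the input shapes to the static dimensions above, then compile the models.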
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipeline.compile()


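# Run the compiled pipeline for a prompt and return the first generated image.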
def infer(prompt, num_inference_steps):
    image = pipeline(
        prompt=prompt,
        guidance_scale=7.0,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
    ).images[0]
    return image


examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

css=""" |
|
#col-container { |
|
margin: 0 auto; |
|
max-width: 520px; |
|
} |
|
""" |
|
|
|
|
|
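# Gradio UI: prompt box and Run button, result image, and an advanced-settings
# accordion holding the inference-steps slider.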
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # Demo : [Fast LCM](https://huggingface.co/OpenVINO/LCM_Dreamshaper_v7-int8-ov) quantized with NNCF ⚡
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

with gr.Accordion("Advanced Settings", open=False): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
num_inference_steps = gr.Slider( |
|
label="Number of inference steps", |
|
minimum=1, |
|
maximum=10, |
|
step=1, |
|
value=5, |
|
) |
|
|
|
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    run_button.click(
        fn=infer,
        inputs=[prompt, num_inference_steps],
        outputs=[result],
    )

demo.queue().launch(share=True)