import gradio as gr
import numpy as np
from optimum.intel import OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
# model_id = "echarlaix/sdxl-turbo-openvino-int8"
# model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
#safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
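# INT8-quantized LCM Dreamshaper checkpoint exported for OpenVINO (quantized with NNCF)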
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
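# Reshape the pipeline to static shapes (1 image, 512x512) before compiling so
# OpenVINO can optimize for the fixed input configuration.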
batch_size, num_images, height, width = 1, 1, 512, 512
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipeline.compile()
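# Generate a single image for the given prompt and settings.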
def infer(prompt, negative_prompt, num_inference_steps):
    image = pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        # guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
    ).images[0]
    return image
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # Demo : [Fast LCM](https://huggingface.co/OpenVINO/LCM_Dreamshaper_v7-int8-ov) quantized with NNCF ⚡
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=True,
            )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=10,
                    step=1,
                    value=5,
                )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )
    # Wire the UI inputs to the inference function.
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, num_inference_steps],
        outputs=[result],
    )

demo.queue().launch()