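# Gradio demo that runs the INT8-quantized LCM Dreamshaper v7 model with
# OpenVINO through optimum-intel and generates images from text prompts.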
import gradio as gr
from optimum.intel import OVLatentConsistencyModelPipeline
# Only needed when the optional safety checker below is enabled.
#from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
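# INT8 OpenVINO export of LCM Dreamshaper v7; the commented-out IDs are alternative checkpoints.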
#model_id = "echarlaix/sdxl-turbo-openvino-int8"
#model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
#safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
batch_size, num_images, height, width = 1, 1, 1024, 512
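# Reshape the pipeline to static input shapes so OpenVINO can compile an optimized
# graph for this exact batch size and resolution.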
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
#pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
#hiten1
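# Load a locally stored textual inversion embedding, registered under the token "hiten1",
# before compiling the pipeline.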
pipeline.load_textual_inversion("./hiten1.pt", "hiten1")
pipeline.compile()
# Note: LatentConsistencyPipelineMixin.__call__() does not accept a 'negative_prompt' keyword
# (passing one raises a TypeError), so this prompt is defined but left unused below.
negative_prompt = "easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored, "
def infer(prompt, num_inference_steps):
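    """Run the LCM pipeline for a single prompt and return the first generated image."""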
    image = pipeline(
        prompt=prompt,
        #negative_prompt=negative_prompt,
        guidance_scale=7.0,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
    ).images[0]
    return image
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Demo : [Fast LCM](https://huggingface.co/OpenVINO/LCM_Dreamshaper_v7-int8-ov) quantized with NNCF ⚡
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            #with gr.Row():
            #    negative_prompt = gr.Text(
            #        label="Negative prompt",
            #        max_lines=1,
            #        placeholder="Enter a negative prompt",
            #    )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=10,
                    step=1,
                    value=5,
                )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )
    run_button.click(
        fn=infer,
        inputs=[prompt, num_inference_steps],
        outputs=[result],
    )
demo.queue().launch(share=True)