File size: 3,314 Bytes
ca4f7e4
 
 
74008a8
c49d49f
 
 
ca4f7e4
2b7c6f5
 
c49d49f
9691ef5
87fdcac
9691ef5
 
 
 
87fdcac
 
 
 
 
 
9691ef5
 
 
 
 
ca4f7e4
2188f5e
923b990
ad178df
 
 
2188f5e
 
9691ef5
 
 
 
c49d49f
 
 
9691ef5
e9ba573
9691ef5
e9ba573
2b7c6f5
84f01b7
 
 
 
2b7c6f5
cdcffe0
ca4f7e4
17ab53b
 
 
 
8e4e8b9
ad14b11
ca4f7e4
 
84f01b7
26fbd8a
84f01b7
2188f5e
cdcffe0
17ab53b
0ee3f65
 
ca4f7e4
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import gradio as gr
from optimum.intel.openvino import OVStableDiffusionPipeline
from diffusers.training_utils import set_seed
from diffusers import DDPMScheduler, StableDiffusionPipeline
import gc

import subprocess

import time


def create_pipeline(name):
    """Instantiate a Stable Diffusion pipeline for the given model repo id.

    Args:
        name: Hugging Face model repo id — one of the values of the
            module-level ``pipes`` dict.

    Returns:
        A ready-to-run Torch ``StableDiffusionPipeline`` (for the
        ``valhalla/sd-pokemon-model`` id) or a compiled, statically-reshaped
        ``OVStableDiffusionPipeline`` (for the OpenVINO ids).
    """
    if name == "valhalla/sd-pokemon-model":
        scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012,
             beta_schedule="scaled_linear", num_train_timesteps=1000)
        pipe = StableDiffusionPipeline.from_pretrained(name, scheduler=scheduler)
        # Disable the NSFW filter for this demo.
        # NOTE(review): diffusers expects the second element to be a list of
        # per-image flags; the bare ``False`` only works because the demo
        # generates a single image — confirm against the installed diffusers.
        pipe.safety_checker = lambda images, clip_input: (images, False)
    elif name.endswith("stable-diffusion-pokemons-valhalla-fp32"):
        # BUG FIX: the original compared against the bare repo name without the
        # "OpenVINO/" owner prefix, so this branch could never match and the
        # fp32 OpenVINO model silently loaded without its DDPM scheduler.
        # ``endswith`` matches the id regardless of the owner prefix.
        scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012,
             beta_schedule="scaled_linear", num_train_timesteps=1000)
        pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False, scheduler=scheduler)
        # Fix the input shapes before compiling so OpenVINO can fully optimize
        # for the single 512x512 image this demo generates.
        pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
        pipe.compile()
    else:
        pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False)
        pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
        pipe.compile()
    return pipe

# Mapping of UI option label -> Hugging Face model repo id.
# The commented-out ids are earlier model versions kept for reference.
pipes = {
    "Torch fp32": "valhalla/sd-pokemon-model", #"svjack/Stable-Diffusion-Pokemon-en",
    "OpenVINO fp32": "OpenVINO/stable-diffusion-pokemons-valhalla-fp32", #"OpenVINO/stable-diffusion-pokemons-fp32",
    "OpenVINO 8-bit quantized": "OpenVINO/stable-diffusion-pokemons-valhalla-quantized-agressive", #"OpenVINO/stable-diffusion-pokemons-quantized-aggressive",
    "OpenVINO merged and quantized": "OpenVINO/stable-diffusion-pokemons-valhalla-tome-quantized-agressive", #"OpenVINO/stable-diffusion-pokemons-tome-quantized-aggressive"
}

# prefetch pipelines on start
# Instantiating each pipeline once forces the model weights to be downloaded
# and cached, so the first user request does not pay that cost; the pipeline
# objects themselves are discarded immediately to keep memory usage low.
for v in pipes.values():
    pipe = create_pipeline(v)
    del pipe
    gc.collect()

# Log host CPU details at startup (the demo runs on CPU, so this documents
# the hardware behind the reported inference times).
print((subprocess.check_output("lscpu", shell=True).strip()).decode())

def generate(prompt, option, seed):
    """Run one text-to-image generation and measure its wall-clock time.

    Args:
        prompt: text prompt to render.
        option: key into the module-level ``pipes`` dict selecting the model.
        seed: random seed, given as a string or int.

    Returns:
        Tuple of (generated PIL image, elapsed seconds formatted as a string).
    """
    pipe = create_pipeline(pipes[option])
    set_seed(int(seed))
    uses_torch_backend = "Torch" in option
    started = time.time()
    if uses_torch_backend:
        # The Torch pipeline needs explicit dimensions; the OpenVINO pipelines
        # were statically reshaped to 512x512 inside create_pipeline.
        result = pipe(prompt, num_inference_steps=50, output_type="pil", height=512, width=512)
    else:
        result = pipe(prompt, num_inference_steps=50, output_type="pil")
    elapsed = time.time() - started
    return result.images[0], "{:10.4f}".format(elapsed)

# Example prompts for the demo.
# NOTE(review): this list is never passed to gr.Interface below — consider
# wiring it up via the `examples=` parameter if UI examples are wanted.
examples = ["cartoon bird",
            "a drawing of a green pokemon with red eyes",
            "plant pokemon in jungle"]

# Dropdown choices are exactly the keys of the pipes mapping, in insertion
# order. list(dict.keys()) replaces the original identity comprehension.
model_options = list(pipes.keys())

# Build and launch the demo UI.
# NOTE(review): gr.inputs / gr.outputs is the legacy Gradio namespace
# (removed in Gradio 4.x, where these are gr.Textbox / gr.Dropdown /
# gr.Image at top level) — confirm the pinned Gradio version before upgrading.
gr.Interface(
    fn=generate,
    inputs=[gr.inputs.Textbox(default="cartoon bird", label="Prompt", lines=1),
            # Default to the last (most optimized) model version.
            gr.inputs.Dropdown(choices=model_options, default=model_options[-1], label="Model version"),
            gr.inputs.Textbox(default="42", label="Seed", lines=1)
           ],
    outputs=[gr.outputs.Image(type="pil", label="Generated Image"), gr.outputs.Textbox(label="Inference time")],
    title="OpenVINO-optimized Stable Diffusion",
    description="This is the Optimum-based demo for NNCF-optimized Stable Diffusion pipeline trained on 'lambdalabs/pokemon-blip-captions' dataset and running with OpenVINO.\n"
                 "The pipeline is run using 8 vCPUs (4 cores) only.",
    theme="huggingface",
).launch()