rupeshs committed on
Commit 3469d37
1 Parent(s): 8c39361

Added hf demo ui

Files changed (1)
  1. frontend/webui/hf_demo.py +145 -0
frontend/webui/hf_demo.py ADDED
@@ -0,0 +1,145 @@
+ import gradio as gr
+ from backend.lcm_text_to_image import LCMTextToImage
+ from backend.models.lcmdiffusion_setting import LCMLora, LCMDiffusionSetting
+ from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
+ from time import perf_counter
+ import numpy as np
+ from cv2 import imencode
+ import base64
+ from backend.device import get_device_name
+ from constants import APP_VERSION
+ from backend.device import is_openvino_device
+
+ lcm_text_to_image = LCMTextToImage()
+ lcm_lora = LCMLora(
+     base_model_id="Lykon/dreamshaper-7",
+     lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
+ )
+
+
+ # https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
+ def encode_pil_to_base64_new(pil_image):
+     image_arr = np.asarray(pil_image)[:, :, ::-1]
+     _, byte_data = imencode(".png", image_arr)
+     base64_data = base64.b64encode(byte_data)
+     base64_string_opencv = base64_data.decode("utf-8")
+     return "data:image/png;base64," + base64_string_opencv
+
+
+ # monkey patching encode pil
+ gr.processing_utils.encode_pil_to_base64 = encode_pil_to_base64_new
+
+
+ def predict(
+     prompt,
+     steps,
+     seed,
+ ):
+     lcm_text_to_image.init(
+         model_id=LCM_DEFAULT_MODEL_OPENVINO,
+         use_lora=True,
+         lcm_lora=lcm_lora,
+         use_openvino=True if is_openvino_device() else False,
+     )
+
+     lcm_diffusion_setting = LCMDiffusionSetting()
+     lcm_diffusion_setting.prompt = prompt
+     lcm_diffusion_setting.guidance_scale = 1.0
+     lcm_diffusion_setting.inference_steps = steps
+     lcm_diffusion_setting.seed = seed
+     lcm_diffusion_setting.use_seed = True
+     lcm_diffusion_setting.image_width = 384 if is_openvino_device() else 512
+     lcm_diffusion_setting.image_height = 384 if is_openvino_device() else 512
+     lcm_diffusion_setting.use_openvino = True if is_openvino_device() else False
+     start = perf_counter()
+     images = lcm_text_to_image.generate(lcm_diffusion_setting)
+     latency = perf_counter() - start
+     print(f"Latency: {latency:.2f} seconds")
+     return images[0]
+
+
+ css = """
+ #container{
+     margin: 0 auto;
+     max-width: 40rem;
+ }
+ #intro{
+     max-width: 100%;
+     text-align: center;
+     margin: 0 auto;
+ }
+ #generate_button {
+     color: white;
+     border-color: #007bff;
+     background: #007bff;
+     width: 200px;
+     height: 50px;
+ }
+ footer {
+     visibility: hidden
+ }
+ """
+
+
+ def _get_footer_message() -> str:
+     version = f"<center><p> {APP_VERSION} "
+     footer_msg = version + (
+         ' © 2023 <a href="https://github.com/rupeshs">'
+         " Rupesh Sreeraman</a></p></center>"
+     )
+     return footer_msg
+
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="container"):
+         use_openvino = "- OpenVINO" if is_openvino_device() else ""
+         gr.Markdown(
+             f"""FastSD CPU demo {use_openvino}
+             **Device : {DEVICE.upper()} , {get_device_name()}**
+             """,
+             elem_id="intro",
+         )
+
+         with gr.Row():
+             with gr.Row():
+                 prompt = gr.Textbox(
+                     placeholder="Describe the image you'd like to see",
+                     scale=5,
+                     container=False,
+                 )
+                 generate_btn = gr.Button(
+                     "Generate",
+                     scale=1,
+                     elem_id="generate_button",
+                 )
+
+         image = gr.Image(type="filepath")
+         with gr.Accordion("Advanced options", open=False):
+             steps = gr.Slider(
+                 label="Steps",
+                 value=2 if is_openvino_device() else 3,
+                 minimum=1,
+                 maximum=6,
+                 step=1,
+             )
+             seed = gr.Slider(
+                 randomize=True,
+                 minimum=0,
+                 maximum=999999999,
+                 label="Seed",
+                 step=1,
+             )
+         gr.HTML(_get_footer_message())
+
+         inputs = [prompt, steps, seed]
+         prompt.input(fn=predict, inputs=inputs, show_progress=False)
+         generate_btn.click(
+             fn=predict, inputs=inputs, outputs=image, show_progress=False
+         )
+         steps.change(fn=predict, inputs=inputs, show_progress=False)
+         seed.change(fn=predict, inputs=inputs, show_progress=False)
+
+
+ def start_demo_text_to_image(share=False):
+     demo.queue()
+     demo.launch(share=share)
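
For context, a minimal sketch of how this new module might be launched from an entry-point script. The entry file and the share=True flag are assumptions for illustration, not part of this commit; only start_demo_text_to_image(share=False) is defined here, and the repository root is assumed to be on PYTHONPATH so the frontend package resolves.

    # hypothetical entry point (not part of this commit): start the HF demo UI
    from frontend.webui.hf_demo import start_demo_text_to_image

    if __name__ == "__main__":
        # share=True asks Gradio for a public link; omit it to serve locally only
        start_demo_text_to_image(share=True)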