rupeshs committed
Commit 1139a17
1 Parent(s): 7db87bf

updated model

Files changed (1)
  1. frontend/webui/hf_demo.py +7 -5
frontend/webui/hf_demo.py CHANGED
@@ -42,6 +42,8 @@ def predict(
     print(f"prompt - {prompt}")
     lcm_diffusion_setting = LCMDiffusionSetting()
     lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
+    lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/LCM-dreamshaper-v7-openvino"
+    lcm_diffusion_setting.use_lcm_lora = True
     lcm_diffusion_setting.prompt = prompt
     lcm_diffusion_setting.guidance_scale = 1.0
     lcm_diffusion_setting.inference_steps = steps
@@ -51,17 +53,17 @@ def predict(
     lcm_diffusion_setting.use_tiny_auto_encoder = True
     # lcm_diffusion_setting.image_width = 320 if is_openvino_device() else 512
     # lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
-    lcm_diffusion_setting.image_width = 512
-    lcm_diffusion_setting.image_height = 512
+    lcm_diffusion_setting.image_width = 320
+    lcm_diffusion_setting.image_height = 320
     lcm_diffusion_setting.use_openvino = True
-    lcm_diffusion_setting.use_tiny_auto_encoder = False
+    lcm_diffusion_setting.use_tiny_auto_encoder = True
     pprint(lcm_diffusion_setting.model_dump())
     lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)
     start = perf_counter()
     images = lcm_text_to_image.generate(lcm_diffusion_setting)
     latency = perf_counter() - start
     print(f"Latency: {latency:.2f} seconds")
-    return images[0]
+    return images[0].resize([512, 512], PIL.Image.ANTIALIAS)
 
 
 css = """
@@ -123,7 +125,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Accordion("Advanced options", open=False):
             steps = gr.Slider(
                 label="Steps",
-                value=1,
+                value=3,
                 minimum=1,
                 maximum=4,
                 step=1,
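
For context, this is roughly how predict() reads after the commit: the demo pins the OpenVINO build of LCM Dreamshaper v7, generates at 320x320 with the tiny autoencoder enabled, and upscales the result to 512x512 before returning it. The sketch below is assembled only from the changed hunks; the function signature, the import paths, and the module-level lcm_text_to_image instance are assumptions, since the diff does not show them.

# Sketch of predict() after this commit; only the assignments shown in the
# diff are taken from the source, everything else is an assumption.
from pprint import pprint
from time import perf_counter

import PIL.Image
# Assumed project-internal imports (the diff does not show the top of hf_demo.py):
# from backend.models.lcmdiffusion_setting import DiffusionTask, LCMDiffusionSetting
# lcm_text_to_image is a module-level text-to-image helper defined elsewhere in hf_demo.py.


def predict(prompt, steps):  # signature abbreviated; the real function has more parameters
    print(f"prompt - {prompt}")
    lcm_diffusion_setting = LCMDiffusionSetting()
    lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
    # Added in this commit: pin the OpenVINO LCM model and enable the LCM-LoRA path.
    lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/LCM-dreamshaper-v7-openvino"
    lcm_diffusion_setting.use_lcm_lora = True
    lcm_diffusion_setting.prompt = prompt
    lcm_diffusion_setting.guidance_scale = 1.0
    lcm_diffusion_setting.inference_steps = steps
    # ... lines 48-50 of hf_demo.py are not part of the diff and are omitted here ...
    lcm_diffusion_setting.use_tiny_auto_encoder = True
    # Changed in this commit: generate at 320x320 instead of 512x512.
    lcm_diffusion_setting.image_width = 320
    lcm_diffusion_setting.image_height = 320
    lcm_diffusion_setting.use_openvino = True
    lcm_diffusion_setting.use_tiny_auto_encoder = True  # was False before this commit
    pprint(lcm_diffusion_setting.model_dump())
    lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)
    start = perf_counter()
    images = lcm_text_to_image.generate(lcm_diffusion_setting)
    latency = perf_counter() - start
    print(f"Latency: {latency:.2f} seconds")
    # Changed in this commit: upscale the 320x320 output back to 512x512 for display.
    return images[0].resize([512, 512], PIL.Image.ANTIALIAS)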
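
A note on the new return line: resize() with PIL.Image.ANTIALIAS works on the Pillow versions the demo targets, but ANTIALIAS is an alias of the LANCZOS filter that was deprecated in Pillow 9.1 and removed in Pillow 10, where Image.Resampling.LANCZOS is the replacement. A version-tolerant variant of the same upscale step could look like the sketch below; upscale_for_display is a hypothetical helper, not part of the demo.

import PIL.Image

# Pillow >= 9.1 exposes resampling filters under Image.Resampling;
# ANTIALIAS (an alias of LANCZOS) was removed in Pillow 10.
try:
    _LANCZOS = PIL.Image.Resampling.LANCZOS
except AttributeError:  # older Pillow
    _LANCZOS = PIL.Image.ANTIALIAS


def upscale_for_display(image, size=(512, 512)):
    # Hypothetical helper; the demo inlines this as
    # images[0].resize([512, 512], PIL.Image.ANTIALIAS).
    return image.resize(size, _LANCZOS)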
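
The last hunk raises the default of the Steps slider from 1 to 3, which is the value predict() receives unless the user moves the slider. A minimal wiring sketch follows; everything apart from the slider itself (the textbox, image output, button, and click handler) is an assumption about the surrounding UI, not taken from the diff.

import gradio as gr

css = ""  # placeholder; hf_demo.py defines its own stylesheet

with gr.Blocks(css=css) as demo:
    prompt = gr.Textbox(label="Prompt")    # assumed control
    result = gr.Image(label="Result")      # assumed control
    with gr.Accordion("Advanced options", open=False):
        steps = gr.Slider(
            label="Steps",
            value=3,      # new default after this commit (was 1)
            minimum=1,
            maximum=4,
            step=1,
        )
    generate = gr.Button("Generate")       # assumed control
    # predict() receives the slider value as its steps argument.
    generate.click(predict, inputs=[prompt, steps], outputs=result)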