HelloSun committed on
Commit
200226c
·
verified ·
1 Parent(s): 772cf94

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -87
app.py CHANGED
@@ -1,58 +1,18 @@
1
  import gradio as gr
2
- import numpy as np
3
- from optimum.intel import OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
4
- from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel
5
- from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
6
- from diffusers import DiffusionPipeline
7
- from diffusers.schedulers import EulerDiscreteScheduler
8
- import openvino.runtime as ov
9
- from typing import Optional, Dict
10
- from huggingface_hub import snapshot_download
11
 
12
- #model_id = "echarlaix/sdxl-turbo-openvino-int8"
13
- #model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
14
- #model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
15
  model_id = "yujiepan/dreamshaper-8-lcm-openvino-w8a8"
16
-
17
- #safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
18
-
19
-
20
- #pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
21
- #pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, device='CPU',)
22
- pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, device='CPU',)
23
- #batch_size, num_images, height, width = 1, 1, 512, 512
24
- #pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
25
-
26
- # LoRA cannot be used with this pipeline
27
- #pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
28
- #pipeline.set_adapters("pixel")
29
-
30
-
31
- # Select the sampling method (scheduler); adding a new one makes it crash at runtime
32
- #scheduler = EulerDiscreteScheduler()
33
- #pipeline.scheduler = scheduler
34
-
35
- #badhandv4
36
- #pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
37
- #hiten1
38
- #pipeline.load_textual_inversion("./hiten1.pt", "hiten1")
39
- #pipeline.compile()
40
-
41
- #TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'
42
- #negative_prompt="easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored, "
43
 
44
  def infer(prompt, num_inference_steps):
45
-
46
  image = pipeline(
47
- prompt = prompt,
48
- #negative_prompt = negative_prompt,
49
- guidance_scale = 1.0,
50
- num_inference_steps = num_inference_steps,
51
- width = 512,
52
- height = 512,
53
  num_images_per_prompt=1,
54
  ).images[0]
55
-
56
  return image
57
 
58
  examples = [
@@ -61,62 +21,45 @@ examples = [
61
  "A delicious ceviche cheesecake slice",
62
  ]
63
 
64
- css="""
65
  #col-container {
66
  margin: 0 auto;
67
  max-width: 520px;
68
  }
69
  """
70
 
71
-
72
  with gr.Blocks(css=css) as demo:
73
-
74
  with gr.Column(elem_id="col-container"):
75
- gr.Markdown(f"""
76
- # Demo : yujiepan/dreamshaper-8-lcm-openvino-w8a8 ⚡
77
- """)
78
 
79
- with gr.Row():
80
-
81
- prompt = gr.Text(
82
- label="Prompt",
83
- show_label=False,
84
- max_lines=1,
85
- placeholder="Enter your prompt",
86
- container=False,
87
- )
88
-
89
- run_button = gr.Button("Run", scale=0)
90
 
 
91
  result = gr.Image(label="Result", show_label=False)
92
 
93
  with gr.Accordion("Advanced Settings", open=False):
94
- #with gr.Row():
95
- # negative_prompt = gr.Text(
96
- # label="Negative prompt",
97
- # max_lines=1,
98
- # placeholder="Enter a negative prompt",
99
- # )
100
-
101
- with gr.Row():
102
-
103
- num_inference_steps = gr.Slider(
104
- label="Number of inference steps",
105
- minimum=1,
106
- maximum=50,
107
- step=1,
108
- value=8,
109
- )
110
-
111
  gr.Examples(
112
- examples = examples,
113
- inputs = [prompt]
114
  )
115
 
116
  run_button.click(
117
- fn = infer,
118
- inputs = [prompt, num_inference_steps],
119
- outputs = [result]
120
  )
121
 
122
- demo.queue().launch(share=True)
 
1
  import gradio as gr
2
+ from optimum.intel import OVStableDiffusionPipeline
 
 
 
 
 
 
 
 
3
 
 
 
 
# OpenVINO-quantized (w8a8) LCM-distilled Dreamshaper-8 checkpoint on the HF Hub.
model_id = "yujiepan/dreamshaper-8-lcm-openvino-w8a8"
# Load the Stable Diffusion pipeline on CPU at import time; OVStableDiffusionPipeline
# compiles the model for OpenVINO inference. NOTE(review): loading happens once per
# process start, so app startup includes the full model download/compile cost.
pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, device='CPU')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
def infer(prompt, num_inference_steps, width=512, height=512, guidance_scale=1.0):
    """Generate one image from *prompt* with the module-level OpenVINO pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        num_inference_steps: Number of denoising steps (LCM-distilled models
            typically need only a handful, e.g. 4-8).
        width: Output image width in pixels. Defaults to 512, the original
            hard-coded value, so existing callers are unaffected.
        height: Output image height in pixels. Defaults to 512.
        guidance_scale: Classifier-free guidance scale. Defaults to 1.0
            (effectively no guidance), the value the original code hard-coded.

    Returns:
        The first generated image (``.images[0]`` of the pipeline output).
    """
    image = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=1,
    ).images[0]
    return image
17
 
18
  examples = [
 
21
  "A delicious ceviche cheesecake slice",
22
  ]
23
 
# Page CSS: center the main column and cap its width at 520px.
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
30
 
 
# Build the Gradio UI: one centered column holding a prompt box, a Run button,
# the result image, and an "Advanced Settings" accordion with the step slider.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Demo : yujiepan/dreamshaper-8-lcm-openvino-w8a8 ⚡")

        # Single-line prompt input; label hidden because the Markdown title
        # above already identifies the demo.
        prompt = gr.Text(
            label="Prompt",
            show_label=False,
            placeholder="Enter your prompt",
            container=False,
        )

        # scale=0 keeps the button at its natural width instead of stretching.
        run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            # LCM models converge in few steps; 8 is a sensible default.
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=8,
            )

        # Clickable example prompts that fill the prompt textbox.
        gr.Examples(
            examples=examples,
            inputs=[prompt]
        )

        # Wire the button to the inference function.
        run_button.click(
            fn=infer,
            inputs=[prompt, num_inference_steps],
            outputs=[result]
        )

# Queue requests (serializes CPU-bound inference) and launch with a public
# share link.
demo.queue().launch(share=True)