HelloSun committed on
Commit
d80760a
1 Parent(s): 0f45713

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -14,7 +14,7 @@ model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
14
  #pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
15
  pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
16
 
17
- batch_size, num_images, height, width = 1, 1, 512, 512
18
  pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
19
  pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
20
  pipeline.compile()
@@ -22,17 +22,17 @@ pipeline.compile()
22
  #TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'
23
  #negative_prompt="easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored, "
24
 
25
- def infer(prompt, negative_prompt, num_inference_steps):
26
 
27
  image = pipeline(
28
  prompt = prompt,
29
- negative_prompt = negative_prompt,
30
  # guidance_scale = guidance_scale,
31
  num_inference_steps = num_inference_steps,
32
  width = width,
33
  height = height,
34
  num_images_per_prompt=num_images,
35
- ).images[0]
36
 
37
  return image
38
 
@@ -72,12 +72,12 @@ with gr.Blocks(css=css) as demo:
72
  result = gr.Image(label="Result", show_label=False)
73
 
74
  with gr.Accordion("Advanced Settings", open=False):
75
- with gr.Row():
76
- negative_prompt = gr.Text(
77
- label="Negative prompt",
78
- max_lines=1,
79
- placeholder="Enter a negative prompt",
80
- )
81
 
82
  with gr.Row():
83
 
@@ -96,7 +96,7 @@ with gr.Blocks(css=css) as demo:
96
 
97
  run_button.click(
98
  fn = infer,
99
- inputs = [prompt, negative_prompt, num_inference_steps],
100
  outputs = [result]
101
  )
102
 
 
14
  #pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
15
  pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
16
 
17
+ batch_size, num_images, height, width = 1, 2, 512, 512
18
  pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
19
  pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
20
  pipeline.compile()
 
22
  #TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'
23
  #negative_prompt="easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored, "
24
 
25
+ def infer(prompt, num_inference_steps):
26
 
27
  image = pipeline(
28
  prompt = prompt,
29
+ #negative_prompt = negative_prompt,
30
  # guidance_scale = guidance_scale,
31
  num_inference_steps = num_inference_steps,
32
  width = width,
33
  height = height,
34
  num_images_per_prompt=num_images,
35
+ ).images
36
 
37
  return image
38
 
 
72
  result = gr.Image(label="Result", show_label=False)
73
 
74
  with gr.Accordion("Advanced Settings", open=False):
75
+ #with gr.Row():
76
+ # negative_prompt = gr.Text(
77
+ # label="Negative prompt",
78
+ # max_lines=1,
79
+ # placeholder="Enter a negative prompt",
80
+ # )
81
 
82
  with gr.Row():
83
 
 
96
 
97
  run_button.click(
98
  fn = infer,
99
+ inputs = [prompt, num_inference_steps],
100
  outputs = [result]
101
  )
102