ysharma committed
Commit f5929dc • 1 Parent(s): f31b35e

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -28,7 +28,7 @@ def inference(input_img, num_inference_steps, guidance_scale, seed ):
     #result = pipeline(cond, num_inference_steps=75).images[0]
     result = pipeline(cond, num_inference_steps=num_inference_steps,
                       guidance_scale=guidance_scale,
-                      generator=torch.Generator(pipeline.device).manual_seed(seed)).images[0]
+                      generator=torch.Generator(pipeline.device).manual_seed(int(seed))).images[0]
 
     # for general real and synthetic images of general objects
     # usually it is enough to have around 28 inference steps
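The functional change in this first hunk is the `int(seed)` cast: `torch.Generator.manual_seed` expects an integer, while the Gradio number input that feeds `seed` can hand the callback a float. A minimal sketch of the idea outside the app (the value and the "cpu" device are illustrative):

```python
import torch

seed = 42.0  # a Gradio Number/Slider may deliver the seed as a float

# manual_seed expects an integer; passing a float straight from the UI can
# raise a TypeError, so the value is cast before seeding the generator.
generator = torch.Generator("cpu").manual_seed(int(seed))

# Any op that accepts a generator now reproduces the same values for a fixed seed.
noise = torch.randn(2, 2, generator=generator)
```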
@@ -49,7 +49,7 @@ import gradio as gr
 with gr.Blocks() as demo:
     gr.Markdown("<h1><center> Zero123++ Demo</center></h1>")
     with gr.Column():
-        input_img = gr.Image(label='Input Image', tyoe='filepath')
+        input_img = gr.Image(label='Input Image', type='filepath')
     with gr.Column():
         output_img = gr.Image(label='Zero123++ Output')
     with gr.Accordion("Advanced options:", open=False):
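The second hunk only fixes the misspelled keyword (`tyoe` to `type`), but that spelling meant the setting never reached the component. With the keyword correct, `type='filepath'` makes Gradio hand the event handler a path string to a temporary copy of the upload instead of the component's default numpy array. A one-line sketch of the corrected component (the label is the one from the diff):

```python
import gradio as gr

# type='filepath' => the handler receives a str path to the uploaded image;
# with the keyword misspelled, the component kept its default format.
input_img = gr.Image(label='Input Image', type='filepath')
```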
@@ -73,7 +73,7 @@ with gr.Blocks() as demo:
         ],
         inputs=[input_img, num_inference_steps, guidance_scale, seed],
         outputs=output_img,
-        fn=dummy,
+        fn=inference,
         cache_examples=True,
     )
 
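The last hunk swaps a leftover `dummy` callable for the real `inference` function. This matters because `cache_examples=True` makes `gr.Examples` execute `fn` on every example row when the Space starts and reuse the cached outputs afterwards, so the examples only show genuine Zero123++ results if `fn` is the actual pipeline call. A minimal, self-contained sketch of that wiring (the stub function, component defaults, and the example row are illustrative, not taken from app.py):

```python
import gradio as gr

def inference(input_img, num_inference_steps, guidance_scale, seed):
    # stand-in for the real Zero123++ pipeline call in app.py
    return input_img

with gr.Blocks() as demo:
    input_img = gr.Image(label='Input Image', type='filepath')
    num_inference_steps = gr.Slider(10, 100, value=75, label='Inference steps')
    guidance_scale = gr.Slider(1.0, 10.0, value=4.0, label='Guidance scale')
    seed = gr.Number(value=42, label='Seed')
    output_img = gr.Image(label='Zero123++ Output')

    gr.Examples(
        examples=[['example.png', 75, 4.0, 42]],  # hypothetical example row
        inputs=[input_img, num_inference_steps, guidance_scale, seed],
        outputs=output_img,
        fn=inference,          # with caching on, this runs for each example at startup,
        cache_examples=True,   # so it must be the real function rather than a placeholder
    )
```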