fffiloni committed on
Commit
41b5a1b
β€’
1 Parent(s): 56173d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -8,17 +8,20 @@ token = os.environ.get('HF_TOKEN')
8
# Remote Gradio Spaces loaded as callable clients:
# - `caption`: image -> text description (the log message suggests a GPT-2
#   based captioner — TODO confirm against the SRDdev Space).
caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
# - `audio_gen`: text -> audio (AudioLDM); the HF token is required to call
#   the cloned Space.
audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
10
 
 
11
def infer(image_input, manual_caption, duration_in):
    """Generate a sound effect matching an image.

    If no manual description is supplied, the image-captioning Space
    produces one; the resulting text is then sent to the text-to-audio
    Space.

    Parameters
    ----------
    image_input : str
        Filepath of the input image (comes from gr.Image(type="filepath")).
    manual_caption : str
        Optional user-written scene description; empty/blank means
        "auto-caption the image".
    duration_in : int or float
        Requested audio duration in seconds.

    Returns
    -------
    tuple
        (caption text, generated audio payload, gr.Group visibility update).
    """
    print(duration_in)
    # `not manual_caption` instead of `== ""`: a None value previously fell
    # into the else-branch and crashed on string concatenation below.
    if not manual_caption:
        cap = caption(image_input, fn_index=0)
        print("gpt2 caption: " + cap)
    else:
        cap = manual_caption
        print("manual caption: " + cap)  # fixed log typo ("captiony")
    # Positional args mirror the audioldm Space API; NOTE(review): 2.5 looks
    # like a guidance scale and 45/3 like seed/candidate counts — confirm
    # against the target Space before relying on them.
    sound = audio_gen(cap, duration_in, 2.5, 45, 3, fn_index=0)
    # sound[1] is the audio payload returned by the audio-generation Space.
    return cap, sound[1], gr.Group.update(visible=True)
22
 
23
  title = """
24
  <div style="text-align: center; max-width: 700px; margin: 0 auto;">
@@ -76,7 +79,7 @@ with gr.Blocks(css="style.css") as demo:
76
  gr.HTML(title)
77
 
78
  input_img = gr.Image(type="filepath", elem_id="input-img")
79
- manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=2, placeholder="If you're not happy with sound result, you can manually describe the scene depicted in your image :)")
80
  duration_in = gr.Slider(minimum=5, maximum=30, step=5, value=10, label="Duration")
81
  caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
82
  sound_output = gr.Audio(label="Result", elem_id="sound-output")
@@ -90,7 +93,7 @@ with gr.Blocks(css="style.css") as demo:
90
 
91
  gr.HTML(article)
92
 
93
- generate.click(infer, inputs=[input_img, manual_cap, duration_in], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
94
  share_button.click(None, [], [], _js=share_js)
95
 
96
  demo.queue(max_size=32).launch(debug=True)
 
8
  caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
9
  audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
10
 
11
# Default placeholder for the manual-caption textbox; infer() also reuses it
# when refreshing the placeholder after a generation run.
ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
12
def infer(image_input, manual_caption, duration_in):
    """Generate a sound effect matching an image.

    If no manual description is supplied, the image-captioning Space
    produces one and the textbox placeholder is refreshed to show it;
    the caption is then sent to the text-to-audio Space.

    Parameters
    ----------
    image_input : str
        Filepath of the input image (comes from gr.Image(type="filepath")).
    manual_caption : str
        Optional user-written scene description; empty/blank means
        "auto-caption the image".
    duration_in : int or float
        Requested audio duration in seconds.

    Returns
    -------
    tuple
        (caption text, generated audio payload, gr.Textbox placeholder
        update, gr.Group visibility update).
    """
    print(duration_in)
    # `not manual_caption` instead of `== ""`: a None value previously fell
    # into the else-branch and crashed on string concatenation below.
    if not manual_caption:
        cap = caption(image_input, fn_index=0)
        print("gpt2 caption: " + cap)
        ph_update = "GPT-2 Caption: " + cap  # fixed placeholder typo ("GP2")
    else:
        cap = manual_caption
        print("manual caption: " + cap)  # fixed log typo ("captiony")
        ph_update = ""
    # Positional args mirror the audioldm Space API; NOTE(review): 2.5 looks
    # like a guidance scale and 45/3 like seed/candidate counts — confirm
    # against the target Space before relying on them.
    sound = audio_gen(cap, duration_in, 2.5, 45, 3, fn_index=0)
    # .strip() removes the stray leading space that appeared in the
    # placeholder when ph_update was empty.
    return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update} {ph_message}".strip()), gr.Group.update(visible=True)
25
 
26
  title = """
27
  <div style="text-align: center; max-width: 700px; margin: 0 auto;">
 
79
  gr.HTML(title)
80
 
81
  input_img = gr.Image(type="filepath", elem_id="input-img")
82
+ manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=2, placeholder=ph_message)
83
  duration_in = gr.Slider(minimum=5, maximum=30, step=5, value=10, label="Duration")
84
  caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
85
  sound_output = gr.Audio(label="Result", elem_id="sound-output")
 
93
 
94
  gr.HTML(article)
95
 
96
+ generate.click(infer, inputs=[input_img, manual_cap, duration_in], outputs=[caption_output, sound_output, manual_cap, share_group], api_name="i2fx")
97
  share_button.click(None, [], [], _js=share_js)
98
 
99
  demo.queue(max_size=32).launch(debug=True)