fffiloni committed on
Commit
be460ce
β€’
1 Parent(s): 7b5386c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -9,7 +9,7 @@ caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
9
  audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
10
 
11
  ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
12
- def infer(image_input, manual_caption, duration_in):
13
  print(duration_in)
14
  if manual_caption == "":
15
  cap = caption(image_input, fn_index=0)
@@ -20,7 +20,7 @@ def infer(image_input, manual_caption, duration_in):
20
  print("manual captiony: " + cap)
21
  ph_update=""
22
 
23
- sound = audio_gen(cap, duration_in, 2.5, 440, 3, fn_index=0)
24
 
25
  return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True)
26
 
@@ -81,12 +81,12 @@ with gr.Blocks(css="style.css") as demo:
81
 
82
  input_img = gr.Image(type="filepath", elem_id="input-img")
83
 
84
- with gr.Group():
85
- with gr.Column():
86
- manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=3, placeholder=ph_message)
87
- with gr.Row():
88
- duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
89
- #seed_in = gr.Number(label="Seed", value=45)
90
  caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
91
  sound_output = gr.Audio(label="Result", elem_id="sound-output")
92
 
@@ -99,7 +99,7 @@ with gr.Blocks(css="style.css") as demo:
99
 
100
  gr.HTML(article)
101
 
102
- generate.click(infer, inputs=[input_img, manual_cap, duration_in], outputs=[caption_output, sound_output, manual_cap, share_group], api_name="i2fx")
103
  share_button.click(None, [], [], _js=share_js)
104
 
105
  demo.queue(max_size=32).launch(debug=True)
 
9
  audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
10
 
11
  ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
12
+ def infer(image_input, manual_caption, duration_in, seed):
13
  print(duration_in)
14
  if manual_caption == "":
15
  cap = caption(image_input, fn_index=0)
 
20
  print("manual captiony: " + cap)
21
  ph_update=""
22
 
23
+ sound = audio_gen(cap, duration_in, 2.5, seed, 3, fn_index=0)
24
 
25
  return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True)
26
 
 
81
 
82
  input_img = gr.Image(type="filepath", elem_id="input-img")
83
 
84
+ with gr.Column():
85
+ manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=3, placeholder=ph_message)
86
+ with gr.Row():
87
+ duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
88
+ seed_in = gr.Slider(label="Seed", value=440, minimum=45, maximum=10000, step=1)
89
+
90
  caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
91
  sound_output = gr.Audio(label="Result", elem_id="sound-output")
92
 
 
99
 
100
  gr.HTML(article)
101
 
102
+ generate.click(infer, inputs=[input_img, manual_cap, duration_in, seed_in], outputs=[caption_output, sound_output, manual_cap, share_group], api_name="i2fx")
103
  share_button.click(None, [], [], _js=share_js)
104
 
105
  demo.queue(max_size=32).launch(debug=True)