Tonic gongouveia committed on
Commit
b6a8499
1 Parent(s): 4743f10

Update layout, as in "Create a Huggingface demo page #39" @gongouveia (#7)

Browse files

- Update layout, as in "Create a Huggingface demo page #39"

@gongouveia

(86e09d9a3ce7b70162967ec410bd20839e77e41a)


Co-authored-by: Gouveia <gongouveia@users.noreply.huggingface.co>

Files changed (1) hide show
  1. app.py +25 -27
app.py CHANGED
@@ -20,8 +20,8 @@ You can also use 🌬️💬📝WhisperSpeech by cloning this space. 🧬🔬
20
  We're **celebrating the release of the whisperspeech** at [the LAION community, if you love open source ai learn more here : https://laion.ai/](https://laion.ai/) big thanks to the folks at huggingface for the community grant 🤗
21
 
22
  ### How to Use
23
- Input text with tahe language identifiers provided to create a multilingual speech. Optionally you can add an audiosample to make a voice print.Scroll down and try the api <3 Gradio.
24
- This space runs on ZeroGPU, so **you need to be patient** while you acquire the GPU and load the model the first time you make a request !
25
  """
26
 
27
 
@@ -78,33 +78,31 @@ def whisper_speech_demo(multilingual_text, speaker_audio):
78
 
79
  return (24000, concatenated_audio.T)
80
 
81
-
82
  with gr.Blocks() as demo:
83
  gr.Markdown(title)
84
- output_audio = gr.Audio(label="🌟Collabora🌬️💬📝WhisperSpeech")
85
- generate_button = gr.Button("Try 🌟Collabora🌬️💬📝WhisperSpeech")
86
- with gr.Accordion("🌟Collabora🌬️WhisperSpeech💬Voice Print and📝Language List", open=False):
87
- with gr.Row():
88
- speaker_input = gr.Audio(label="Upload or Record Speaker Audio (optional)🌬️💬",
89
- sources=["upload", "microphone"])
90
- with gr.Row():
91
- with gr.Accordion("Available Languages and Their Tags", open=False):
92
- formatted_language_list = "\n".join([f"`<{lang}>` {LANGUAGES[lang]}" for lang in LANGUAGES])
93
- gr.Markdown(formatted_language_list)
94
- with gr.Row():
95
- text_input = gr.Textbox(label="Enter multilingual text💬📝",
96
- placeholder="e.g., <en> Hello <fr> Bonjour <es> Hola")
97
- with gr.Row():
98
- with gr.Accordion("Try Multilingual Text Examples", open=False):
99
- gr.Examples(
100
- examples=text_examples,
101
- inputs=[text_input],
102
- outputs=[output_audio],
103
- fn=whisper_speech_demo,
104
- cache_examples=False,
105
- label="Try these to get started !🌟🌬️"
106
- )
107
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  generate_button.click(whisper_speech_demo, inputs=[text_input, speaker_input], outputs=output_audio)
109
 
110
- demo.launch()
 
 
 
20
  We're **celebrating the release of the whisperspeech** at [the LAION community, if you love open source ai learn more here : https://laion.ai/](https://laion.ai/) big thanks to the folks at huggingface for the community grant 🤗
21
 
22
  ### How to Use
23
+ Input text with the language identifiers provided to create a multilingual speech. Optionally you can add an audiosample to make a voice print.Scroll down and try the api <3 Gradio.
24
+ This space runs on ZeroGPU, so **you need to be patient** while you acquire the GPU and load the model the first time you make a request!
25
  """
26
 
27
 
 
78
 
79
  return (24000, concatenated_audio.T)
80
 
 
81
  with gr.Blocks() as demo:
82
  gr.Markdown(title)
83
+ output_audio = gr.Audio(label="🌟Collabora🌬️💬📝WhisperSpeech", show_download_button=True)
84
+ generate_button = gr.Button("🌟Collabora🌬️💬📝WhisperSpeech")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
+ with gr.Row(equal_height=True):
87
+
88
+ with gr.Column():
89
+ text_input = gr.Textbox(label="Enter multilingual text💬📝", placeholder="e.g., <en> Hello <fr> Bonjour <es> Hola", lines = 10 )
90
+ with gr.Accordion("Available Languages and Their Tags", open=False):
91
+ gr.Markdown("\n".join([f"`<{lang}>` {LANGUAGES[lang]}" for lang in LANGUAGES])),
92
+ with gr.Accordion("Try Multilingual Text Examples", open=False):
93
+ gr.Examples(
94
+ examples=text_examples,
95
+ inputs=[text_input],
96
+ outputs=[output_audio],
97
+ fn=whisper_speech_demo,
98
+ cache_examples=True,
99
+ label="Try these text examples to get started !🌟🌬️"
100
+ )
101
+ speaker_input = gr.Audio( sources=["upload", "microphone"],label="Upload or Record Speaker Audio (optional)🌬️💬")
102
+
103
+ # gr.Examples(examples=examples, inputs=[text_input, speaker_input], outputs=output_audio,cache_examples=True, label ='Plug and Play with this Examples !🌟🌬️'),
104
  generate_button.click(whisper_speech_demo, inputs=[text_input, speaker_input], outputs=output_audio)
105
 
106
+
107
+
108
+ demo.launch()