Tonic committed
Commit 94d2571 • Parent: a175fb2

Update app.py

Files changed (1):
  1. app.py +23 -8
app.py CHANGED
@@ -25,8 +25,12 @@ Input text with tahe language identifiers provided to create a multilingual spee
 This space runs on ZeroGPU, so **you need to be patient** while you acquire the GPU and load the model the first time you make a request !
 """
 
-# text examples=["<en> Hello, how are you? <fr> Bonjour, comment ça va?", "<de> Guten Tag <it> Buongiorno <jp> こんにちは"]
-# audio examples=["path/to/tonic.wav"]
+
+text_examples = [
+    ["<en> WhisperSpeech is an opensource library that helps you hack whisper."],
+    ["<de> WhisperSpeech is multi-lingual <es> y puede cambiar de idioma <hi> मध्य वाक्य में"],
+    ["<en> The big difference between Europe <fr> et les Etats Unis <pl> jest to, że mamy tak wiele języków <uk> тут, в Європі"]
+]
 
 # Function to parse the multilingual input text
 def parse_multilingual_text(input_text):
@@ -76,14 +80,25 @@ def whisper_speech_demo(multilingual_text, speaker_audio):
 
 with gr.Blocks() as demo:
     gr.Markdown(title)
-    output_audio = gr.Audio(label="Generated Speech")
+    output_audio = gr.Audio(label="🌟Collabora🌬️💬📝WhisperSpeech")
     generate_button = gr.Button("Try 🌟Collabora🌬️💬📝WhisperSpeech")
     with gr.Row():
-        text_input = gr.Textbox(label="Enter multilingual text", placeholder="e.g., <en> Hello <fr> Bonjour <es> Hola")
-        speaker_input = gr.Audio(label="Upload or Record Speaker Audio (optional)", sources=["upload", "microphone"])
-    with gr.Accordion("Available Languages and Their Tags"):
-        language_list = "\n".join([f"{lang}: {LANGUAGES[lang]}" for lang in LANGUAGES])
-        gr.Markdown(language_list)
+        text_input = gr.Textbox(label="Enter multilingual text💬📝", placeholder="e.g., <en> Hello <fr> Bonjour <es> Hola")
+        speaker_input = gr.Audio(label="Upload or Record Speaker Audio (optional)🌬️💬", sources=["upload", "microphone"])
+    with gr.Row():
+        with gr.Accordion("Available Languages and Their Tags", open=False):
+            formatted_language_list = "\n".join([f"<{lang}> {LANGUAGES[lang]}" for lang in LANGUAGES])
+            gr.Markdown(formatted_language_list)
+    with gr.Row():
+        with gr.Accordion("Try Multilingual Text Examples", open=False):
+            gr.Examples(
+                examples=text_examples,
+                inputs=[text_input],
+                outputs=[output_audio],
+                fn=whisper_speech_demo,
+                cache_examples=True,
+                label="Try these to get started !🌟🌬️"
+            )
     generate_button.click(whisper_speech_demo, inputs=[text_input, speaker_input], outputs=output_audio)
 
 demo.launch()
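
For reference, the "<lang>" tag format used in text_examples and the cache_examples=True wiring of gr.Examples can be exercised outside the Space with a small self-contained sketch. The parse_multilingual_text below is a hypothetical stand-in (app.py's real parser is not part of this diff), and the synthesis step is replaced by a text echo; only the gr.Examples arguments mirror the ones added in this commit.

# Minimal sketch, not the Space's actual code: a hypothetical parser for the
# "<en> ... <fr> ..." tag format plus the gr.Examples wiring from this commit.
import re
import gradio as gr

def parse_multilingual_text(input_text):
    # Hypothetical: "<en> Hello <fr> Bonjour" -> [("en", "Hello"), ("fr", "Bonjour")]
    return [(lang, text.strip()) for lang, text in re.findall(r"<(\w+)>\s*([^<]+)", input_text)]

def whisper_speech_demo(multilingual_text, speaker_audio=None):
    # Dummy stand-in for the real text-to-speech call: echo the parsed segments.
    segments = parse_multilingual_text(multilingual_text)
    return " | ".join(f"{lang}: {text}" for lang, text in segments)

text_examples = [["<en> Hello, how are you? <fr> Bonjour, comment ça va?"]]

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Enter multilingual text")
    output_box = gr.Textbox(label="Parsed segments")  # gr.Audio in the real Space
    gr.Examples(
        examples=text_examples,
        inputs=[text_input],
        outputs=[output_box],
        fn=whisper_speech_demo,
        cache_examples=True,  # results are computed once at startup and served from the cache
    )

if __name__ == "__main__":
    demo.launch()

With cache_examples=True, Gradio needs both fn and outputs so it can pre-compute each example's result, which is why the commit passes whisper_speech_demo and output_audio to gr.Examples even though the Generate button already wires them up.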