XuminYu committed on
Commit af04ad9
2 Parent(s): f819f0f 3828755
Files changed (1)
  1. app.py +10 -5
app.py CHANGED
@@ -71,7 +71,7 @@ def predict(prompt, style, audio_file_pth, speed, agree):
         "54ad3237d7_3fef5adc6f_zh_default" : "cached_outputs/3.wav",
         "8190e911f8_9897b60a4e_jp_default" : "cached_outputs/4.wav"
     }
-    unique_code = hash_code_for_cached_output.get_unique_code(audio_file_pth, style, prompt)
+    unique_code = hash_code_for_cached_output.get_unique_code(audio_file_pth, prompt, style)
     print("unique_code is", unique_code)
     if unique_code in list(cached_outputs.keys()):
         return (
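The only functional change in this hunk is the argument order passed to the cache-key helper: the prompt now precedes the style, so lookups against keys of the form "54ad3237d7_3fef5adc6f_zh_default" resolve correctly. The helper's implementation is not part of this diff; the following is a hypothetical sketch of a key builder with the same call signature, assuming the key joins a truncated hash of the reference audio, a truncated hash of the prompt, and a suffix from the style/language selection:

```python
import hashlib

def get_unique_code(audio_file_pth, prompt, style):
    """Hypothetical stand-in for hash_code_for_cached_output.get_unique_code.

    Assumption: the key joins a truncated hash of the reference audio, a
    truncated hash of the prompt text, and a suffix from the style/language
    selection, yielding keys shaped like "54ad3237d7_3fef5adc6f_zh_default".
    """
    with open(audio_file_pth, "rb") as f:
        audio_hash = hashlib.md5(f.read()).hexdigest()[:10]
    prompt_hash = hashlib.md5(prompt.encode("utf-8")).hexdigest()[:10]
    return f"{audio_hash}_{prompt_hash}_{style}"
```

Under that assumption, passing the style before the prompt would hash the wrong string into the second key component and miss every cached entry, which is what the one-line fix above addresses.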
@@ -181,9 +181,12 @@ description = """
 In December 2023, we released [OpenVoice V1](https://huggingface.co/spaces/myshell-ai/OpenVoice), an instant voice cloning approach that replicates a speaker's voice and generates speech in multiple languages using only a short audio clip. OpenVoice V1 enables granular control over voice styles, replicates the tone color of the reference speaker and achieves zero-shot cross-lingual voice cloning.
 """
 
-# description_v2 = """
-# In April 2024, we released **OpenVoice V2**, which includes all features in V1 and has: **Better Audio Quality**. OpenVoice V2 adopts a different training strategy that delivers better audio quality. **Native Multi-lingual Support**. English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2. **Free Commercial Use**. Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
-# """
+description_v2 = """
+In April 2024, we released **OpenVoice V2**, which includes all features in V1 and has:
+- **Better Audio Quality**. OpenVoice V2 adopts a different training strategy that delivers better audio quality.
+- **Native Multi-lingual Support**. English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2.
+- **Free Commercial Use**. Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
+"""
 
 markdown_table = """
 <div align="center" style="margin-bottom: 10px;">
@@ -211,7 +214,6 @@ markdown_table_v2 = """
 """
 content = """
 <div>
-In April 2024, we released <strong>OpenVoice V2</strong>, which includes all features in V1 and has: <strong>Better Audio Quality</strong>: OpenVoice V2 adopts a different training strategy that delivers better audio quality. <strong>Native Multi-lingual Support</strong>: English, Spanish, French, Chinese, Japanese, and Korean are natively supported in OpenVoice V2.<strong>Free Commercial Use</strong>: Starting from April 2024, both V2 and V1 are released under the MIT License, free for commercial use.
 <strong>If the generated voice does not sound like the reference voice, please refer to <a href='https://github.com/myshell-ai/OpenVoice/blob/main/docs/QA.md'>this QnA</a>.</strong> <strong>If you want to deploy the model by yourself and perform inference, please refer to <a href='https://github.com/myshell-ai/OpenVoice/blob/main/demo_part3.ipynb'>this jupyter notebook</a>.</strong>
 </div>
 """
@@ -259,6 +261,9 @@ with gr.Blocks(analytics_enabled=False) as demo:
         with gr.Column():
             gr.Video('./openvoicev2.mp4', autoplay=True)
 
+    with gr.Row():
+        gr.Markdown(description_v2)
+
     with gr.Row():
         gr.HTML(wrapped_markdown_content)
 
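This last hunk places the V2 release notes between the demo video and the existing HTML block. Below is a minimal, self-contained sketch of the resulting layout; the placeholder strings stand in for the app's real description_v2 and wrapped_markdown_content, and the video row is reduced to a comment so the sketch runs without the media file:

```python
import gradio as gr

# Placeholders; the real app.py builds these strings earlier in the file.
description_v2 = "In April 2024, we released **OpenVoice V2** ..."
wrapped_markdown_content = "<div>OpenVoice V2 details ...</div>"

with gr.Blocks(analytics_enabled=False) as demo:
    # The real Space shows ./openvoicev2.mp4 in a gr.Video row here.

    # New in this commit: render the V2 release notes as Markdown
    # above the existing HTML content.
    with gr.Row():
        gr.Markdown(description_v2)

    with gr.Row():
        gr.HTML(wrapped_markdown_content)

if __name__ == "__main__":
    demo.launch()
```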
 
 