XuminYu committed on
Commit f819f0f
1 Parent(s): 34435aa
Files changed (2)
  1. app.py +6 -9
  2. hash_code_for_cached_output.py +0 -1
app.py CHANGED

@@ -72,6 +72,7 @@ def predict(prompt, style, audio_file_pth, speed, agree):
         "8190e911f8_9897b60a4e_jp_default" : "cached_outputs/4.wav"
     }
     unique_code = hash_code_for_cached_output.get_unique_code(audio_file_pth, style, prompt)
+    print("unique_code is", unique_code)
     if unique_code in list(cached_outputs.keys()):
         return (
             'We get the cached output for you, since you are try to generating an example cloning.',
@@ -178,17 +179,12 @@ title = "MyShell OpenVoice V2"
 
 description = """
 In December 2023, we released [OpenVoice V1](https://huggingface.co/spaces/myshell-ai/OpenVoice), an instant voice cloning approach that replicates a speaker's voice and generates speech in multiple languages using only a short audio clip. OpenVoice V1 enables granular control over voice styles, replicates the tone color of the reference speaker and achieves zero-shot cross-lingual voice cloning.
-
-In April 2024, we released **OpenVoice V2**, which includes all features in V1 and has:
-
-- **Better Audio Quality**. OpenVoice V2 adopts a different training strategy that delivers better audio quality.
-
-- **Native Multi-lingual Support**. English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2.
-
-- **Free Commercial Use**. Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
-
 """
 
+# description_v2 = """
+# In April 2024, we released **OpenVoice V2**, which includes all features in V1 and has: **Better Audio Quality**. OpenVoice V2 adopts a different training strategy that delivers better audio quality. **Native Multi-lingual Support**. English, Spanish, French, Chinese, Japanese and Korean are natively supported in OpenVoice V2. **Free Commercial Use**. Starting from April 2024, both V2 and V1 are released under MIT License. Free for commercial use.
+# """
+
 markdown_table = """
 <div align="center" style="margin-bottom: 10px;">
 
@@ -215,6 +211,7 @@ markdown_table_v2 = """
 """
 content = """
 <div>
+In April 2024, we released <strong>OpenVoice V2</strong>, which includes all features in V1 and has: <strong>Better Audio Quality</strong>: OpenVoice V2 adopts a different training strategy that delivers better audio quality. <strong>Native Multi-lingual Support</strong>: English, Spanish, French, Chinese, Japanese, and Korean are natively supported in OpenVoice V2.<strong>Free Commercial Use</strong>: Starting from April 2024, both V2 and V1 are released under the MIT License, free for commercial use.
 <strong>If the generated voice does not sound like the reference voice, please refer to <a href='https://github.com/myshell-ai/OpenVoice/blob/main/docs/QA.md'>this QnA</a>.</strong> <strong>If you want to deploy the model by yourself and perform inference, please refer to <a href='https://github.com/myshell-ai/OpenVoice/blob/main/demo_part3.ipynb'>this jupyter notebook</a>.</strong>
 </div>
 """
hash_code_for_cached_output.py CHANGED

@@ -1,4 +1,3 @@
-from pydub.utils import mediainfo
 import hashlib
 
 def audio_hash(audio_path):
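The commit drops a presumably unused `pydub.utils.mediainfo` import, leaving the module dependent only on `hashlib`. The module body is not shown beyond these lines, so the following is only a guess at how `audio_hash` and `get_unique_code` could produce keys shaped like `8190e911f8_9897b60a4e_jp_default`; the hash algorithm, truncation length, and field order are all assumptions.

```python
import hashlib

def audio_hash(audio_path, chunk_size=8192):
    # Fingerprint the reference audio by hashing its raw bytes in chunks.
    h = hashlib.md5()
    with open(audio_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()[:10]  # assumed truncation to 10 hex characters

def get_unique_code(audio_file_pth, style, prompt):
    # Combine an audio fingerprint, a prompt fingerprint, and the style label
    # into one underscore-separated key (field order is an assumption).
    prompt_hash = hashlib.md5(prompt.encode("utf-8")).hexdigest()[:10]
    return f"{audio_hash(audio_file_pth)}_{prompt_hash}_{style}"
```

Whatever the exact implementation, the key must be a pure function of the reference audio, style, and prompt, otherwise the pre-rendered files in `cached_outputs/` could never be matched to incoming requests.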