Spaces:
Runtime error
Runtime error
Update cosyvoice/cli/frontend.py
Browse files — cosyvoice/cli/frontend.py (+19 −10)
cosyvoice/cli/frontend.py
CHANGED
|
@@ -21,7 +21,12 @@ import torchaudio.compliance.kaldi as kaldi
|
|
| 21 |
import torchaudio
|
| 22 |
import os
|
| 23 |
import inflect
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
| 26 |
|
| 27 |
|
|
@@ -42,18 +47,20 @@ class CosyVoiceFrontEnd:
|
|
| 42 |
option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
|
| 43 |
option.intra_op_num_threads = 1
|
| 44 |
self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
|
| 45 |
-
self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider"])
|
| 46 |
if os.path.exists(spk2info):
|
| 47 |
self.spk2info = torch.load(spk2info, map_location=self.device)
|
| 48 |
self.instruct = instruct
|
| 49 |
self.allowed_special = allowed_special
|
| 50 |
self.inflect_parser = inflect.engine()
|
| 51 |
-
self.
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
|
|
|
|
|
|
| 57 |
|
| 58 |
def _extract_text_token(self, text):
|
| 59 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
|
@@ -88,7 +95,9 @@ class CosyVoiceFrontEnd:
|
|
| 88 |
def text_normalize(self, text, split=True):
|
| 89 |
text = text.strip()
|
| 90 |
if contains_chinese(text):
|
| 91 |
-
|
|
|
|
|
|
|
| 92 |
text = replace_blank(text)
|
| 93 |
text = replace_corner_mark(text)
|
| 94 |
text = text.replace(".", "、")
|
|
@@ -143,4 +152,4 @@ class CosyVoiceFrontEnd:
|
|
| 143 |
instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
|
| 144 |
model_input['prompt_text'] = instruct_text_token
|
| 145 |
model_input['prompt_text_len'] = instruct_text_token_len
|
| 146 |
-
return model_input
|
|
|
|
| 21 |
import torchaudio
|
| 22 |
import os
|
| 23 |
import inflect
|
| 24 |
+
try:
|
| 25 |
+
import ttsfrd
|
| 26 |
+
use_ttsfrd = True
|
| 27 |
+
except:
|
| 28 |
+
print("failed to import ttsfrd, please normalize input text manually")
|
| 29 |
+
use_ttsfrd = False
|
| 30 |
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
|
| 31 |
|
| 32 |
|
|
|
|
| 47 |
option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
|
| 48 |
option.intra_op_num_threads = 1
|
| 49 |
self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
|
| 50 |
+
self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider" if torch.cuda.is_available() else "CPUExecutionProvider"])
|
| 51 |
if os.path.exists(spk2info):
|
| 52 |
self.spk2info = torch.load(spk2info, map_location=self.device)
|
| 53 |
self.instruct = instruct
|
| 54 |
self.allowed_special = allowed_special
|
| 55 |
self.inflect_parser = inflect.engine()
|
| 56 |
+
self.use_ttsfrd = use_ttsfrd
|
| 57 |
+
if self.use_ttsfrd:
|
| 58 |
+
self.frd = ttsfrd.TtsFrontendEngine()
|
| 59 |
+
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 60 |
+
assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource'
|
| 61 |
+
self.frd.set_lang_type('pinyin')
|
| 62 |
+
self.frd.enable_pinyin_mix(True)
|
| 63 |
+
self.frd.set_breakmodel_index(1)
|
| 64 |
|
| 65 |
def _extract_text_token(self, text):
|
| 66 |
text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
|
|
|
|
| 95 |
def text_normalize(self, text, split=True):
|
| 96 |
text = text.strip()
|
| 97 |
if contains_chinese(text):
|
| 98 |
+
if self.use_ttsfrd:
|
| 99 |
+
text = self.frd.get_frd_extra_info(text, 'input')
|
| 100 |
+
text = text.replace("\n", "")
|
| 101 |
text = replace_blank(text)
|
| 102 |
text = replace_corner_mark(text)
|
| 103 |
text = text.replace(".", "、")
|
|
|
|
| 152 |
instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
|
| 153 |
model_input['prompt_text'] = instruct_text_token
|
| 154 |
model_input['prompt_text_len'] = instruct_text_token_len
|
| 155 |
+
return model_input
|