# chat-tts/test/audio_test.py
# Author: chenjgtea
# Split GPU/CPU run modes (commit a536c15)
# --- Module prelude: environment setup and global model instance ---
import os, sys

# On macOS, let PyTorch fall back to CPU for ops the MPS backend lacks.
if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

# Make project-local packages (Chat2TTS, tool.*) importable when the script
# is run from the repository root.
now_dir = os.getcwd()
sys.path.append(now_dir)

import Chat2TTS
from tool.av import load_audio
from tool.logger import get_logger

logger = get_logger("audio_test")

# Initialize and load the model:
# Module-level singleton; init_chat() below loads the model weights into it.
chat = Chat2TTS.Chat()
def init_chat():
    """Load Chat2TTS models into the module-global ``chat`` instance.

    The model source defaults to ``"local"``; when the ``MODEL`` environment
    variable equals ``"HF"`` (Hugging Face deployment mode) the models are
    pulled from huggingface instead.  Logs the outcome; a load failure is
    logged as an error but does not raise or exit (the script is a test
    harness still under debugging).
    """
    global chat
    source = "local"
    # Pick the deployment mode from the environment.
    model_env = os.getenv("MODEL")
    # Under Hugging Face deployment, use the HF-hosted model data directly.
    if model_env == "HF":
        source = "huggingface"
    # Lazy %-style args so formatting is skipped when the level is disabled.
    logger.info("loading Chat2TTS model..., start source:%s", source)
    # NOTE(review): local_path is a hard-coded Windows path — only meaningful
    # on the original author's machine; confirm before running elsewhere.
    if chat.load_models(source=source, local_path="D:\\chenjgspace\\ai-model\\chattts"):
        # Use the logger consistently instead of mixing in print().
        logger.info("Models loaded end.")
    else:
        # Previously silent (error/exit commented out while debugging);
        # at minimum record the failure.
        logger.error("Models load failed.")
def audo_encode(audio_path="D:\\Download\\audio_test.wav", sample_rate=24000):
    """Load a WAV file and derive a speaker sample from it via ``chat``.

    Generalized with backward-compatible defaults that preserve the original
    hard-coded behavior (name kept as-is — callers use ``audo_encode``).

    Args:
        audio_path: Path of the WAV file to load.
        sample_rate: Target sampling rate passed to ``load_audio``.

    Returns:
        The speaker sample produced by ``chat.sample_audio_speaker``
        (previously computed but discarded).
    """
    sample_audio = load_audio(audio_path, sample_rate)
    # Lazy %-style logging instead of eager str() concatenation.
    logger.info("================sample_audio:%s", sample_audio)
    spk_smp = chat.sample_audio_speaker(sample_audio)
    logger.info("================spk_smp:%s", spk_smp)
    return spk_smp
def main():
    """Script entry point: load the models, then run the encoding test."""
    init_chat()
    # TODO: still needs further debugging (original note: 还需要继续调试)
    audo_encode()


if __name__ == "__main__":
    main()