from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
import ChatTTS
import torch
import torchaudio
import os


class AudioProcessor:
    """Speech processing utility: ASR (speech-to-text) and TTS (text-to-speech).

    Wraps a FunASR SenseVoice model for recognition and a ChatTTS model for
    synthesis. Both models are loaded once at construction time, which is
    slow and — for the ASR model — requires a CUDA device (``cuda:0``).
    """

    def __init__(self):
        # Route Hugging Face downloads through a mirror (needed where
        # huggingface.co is unreachable).
        os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
        self.model_dir = "iic/SenseVoiceSmall"

        # Preload the ASR model with VAD segmentation
        # (segments capped at 30 s each).
        self.asr_model = AutoModel(
            model=self.model_dir,
            vad_model="fsmn-vad",
            vad_kwargs={"max_single_segment_time": 30000},
            device="cuda:0",
            disable_update=True,
        )

        # Preload the TTS model from a local checkpoint; compile=True trades
        # longer startup for faster inference.
        tts_model = ChatTTS.Chat()
        tts_model.load(source="local", custom_path="model/ChatTTS", compile=True)
        self.tts_model = tts_model

    def speech_to_text(self, audio_path):
        """Transcribe the audio file at *audio_path*.

        Returns the post-processed transcript string, or "" on any failure
        (errors are printed, not raised — callers get a best-effort result).
        """
        try:
            res = self.asr_model.generate(
                input=str(audio_path),
                cache={},
                language="auto",  # auto-detect the spoken language
                use_itn=True,  # inverse text normalization (numbers, punctuation)
                batch_size_s=30,
                merge_vad=True,  # merge short VAD segments into longer chunks
                merge_length_s=10,
                hotword="",
            )
            return rich_transcription_postprocess(res[0]["text"])
        except Exception as e:
            print(f"语音识别出错: {str(e)}")
            return ""

    def text_to_speech(self, text, output_path):
        """Synthesize *text* to a WAV file at *output_path*.

        Returns True on success, False on failure (errors are printed,
        not raised).
        """
        try:
            # Create parent directories only when the path actually has a
            # directory component: os.makedirs("") raises FileNotFoundError,
            # which previously broke bare filenames like "out.wav".
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)
            wavs = self.tts_model.infer(text)
            # ChatTTS returns a list of 1-D numpy waveforms; add a channel
            # dimension and save at the model's native 24 kHz sample rate.
            torchaudio.save(
                str(output_path), torch.from_numpy(wavs[0]).unsqueeze(0), 24000
            )
            return True
        except Exception as e:
            print(f"语音合成出错: {str(e)}")
            return False


# Module-level singleton. NOTE: constructing AudioProcessor loads both the
# ASR and TTS models, so merely importing this module is slow and requires
# a CUDA device for the ASR model.
audio_processor = AudioProcessor()
