class VoiceModel:
    """Speech synthesis (TTS) and recognition (STT) helper.

    Thin wrapper around the ElevenLabs and OpenAI audio HTTP APIs.
    Credentials and base URLs are read from ``config.configs[<provider>]``,
    which is assumed to map a provider name to a dict holding at least
    ``"api_key"`` (and, for ElevenLabs, ``"base_url"``).
    """

    def __init__(self, config: "AIConfig"):
        # Keep a reference only; no network calls happen at construction time.
        self.config = config

    def text_to_speech(self, text: str, voice_id: str = "Rachel", model: str = "elevenlabs") -> bytes:
        """Convert *text* to speech audio.

        Args:
            text: The text to synthesize.
            voice_id: Provider-specific voice identifier. The default
                "Rachel" is an ElevenLabs voice; OpenAI uses its own voice
                names (e.g. "alloy") — pass one explicitly for that backend.
            model: Backend to use, "elevenlabs" or "openai".

        Returns:
            Raw audio bytes.

        Raises:
            ValueError: If ``model`` names an unsupported backend.
        """
        if model == "elevenlabs":
            return self._elevenlabs_tts(text, voice_id)
        if model == "openai":
            return self._openai_tts(text, voice_id)
        # Previously an unknown backend fell through and silently returned
        # None; fail loudly so misconfiguration is caught at the call site.
        raise ValueError(f"Unsupported TTS model: {model!r}")

    def _elevenlabs_tts(self, text: str, voice_id: str) -> bytes:
        """Synthesize speech via the ElevenLabs HTTP API; return audio bytes."""
        cfg = self.config.configs["elevenlabs"]

        response = requests.post(
            f"{cfg['base_url']}/text-to-speech/{voice_id}",
            headers={
                # ElevenLabs authenticates with the dedicated "xi-api-key"
                # header, NOT "Authorization: Bearer ..." — the Bearer form
                # is rejected with 401 by their API.
                "xi-api-key": cfg["api_key"],
                "Content-Type": "application/json",
            },
            json={
                "text": text,
                "model_id": "eleven_monolingual_v1",
                "voice_settings": {
                    "stability": 0.5,
                    "similarity_boost": 0.5,
                },
            },
            # requests has NO default timeout; without one a stalled
            # connection would block this call forever.
            timeout=30,
        )

        if response.status_code == 200:
            return response.content  # raw audio binary (e.g. mp3)
        raise Exception(f"TTS失败: {response.text}")

    def _openai_tts(self, text: str, voice_id: str) -> bytes:
        """Synthesize speech via the OpenAI TTS API; return audio bytes.

        This method was referenced by ``text_to_speech`` but never defined,
        so the "openai" branch always raised AttributeError.
        """
        import openai

        client = openai.OpenAI(api_key=self.config.configs["openai"]["api_key"])
        response = client.audio.speech.create(
            model="tts-1",
            voice=voice_id,
            input=text,
        )
        return response.content

    def speech_to_text(self, audio_path: str, model: str = "openai") -> str:
        """Transcribe the audio file at *audio_path* to text.

        Args:
            audio_path: Path to a local audio file.
            model: Backend to use; only "openai" is supported.

        Returns:
            The transcription as plain text.

        Raises:
            ValueError: If ``model`` names an unsupported backend.
        """
        if model == "openai":
            return self._openai_whisper(audio_path)
        # Mirror text_to_speech: never silently return None.
        raise ValueError(f"Unsupported STT model: {model!r}")

    def _openai_whisper(self, audio_path: str) -> str:
        """Transcribe audio with OpenAI Whisper ("whisper-1"), plain-text output."""
        import openai

        client = openai.OpenAI(api_key=self.config.configs["openai"]["api_key"])

        # File handle is closed before returning, even if the API call raises.
        with open(audio_path, "rb") as audio_file:
            transcript = client.audio.transcriptions.create(
                model="whisper-1",
                file=audio_file,
                response_format="text",
            )

        return transcript