"""
语音服务
处理语音识别（STT）和语音合成（TTS）功能
"""

import asyncio
import base64
import io
import json
import logging
from typing import Optional, Dict, Any, Tuple
import aiohttp
import numpy as np
from pydub import AudioSegment
from src.config.settings import settings
from src.utils.logging import get_logger

logger = get_logger(__name__)

class VoiceService:
    """Voice service: speech recognition (STT) and speech synthesis (TTS).

    Providers are selected through ``settings``:
      * STT: ``whisper`` (local openai-whisper model, with a fallback to the
        OpenAI API when a key is configured) or ``azure`` (Azure Speech).
      * TTS: ``edge`` (edge-tts) or ``azure``.
    Any unrecognized provider value falls back to the defaults
    (whisper for STT, edge for TTS).
    """

    def __init__(self):
        """Initialize the service from settings; each key has a safe default."""
        self.stt_provider = getattr(settings, 'STT_PROVIDER', 'whisper')
        self.tts_provider = getattr(settings, 'TTS_PROVIDER', 'edge')
        self.openai_api_key = getattr(settings, 'OPENAI_API_KEY', '')
        self.azure_speech_key = getattr(settings, 'AZURE_SPEECH_KEY', '')
        self.azure_region = getattr(settings, 'AZURE_SPEECH_REGION', 'eastus')
        # Cached local Whisper model: loading the weights is expensive, so it
        # is loaded lazily once instead of on every recognition request.
        self._whisper_model: Optional[Any] = None

        logger.info(f"语音服务初始化完成 - STT: {self.stt_provider}, TTS: {self.tts_provider}")

    async def speech_to_text(self, audio_data: bytes, language: str = "zh-CN") -> Tuple[str, Optional[Dict[str, Any]]]:
        """Convert speech audio to text.

        Args:
            audio_data: Audio payload (WAV bytes expected by the local path).
            language: BCP-47 language tag, e.g. ``"zh-CN"``.

        Returns:
            Tuple of (recognized text, provider metadata).

        Raises:
            Exception: Any provider failure is logged and re-raised.
        """
        try:
            if self.stt_provider == 'whisper':
                return await self._whisper_stt(audio_data, language)
            elif self.stt_provider == 'azure':
                return await self._azure_stt(audio_data, language)
            else:
                # Unknown provider: default to local Whisper.
                return await self._whisper_stt(audio_data, language)

        except Exception as e:
            logger.error(f"语音识别失败: {e}", exc_info=True)
            raise

    async def text_to_speech(self, text: str, language: str = "zh-CN",
                           voice: str = "zh-CN-XiaoxiaoNeural") -> Tuple[bytes, Optional[Dict[str, Any]]]:
        """Convert text to speech audio.

        Args:
            text: Text to synthesize.
            language: BCP-47 language tag, e.g. ``"zh-CN"``.
            voice: Provider voice name (an Edge/Azure neural voice).

        Returns:
            Tuple of (audio bytes, provider metadata).

        Raises:
            Exception: Any provider failure is logged and re-raised.
        """
        try:
            if self.tts_provider == 'edge':
                return await self._edge_tts(text, language, voice)
            elif self.tts_provider == 'azure':
                return await self._azure_tts(text, language, voice)
            else:
                # Unknown provider: default to Edge TTS.
                return await self._edge_tts(text, language, voice)

        except Exception as e:
            logger.error(f"语音合成失败: {e}", exc_info=True)
            raise

    async def _whisper_stt(self, audio_data: bytes, language: str) -> Tuple[str, Optional[Dict[str, Any]]]:
        """Recognize speech with a local openai-whisper model.

        Fixes over the original implementation:
          * the model is loaded once and cached instead of reloaded per call;
          * model loading and transcription (blocking, CPU-heavy) run in the
            default executor so the event loop is not stalled;
          * the audio is normalized to mono / 16 kHz / 16-bit, the input
            format Whisper's array path expects;
          * the BCP-47 tag ("zh-CN") is reduced to the bare ISO 639-1 code
            ("zh") that Whisper's ``language`` argument requires.

        Falls back to the OpenAI API when the local path fails and an API
        key is configured.
        """
        try:
            try:
                import whisper
            except ImportError:
                logger.error("未安装openai-whisper，请运行: pip install openai-whisper")
                raise ImportError("openai-whisper not installed")

            loop = asyncio.get_running_loop()

            # Lazy one-time model load (base model keeps memory usage low).
            if self._whisper_model is None:
                self._whisper_model = await loop.run_in_executor(
                    None, whisper.load_model, "base"
                )
            model = self._whisper_model

            # Decode and normalize: Whisper expects mono 16 kHz float32
            # samples in [-1, 1]; dividing by 32768 scales 16-bit PCM.
            audio = AudioSegment.from_file(io.BytesIO(audio_data), format="wav")
            audio = audio.set_channels(1).set_frame_rate(16000).set_sample_width(2)
            samples = np.array(audio.get_array_of_samples()).astype(np.float32) / 32768.0

            # "zh-CN" -> "zh": Whisper only understands bare language codes.
            whisper_language = language.split('-')[0] if language else None

            result = await loop.run_in_executor(
                None,
                lambda: model.transcribe(
                    samples,
                    language=whisper_language,
                    fp16=False  # FP32 for compatibility with CPU-only devices
                )
            )

            text = str(result["text"])
            metadata = {"model": "whisper-base", "language": language}
            return text, metadata

        except Exception as e:
            logger.error(f"Whisper语音识别失败: {e}", exc_info=True)
            # Best-effort fallback to the hosted API if we have credentials.
            if self.openai_api_key:
                return await self._openai_api_stt(audio_data, language)
            raise

    async def _openai_api_stt(self, audio_data: bytes, language: str) -> Tuple[str, Optional[Dict[str, Any]]]:
        """Recognize speech via the OpenAI transcription API.

        The ``/v1/audio/transcriptions`` endpoint requires a
        multipart/form-data file upload; the original implementation posted
        a JSON body with base64 audio, which the API always rejects.

        Raises:
            ValueError: If no OpenAI API key is configured.
            Exception: On any non-200 API response.
        """
        if not self.openai_api_key:
            raise ValueError("OpenAI API密钥未配置")

        try:
            async with aiohttp.ClientSession() as session:
                # Only Authorization here: aiohttp generates the multipart
                # Content-Type header (with boundary) from the FormData.
                headers = {
                    "Authorization": f"Bearer {self.openai_api_key}"
                }

                form = aiohttp.FormData()
                form.add_field(
                    "file",
                    audio_data,
                    filename="audio.wav",
                    content_type="audio/wav"
                )
                form.add_field("model", "whisper-1")
                # The API expects ISO 639-1 ("zh"), not a BCP-47 tag ("zh-CN").
                form.add_field("language", language.split('-')[0])

                async with session.post(
                    "https://api.openai.com/v1/audio/transcriptions",
                    headers=headers,
                    data=form
                ) as response:
                    if response.status == 200:
                        result = await response.json()
                        return result["text"], {"model": "whisper-1", "language": language}
                    else:
                        error_text = await response.text()
                        raise Exception(f"OpenAI API错误: {response.status} - {error_text}")

        except Exception as e:
            logger.error(f"OpenAI API语音识别失败: {e}")
            raise

    async def _azure_stt(self, audio_data: bytes, language: str) -> Tuple[str, Optional[Dict[str, Any]]]:
        """Recognize speech with Azure Speech Services.

        Single-utterance recognition via a push stream. The blocking
        ``recognize_once`` call runs in the default executor so the event
        loop stays responsive. NOTE(review): this remains a simplified
        implementation; continuous recognition would need the SDK's
        event-driven API.

        Raises:
            ValueError: If no Azure Speech key is configured.
            ImportError: If the Azure Speech SDK is not installed.
            Exception: When recognition does not yield speech.
        """
        if not self.azure_speech_key:
            raise ValueError("Azure Speech密钥未配置")

        try:
            try:
                import azure.cognitiveservices.speech as speechsdk
            except ImportError:
                logger.error("未安装azure-cognitiveservices-speech，请运行: pip install azure-cognitiveservices-speech")
                raise ImportError("azure-cognitiveservices-speech not installed")

            speech_config = speechsdk.SpeechConfig(
                subscription=self.azure_speech_key,
                region=self.azure_region
            )
            speech_config.speech_recognition_language = language

            # Feed the raw audio to the recognizer through a push stream.
            audio_stream = speechsdk.audio.PushAudioInputStream()
            audio_config = speechsdk.audio.AudioConfig(stream=audio_stream)

            recognizer = speechsdk.SpeechRecognizer(
                speech_config=speech_config,
                audio_config=audio_config
            )

            audio_stream.write(audio_data)
            audio_stream.close()

            # recognize_once() blocks until an utterance completes;
            # keep it off the event loop.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(None, recognizer.recognize_once)

            if result.reason == speechsdk.ResultReason.RecognizedSpeech:
                return result.text, {"provider": "azure", "language": language}
            else:
                raise Exception(f"Azure语音识别失败: {result.reason}")

        except ImportError:
            logger.error("未安装azure-cognitiveservices-speech")
            raise
        except Exception as e:
            logger.error(f"Azure语音识别失败: {e}")
            raise

    async def _edge_tts(self, text: str, language: str, voice: str) -> Tuple[bytes, Optional[Dict[str, Any]]]:
        """Synthesize speech with edge-tts, collecting the streamed audio.

        Fix over the original: the caller-supplied ``voice`` was always
        discarded. It is now honored whenever it matches the requested
        language; a language-appropriate default is substituted only when
        the voice is missing or belongs to a different language (preserving
        the old auto-selection for mismatched defaults).

        Raises:
            ImportError: If edge-tts is not installed.
        """
        try:
            import edge_tts

            # Honor the requested voice when it matches the language prefix;
            # otherwise fall back to a sensible default for the language.
            primary = language.split('-')[0].lower() if language else ''
            if not voice or not voice.lower().startswith(primary):
                if language.startswith('zh'):
                    voice = "zh-CN-XiaoxiaoNeural"
                elif language.startswith('en'):
                    voice = "en-US-AriaNeural"
                else:
                    voice = "zh-CN-XiaoxiaoNeural"  # default Chinese voice

            communicate = edge_tts.Communicate(text, voice)

            # The stream mixes audio and word-boundary events;
            # keep only the audio chunks.
            audio_chunks = []
            async for chunk in communicate.stream():
                if chunk["type"] == "audio":
                    audio_chunks.append(chunk["data"])

            audio_data = b''.join(audio_chunks)

            return audio_data, {"provider": "edge", "voice": voice, "language": language}

        except ImportError:
            logger.error("未安装edge-tts，请运行: pip install edge-tts")
            raise
        except Exception as e:
            logger.error(f"Edge TTS语音合成失败: {e}")
            raise

    async def _azure_tts(self, text: str, language: str, voice: str) -> Tuple[bytes, Optional[Dict[str, Any]]]:
        """Synthesize speech with Azure Speech Services.

        The synchronous ``speak_text_async(...).get()`` wait runs in the
        default executor so the event loop is not blocked.

        Raises:
            ValueError: If no Azure Speech key is configured.
            ImportError: If the Azure Speech SDK is not installed.
            Exception: When synthesis fails or returns no result.
        """
        if not self.azure_speech_key:
            raise ValueError("Azure Speech密钥未配置")

        try:
            try:
                import azure.cognitiveservices.speech as speechsdk
            except ImportError:
                logger.error("未安装azure-cognitiveservices-speech，请运行: pip install azure-cognitiveservices-speech")
                raise ImportError("azure-cognitiveservices-speech not installed")

            speech_config = speechsdk.SpeechConfig(
                subscription=self.azure_speech_key,
                region=self.azure_region
            )
            speech_config.speech_synthesis_voice_name = voice

            synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)

            # .get() blocks until synthesis finishes; run it off the loop.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(
                None, lambda: synthesizer.speak_text_async(text).get()
            )

            if result is not None:
                if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
                    audio_data = bytes(result.audio_data)
                    metadata: Dict[str, Any] = {"provider": "azure", "voice": voice, "language": language}
                    return audio_data, metadata
                else:
                    raise Exception(f"Azure TTS失败: {result.reason}")
            else:
                raise Exception("Azure TTS返回空结果")

        except ImportError:
            logger.error("未安装azure-cognitiveservices-speech")
            raise
        except Exception as e:
            logger.error(f"Azure TTS失败: {e}")
            raise

    def convert_audio_format(self, audio_data: bytes, from_format: str, to_format: str) -> bytes:
        """Transcode audio bytes between container formats via pydub/ffmpeg.

        Args:
            audio_data: Source audio bytes.
            from_format: Source format name (e.g. 'wav', 'mp3').
            to_format: Target format name (e.g. 'wav', 'mp3').

        Returns:
            The re-encoded audio bytes.

        Raises:
            Exception: Decoding/encoding failures are logged and re-raised.
        """
        try:
            audio = AudioSegment.from_file(io.BytesIO(audio_data), format=from_format)

            buffer = io.BytesIO()
            audio.export(buffer, format=to_format)

            return buffer.getvalue()

        except Exception as e:
            logger.error(f"音频格式转换失败: {e}")
            raise

# Global voice service instance (module-level singleton, constructed at import time).
voice_service = VoiceService()
