import whisper
import torch
from pathlib import Path
from typing import List, Dict, Optional
import logging

from app.core.config import settings

logger = logging.getLogger(__name__)


class ASRService:
    """Automatic speech recognition (ASR) service backed by OpenAI Whisper.

    The Whisper model is loaded lazily on first use and cached on the
    instance. Model name, device, and default language come from the
    application settings.
    """

    def __init__(self):
        # Model is loaded lazily by load_model(); None until then.
        self.model = None
        self.model_name = settings.WHISPER_MODEL
        self.device = settings.WHISPER_DEVICE
        self.language = settings.WHISPER_LANGUAGE

    async def load_model(self) -> None:
        """Load the Whisper model once; subsequent calls are no-ops.

        Raises:
            Exception: re-raised from ``whisper.load_model`` on failure.

        NOTE(review): ``whisper.load_model`` is a blocking call and will
        stall the event loop while loading — consider ``asyncio.to_thread``
        if that matters for this deployment; confirm before changing.
        """
        if self.model is not None:
            return
        try:
            logger.info(f"Loading Whisper model: {self.model_name}")
            self.model = whisper.load_model(
                self.model_name,
                device=self.device
            )
            logger.info("Whisper model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load Whisper model: {e}")
            raise

    async def transcribe_file(
        self,
        audio_path: Path,
        language: Optional[str] = None,
        task: str = "transcribe"
    ) -> Dict:
        """Transcribe an audio file with Whisper.

        Args:
            audio_path: Path to the audio file to transcribe.
            language: Optional language override; falls back to the
                configured default language when not given.
            task: Whisper task, "transcribe" or "translate".

        Returns:
            The raw Whisper result dict (keys include "text" and "segments").

        Raises:
            Exception: re-raised from model loading or transcription.
        """
        await self.load_model()

        try:
            logger.info(f"Transcribing audio file: {audio_path}")

            options = {
                "task": task,
                # Enable fp16 only when the model runs on a CUDA device.
                # The original used torch.cuda.is_available(), which turns
                # fp16 on even for a CPU-configured model (Whisper then
                # warns and falls back to fp32).
                "fp16": str(self.device).startswith("cuda"),
                "verbose": False
            }

            # Explicit language takes precedence over the configured default;
            # omit the key entirely to let Whisper auto-detect.
            effective_language = language or self.language
            if effective_language:
                options["language"] = effective_language

            result = self.model.transcribe(str(audio_path), **options)

            logger.info(f"Transcription completed for {audio_path}")
            return result

        except Exception as e:
            logger.error(f"Transcription failed for {audio_path}: {e}")
            raise

    async def transcribe_with_timestamps(
        self,
        audio_path: Path,
        language: Optional[str] = None
    ) -> List[Dict]:
        """Transcribe a file and return per-segment timestamped entries.

        Returns:
            One dict per Whisper segment with keys "text" (stripped),
            "timestamp" (segment start, seconds), "duration" (seconds),
            and "confidence" (Whisper's avg_logprob; 0.0 when absent).
        """
        result = await self.transcribe_file(audio_path, language)

        return [
            {
                "text": segment["text"].strip(),
                "timestamp": segment["start"],
                "duration": segment["end"] - segment["start"],
                "confidence": segment.get("avg_logprob", 0.0)
            }
            for segment in result.get("segments", [])
        ]

    async def detect_language(self, audio_path: Path) -> str:
        """Detect the dominant language of an audio file.

        Best-effort: any failure is logged and "en" is returned as the
        default rather than raising.

        Returns:
            The most probable language code (e.g. "en", "zh").
        """
        await self.load_model()

        try:
            audio = whisper.load_audio(str(audio_path))

            # Whisper's language detection runs on a single 30-second
            # mel spectrogram window.
            audio = whisper.pad_or_trim(audio)
            mel = whisper.log_mel_spectrogram(audio).to(self.model.device)

            _, probs = self.model.detect_language(mel)
            detected_language = max(probs, key=probs.get)

            logger.info(f"Detected language: {detected_language}")
            return detected_language

        except Exception as e:
            logger.error(f"Language detection failed: {e}")
            return "en"  # deliberate best-effort default

    async def transcribe_stream(
        self,
        audio_data: bytes,
        language: Optional[str] = None
    ) -> str:
        """Transcribe an in-memory audio buffer.

        The bytes are written to a temporary file (Whisper's API takes a
        path), transcribed, and the file is always removed afterwards.

        Args:
            audio_data: Raw audio bytes; assumed to be a complete WAV
                payload — TODO confirm against the callers.
            language: Optional language override.

        Returns:
            The transcribed text, or "" when Whisper produced none.

        Raises:
            Exception: re-raised from temp-file I/O or transcription.
        """
        await self.load_model()

        import tempfile
        import os

        temp_path: Optional[str] = None
        try:
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
                # Record the path before writing so cleanup still runs if
                # the write itself fails (the original leaked the file in
                # that case: its unlink only guarded the transcription).
                temp_path = temp_file.name
                temp_file.write(audio_data)

            result = await self.transcribe_file(Path(temp_path), language)
            return result.get("text", "")

        except Exception as e:
            logger.error(f"Stream transcription failed: {e}")
            raise
        finally:
            if temp_path is not None:
                os.unlink(temp_path)

    def get_supported_languages(self) -> List[str]:
        """Return the list of language codes this service supports.

        Static list of Whisper-supported ISO 639-1 codes.
        """
        return [
            "af", "ar", "hy", "az", "be", "bs", "bg", "ca", "zh", "hr", "cs", "da",
            "nl", "en", "et", "fi", "fr", "gl", "de", "el", "he", "hi", "hu", "is",
            "id", "it", "ja", "kn", "kk", "ko", "lv", "lt", "mk", "ms", "mr", "mi",
            "ne", "no", "fa", "pl", "pt", "ro", "ru", "sr", "sk", "sl", "es", "sw",
            "sv", "tl", "ta", "th", "tr", "uk", "ur", "vi", "cy"
        ]

    async def health_check(self) -> Dict:
        """Report service health, loading the model if necessary.

        Returns:
            On success: status, model_loaded flag, model name, device, and
            the number of supported languages. On failure: status
            "unhealthy" plus the error message (never raises).
        """
        try:
            await self.load_model()
            return {
                "status": "healthy",
                "model_loaded": self.model is not None,
                "model_name": self.model_name,
                "device": self.device,
                "supported_languages": len(self.get_supported_languages())
            }
        except Exception as e:
            return {
                "status": "unhealthy",
                "error": str(e)
            }