# core/asr_engine.py

import logging
from abc import ABC, abstractmethod
from typing import Dict, Optional, Any, Union
from pathlib import Path
from core.config import settings
from datetime import datetime
import random
from core.real_translation_engine import RealTranslationEngine

logger = logging.getLogger(__name__)


class ASREngine(ABC):
    """Abstract interface every ASR (automatic speech recognition) engine implements."""

    @abstractmethod
    def load_model(self) -> None:
        """Load the ASR model into memory."""
        ...

    @abstractmethod
    def transcribe(
        self,
        audio_path: Path,
        language: Optional[str] = None,
        task: str = "transcribe",  # "transcribe" or "translate"
        return_segments: bool = True
    ) -> Dict[str, Any]:
        """
        Run speech recognition on an audio file.

        :param audio_path: path to the audio file
        :param language: language code such as 'zh' or 'en'; None means auto-detect
        :param task: "transcribe" (keep source language) or "translate" (to English)
        :param return_segments: whether to include timed segment information
        :return: dict containing text, language, timestamps, etc.
        """
        ...

    @abstractmethod
    def is_ready(self) -> bool:
        """Return True once the engine is ready for use."""
        ...


class MockASREngine(ASREngine):
    """Mock ASR engine returning canned results (for development and testing)."""

    def __init__(self):
        # The mock "loads" eagerly so it is ready right after construction.
        self._is_loaded = False
        self.load_model()

    def load_model(self):
        """Pretend to load the model; only logs when DEBUG is enabled."""
        if settings.DEBUG:
            logger.info(f"模拟加载 ASR 模型: {settings.ASR_MODEL_PATH}")
        self._is_loaded = True

    def transcribe(
        self,
        audio_path: Path,
        language: Optional[str] = None,
        task: str = "transcribe",
        return_segments: bool = True
    ) -> Dict[str, Any]:
        """Return a fixed transcription result for the given audio file.

        :raises RuntimeError: when the (mock) model has not been loaded
        :raises FileNotFoundError: when the audio file does not exist
        """
        if not self._is_loaded:
            raise RuntimeError("ASR 模型未加载")
        if not audio_path.exists():
            raise FileNotFoundError(f"音频文件不存在: {audio_path}")

        # Pick a pseudo-detected language when the caller did not specify one.
        chosen_lang = language if language else random.choice(["en", "zh"])
        response: Dict[str, Any] = {
            "text": "Hello, this is a test transcription. 你好，这是测试文本。",
            "detected_language": chosen_lang,
            "language_confidence": 0.95,
            "duration": 120,
            "model": "mock-whisper-tiny",
            "timestamp": datetime.now().isoformat(timespec='seconds'),
        }

        if return_segments:
            response["segments"] = [
                {"start": 0.0, "end": 2.5, "text": "Hello,"},
                {"start": 2.5, "end": 5.0, "text": "this is a test"},
                {"start": 5.0, "end": 8.0, "text": "transcription."},
            ]

        if settings.DEBUG:
            logger.info(f"模拟识别完成: {audio_path}")
        return response

    def is_ready(self) -> bool:
        """Ready as soon as load_model() has run."""
        return self._is_loaded

    def transcribe_realtime(self, audio_chunk: bytes, language: Optional[str] = None) -> str:
        """Return a fixed string simulating real-time transcription of one chunk."""
        if not self._is_loaded:
            raise RuntimeError("ASR 模型未加载")

        # Canned recognition result.
        return "这是一段模拟的实时识别文本。This is a simulated real-time transcription."


# ===================================================================
# 真实 Whisper ASR 引擎（实现 ASREngine 接口）
# ===================================================================

class WhisperASREngine(ASREngine):
    """
    Real ASR engine backed by RealTranslationEngine.

    Adapts the ASREngine interface onto the unified translation API.
    """

    def __init__(self):
        # Readiness mirrors the underlying engine's state at construction time.
        self.engine = RealTranslationEngine()
        self._is_loaded = self.engine.is_ready()

    def load_model(self) -> None:
        """Load the underlying model; no-op when it is already loaded."""
        if self._is_loaded:
            return
        try:
            self.engine.load_model()
            self._is_loaded = True
            logger.info("WhisperASREngine: 模型加载完成")
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise

    def transcribe(
        self,
        audio_path: Union[str, Path],
        language: Optional[str] = None,
        task: str = "transcribe",
        return_segments: bool = True
    ) -> Dict[str, Any]:
        """
        Recognize speech in an audio file via the unified translation API.

        :param audio_path: path (str or Path) to the audio file
        :param language: source language code; None means auto-detect
        :param task: "transcribe" for source-language text, "translate" for English
        :param return_segments: include timed segments when the backend provides them
        :return: dict with text, detected language, confidence, duration, model, timestamp
        :raises RuntimeError: when the engine is not ready
        :raises FileNotFoundError: when the audio file does not exist
        """
        if not self.is_ready():
            raise RuntimeError("ASR 引擎未就绪，请先调用 load_model()")

        # Normalize to a Path object.
        if isinstance(audio_path, str):
            audio_path = Path(audio_path)

        if not audio_path.exists():
            raise FileNotFoundError(f"音频文件不存在: {audio_path}")

        try:
            logger.info(f"开始识别音频: {audio_path.name} | 语言={language}, 任务={task}")

            # "translate" means translate to English — Whisper supports no other target.
            wants_translation = task == "translate"

            raw = self.engine.translate_audio(
                audio_path=str(audio_path),
                source_lang=language if language else "auto",
                target_lang="en",
                enable_translation=wants_translation,
                return_segments=return_segments
            )

            # Reshape the backend result into the ASREngine contract.
            output: Dict[str, Any] = {
                "text": raw["translated_text"] if wants_translation else raw["source_text"],
                "detected_language": raw["source_lang"],
                "language_confidence": raw.get("confidence", 0.9),
                "duration": raw.get("duration", 0),
                "model": raw["engine"],
                "timestamp": raw["timestamp"],
            }

            # Pass segments through when requested and available.
            if return_segments and "segments" in raw:
                output["segments"] = raw["segments"]

            logger.info(f"识别完成: {output['text'][:60]}...")
            return output

        except Exception as e:
            logger.error(f"识别失败 {audio_path}: {e}")
            raise

    def is_ready(self) -> bool:
        """Ready only when both this wrapper and the underlying engine are loaded."""
        return self._is_loaded and self.engine.is_ready()

    def get_supported_languages(self) -> Dict[str, list]:
        """Return the source/target languages this engine supports."""
        return {
            "source_languages": ["auto", "zh", "en", "ja", "ko", "fr", "es", "ru", "de"],
            "target_languages": ["en"],  # Whisper can only translate into English
            "detection_supported": True
        }

    def transcribe_realtime(self, audio_chunk: bytes, language: Optional[str] = None) -> str:
        """
        Transcribe a single chunk of audio in (near) real time.

        :param audio_chunk: raw audio bytes
        :param language: language code; None means auto-detect
        :return: recognized text, or an empty string on failure
        """
        if not self.is_ready():
            raise RuntimeError("ASR 引擎未就绪")

        try:
            import os
            import tempfile

            # Persist the chunk to a temp file so transcribe() can read it.
            with tempfile.NamedTemporaryFile(suffix='.webm', delete=False) as temp:
                temp_path = temp.name
                temp.write(audio_chunk)

            try:
                outcome = self.transcribe(
                    audio_path=temp_path,
                    language=language,
                    return_segments=False
                )
                return outcome.get("text", "").strip()
            finally:
                # Always remove the temp file, even when recognition fails.
                if os.path.exists(temp_path):
                    os.remove(temp_path)

        except Exception as e:
            logger.error(f"实时转写失败: {e}")
            return ""


# ===================================================================
# 全局 ASR 引擎实例（根据配置自动切换）
# ===================================================================

def create_asr_engine() -> ASREngine:
    """Build and return the ASR engine selected by application settings."""
    # Mock engine short-circuits everything else (development/testing mode).
    if not settings.USE_REAL_ASR:
        logger.info("使用模拟 ASR 引擎")
        return MockASREngine()

    # Whisper is the default real engine.
    if settings.ASR_ENGINE.lower() != "paraformer":
        logger.info("使用 Whisper ASR 引擎")
        return WhisperASREngine()

    try:
        from core.paraformer_engine import ParaformerASREngine, is_funasr_available
        if not is_funasr_available():
            logger.warning("FunASR 未安装，回退到 Whisper 引擎")
            return WhisperASREngine()
        logger.info("使用 Paraformer ASR 引擎")
        return ParaformerASREngine()
    except ImportError as e:
        # Fall back to Whisper when the Paraformer module cannot be imported.
        logger.warning(f"Paraformer 引擎初始化失败: {e}，回退到 Whisper 引擎")
        return WhisperASREngine()

# Module-level singleton ASR engine, created eagerly at import time.
asr_engine: ASREngine = create_asr_engine()