# core/real_translation_engine.py

import logging
import os
from pathlib import Path
from typing import Dict, Optional, List
from datetime import datetime
import tempfile
import wave
import numpy as np

# Import WhisperModel from the faster-whisper package
from faster_whisper import WhisperModel

from core.config import settings
from core.translation_interface import TranslationEngine

logger = logging.getLogger(__name__)


class RealTranslationEngine(TranslationEngine):
    """
    Unified translation engine built on faster-whisper.

    Capabilities:
        - Transcribe audio into source-language text (automatic language
          detection supported).
        - Translate audio directly into the target language (Whisper only
          supports English as a translation target).
        - Translate text via a TTS + ASR round trip.

    Notes:
        - Model name, root directory and device are taken from ``settings``.
        - Works on CPU with int8 quantisation, suitable for GPU-less hosts.
    """

    def __init__(self):
        # Populated by load_model(); kept as attributes so is_ready()
        # can be queried cheaply.
        self.model = None
        self._is_loaded = False

        # Read parameters from the configuration object.
        self.model_name = settings.ASR_MODEL_NAME
        self.model_root_dir = settings.ASR_MODEL_ROOT_DIR
        self.device = settings.ASR_DEVICE
        self.language = settings.ASR_LANGUAGE  # default language

        # CPU tuning knobs; fall back to sensible defaults when the
        # settings object does not define them.
        self.cpu_threads = getattr(settings, "ASR_CPU_THREADS", os.cpu_count())
        self.compute_type = getattr(settings, "ASR_COMPUTE_TYPE", "int8")  # int8, int16, float16, float32

        # Load the model eagerly so the engine is usable immediately.
        self.load_model()

    def load_model(self):
        """Load the faster-whisper model (idempotent: no-op when already loaded)."""
        if self._is_loaded:
            return

        try:
            logger.info(
                f"正在加载 faster-whisper 模型: {self.model_name} "
                f"| 设备: {self.device} "
                f"| 量化: {self.compute_type} "
                f"| 路径: {self.model_root_dir}"
            )

            # Instantiate via the WhisperModel class.
            self.model = WhisperModel(
                self.model_name,
                device=self.device,
                compute_type=self.compute_type,
                cpu_threads=self.cpu_threads,
                download_root=str(self.model_root_dir)
            )
            self._is_loaded = True
            logger.info(f"faster-whisper 模型 '{self.model_name}' 加载成功，运行在 {self.device.upper()}")
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise

    def is_ready(self) -> bool:
        """Return True when the model has been loaded successfully."""
        return self._is_loaded and self.model is not None

    def _text_to_speech_simple(self, text: str, sample_rate: int = 16000) -> bytes:
        """
        Minimal text-to-speech used by the text-translation path.

        Tries edge-tts first; when it is not installed, falls back to a
        silent placeholder WAV whose length is proportional to the text
        length (0.1 s per character).

        Args:
            text: Text to synthesise.
            sample_rate: Sample rate of the fallback WAV, in Hz.

        Returns:
            bytes: Encoded audio (edge-tts stream bytes, or a WAV file
            image from the fallback).
        """
        try:
            # Optional dependency; a proper TTS engine could be swapped in.
            import edge_tts
            import asyncio

            async def generate_speech() -> bytes:
                communicate = edge_tts.Communicate(text, "zh-CN-XiaoxiaoNeural")
                chunks = []
                async for chunk in communicate.stream():
                    if chunk["type"] == "audio":
                        chunks.append(chunk["data"])
                return b"".join(chunks)

            # asyncio.run creates and tears down a dedicated event loop.
            return asyncio.run(generate_speech())

        except ImportError:
            logger.warning("edge-tts未安装，使用模拟音频数据")
            import io

            # Build a silent WAV entirely in memory: avoids the
            # open-twice temp-file race on Windows and leaves nothing
            # on disk to clean up.
            duration = len(text) * 0.1  # 0.1 s per character
            samples = int(duration * sample_rate)
            silence = np.zeros(samples, dtype=np.int16)

            buffer = io.BytesIO()
            with wave.open(buffer, 'wb') as wav_file:
                wav_file.setnchannels(1)  # mono
                wav_file.setsampwidth(2)  # 16-bit samples
                wav_file.setframerate(sample_rate)
                wav_file.writeframes(silence.tobytes())
            return buffer.getvalue()

    def translate_text(
        self,
        text: str,
        source_lang: str = "auto",
        target_lang: str = "en"
    ) -> Dict:
        """
        Text translation (implemented as a TTS + ASR round trip).

        Args:
            text: Source text.
            source_lang: Source language code, 'auto' for detection.
            target_lang: Target language code; only 'en' is actually
                translated (Whisper limitation) — anything else echoes
                the source text back.

        Returns:
            Dict: source/translated text, languages, confidence, engine
            id and timestamp.

        Raises:
            RuntimeError: If the model is not loaded.
            ValueError: If ``text`` is empty or whitespace only.
        """
        if not self.is_ready():
            raise RuntimeError("模型未加载，请先调用 load_model()")

        if not text.strip():
            raise ValueError("文本不能为空")

        try:
            logger.info(f"开始文本翻译: {text[:50]}... | 源={source_lang}, 目标={target_lang}")

            # Resolve the source language up front.
            detected_lang = self._detect_language_from_text(text) if source_lang == "auto" else source_lang

            if target_lang.lower() == "en":
                # Whisper can only translate *audio*, so synthesise the
                # text first and feed the result back through the model.
                audio_data = self._text_to_speech_simple(text)

                # Persist to a temp file because transcribe() takes a path.
                with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_audio:
                    temp_audio.write(audio_data)
                    temp_audio_path = temp_audio.name

                try:
                    segments, info = self.model.transcribe(
                        temp_audio_path,
                        task="translate",  # translation task
                        language=detected_lang,
                        beam_size=5,
                        vad_filter=False
                    )
                    translated_text = "".join(segment.text for segment in segments).strip()

                    # Fall back to plain transcription when the translate
                    # pass produced nothing (e.g. silent placeholder audio).
                    if not translated_text:
                        segments, info = self.model.transcribe(
                            temp_audio_path,
                            task="transcribe",
                            language=detected_lang,
                            beam_size=5,
                            vad_filter=False
                        )
                        translated_text = "".join(segment.text for segment in segments).strip()

                finally:
                    # Best-effort cleanup of the temporary audio file.
                    try:
                        os.unlink(temp_audio_path)
                    except (OSError, PermissionError):
                        pass
            else:
                # Whisper cannot target languages other than English; echo
                # the input (a dedicated MT model could be plugged in later).
                translated_text = text
                logger.warning(f"暂不支持翻译到 {target_lang}，返回原文")

            result = {
                "source_text": text,
                "translated_text": translated_text,
                "source_lang": detected_lang,
                "target_lang": target_lang,
                "confidence": 0.9,  # static placeholder confidence
                "engine": f"faster-whisper-{self.model_name}",
                "timestamp": datetime.now().isoformat(timespec='seconds')
            }

            logger.info(f"文本翻译完成 | 源语言: {detected_lang} | 翻译: {translated_text[:50]}...")
            return result

        except Exception as e:
            logger.error(f"文本翻译失败: {e}")
            raise

    def translate_audio(
        self,
        audio_path: str,
        source_lang: str = "auto",
        target_lang: str = "en",
        enable_translation: bool = True,
        return_segments: bool = False
    ) -> Dict:
        """
        Audio transcription / translation.

        Args:
            audio_path: Path to the audio file.
            source_lang: Source language, 'auto' for detection.
            target_lang: Target language (only 'en' triggers translation).
            enable_translation: Run the extra translation pass when True.
            return_segments: Include per-segment timing info when True.

        Returns:
            Dict: transcript, translation, detected language, duration,
            engine id, timestamp and (optionally) segments.

        Raises:
            RuntimeError: If the model is not loaded.
            FileNotFoundError: If ``audio_path`` does not exist.
        """
        if not self.is_ready():
            raise RuntimeError("模型未加载，请先调用 load_model()")

        audio_path = Path(audio_path)
        if not audio_path.exists():
            raise FileNotFoundError(f"音频文件不存在: {audio_path}")

        try:
            logger.info(
                f"开始处理音频: {audio_path.name} | "
                f"源={source_lang}, 翻译={enable_translation}, 分段={return_segments}"
            )

            # None asks faster-whisper to auto-detect the language.
            detect_lang = None if source_lang == "auto" else source_lang

            # ASR pass (speech recognition).
            segments, info = self.model.transcribe(
                str(audio_path),
                language=detect_lang,
                task="transcribe",
                beam_size=5,
                vad_filter=False  # keep silence: no VAD filtering
            )

            # transcribe() yields a lazy generator; materialise it ONCE so
            # that building `source_text` does not exhaust it before the
            # optional per-segment listing below (previously that listing
            # always came back empty because the generator was spent).
            segments = list(segments)
            source_text = "".join(segment.text for segment in segments).strip()
            detected_lang = info.language

            # Optional translation pass (Whisper: English target only).
            translated_text = source_text
            if enable_translation and target_lang.lower() == "en":
                logger.debug("正在翻译为英文...")
                translate_segments, _ = self.model.transcribe(
                    str(audio_path),
                    task="translate",
                    language=detected_lang,
                    beam_size=5,
                    vad_filter=False
                )
                translated_text = "".join(segment.text for segment in translate_segments).strip()

            result = {
                "source_text": source_text,
                "translated_text": translated_text,
                "source_lang": detected_lang,
                "target_lang": target_lang,
                "confidence": 0.95,  # static placeholder confidence
                "duration": info.duration,
                "engine": f"faster-whisper-{self.model_name}",
                "timestamp": datetime.now().isoformat(timespec='seconds'),
                "segments": []
            }

            # Attach per-segment timing information on request.
            if return_segments:
                result["segments"] = [
                    {
                        "id": i,
                        "start": round(segment.start, 3),
                        "end": round(segment.end, 3),
                        "text": segment.text.strip()
                    }
                    for i, segment in enumerate(segments)
                ]

            logger.info(f"音频翻译完成 | 原文: {source_text[:60]}... → 翻译: {translated_text[:60]}...")
            return result

        except Exception as e:
            logger.error(f"音频处理失败 {audio_path}: {e}")
            raise

    def batch_translate_text(self, texts: List[str], source_lang: str = "auto", target_lang: str = "en") -> List[Dict]:
        """Translate each text in ``texts``; per-item failures yield an error entry instead of aborting the batch."""
        results = []
        for text in texts:
            try:
                results.append(self.translate_text(text, source_lang, target_lang))
            except Exception as e:
                logger.error(f"批量翻译失败，文本: {text[:30]}..., 错误: {e}")
                # Keep the batch going: record a per-item error result.
                results.append({
                    "source_text": text,
                    "translated_text": f"[翻译失败: {str(e)}]",
                    "source_lang": source_lang,
                    "target_lang": target_lang,
                    "confidence": 0.0,
                    "engine": f"faster-whisper-{self.model_name}",
                    "timestamp": datetime.now().isoformat(timespec='seconds')
                })
        return results

    def get_supported_languages(self) -> Dict[str, list]:
        """Return the supported source/target language codes."""
        return {
            "source_languages": ["auto", "zh", "en", "ja", "ko", "fr", "de", "es", "it", "pt", "ru", "ar"],
            "target_languages": ["en"],  # Whisper can only translate into English
            "detection_supported": True
        }

    def _detect_language_from_text(self, text: str) -> str:
        """Crude language sniff: more than 30% CJK characters means Chinese, otherwise English."""
        chinese_chars = sum(1 for char in text if '\u4e00' <= char <= '\u9fff')
        # NOTE: the previous English-keyword elif was dead logic — both of
        # its branches returned "en", so the check collapses to this.
        return "zh" if chinese_chars > len(text) * 0.3 else "en"

    # --------------------------------------------------
    # Compatibility shims: legacy interface (please migrate)
    # --------------------------------------------------
    def transcribe_audio(self, audio_path: str, source_lang: str = "auto", target_lang: str = "en",
                        enable_translation: bool = False, return_segments: bool = True) -> Dict:
        """
        Deprecated: use translate_audio().
        Kept only for backward compatibility with old callers.
        """
        logger.warning("[Deprecated] 'transcribe_audio()' 方法已弃用，建议使用 'translate_audio()'")
        return self.translate_audio(
            audio_path=audio_path,
            source_lang=source_lang,
            target_lang=target_lang,
            enable_translation=enable_translation,
            return_segments=return_segments
        )

    def translate(self, audio_path: str, source_lang: str = "auto", target_lang: str = "en"):
        """
        Deprecated: use translate_audio().
        Kept only for backward compatibility with old callers.
        """
        logger.warning("[Deprecated] 'translate()' 方法已弃用，建议使用 'translate_audio()'")
        return self.translate_audio(
            audio_path=audio_path,
            source_lang=source_lang,
            target_lang=target_lang,
            enable_translation=True,
            return_segments=True
        )