#!/usr/bin/env python3
"""
使用transformers库的本地Whisper模型端到端翻译引擎
"""

import os
import torch
import time
import logging
from typing import Dict, List
from datetime import datetime

try:
    from transformers import WhisperProcessor, WhisperForConditionalGeneration, pipeline
    import librosa
    TRANSFORMERS_AVAILABLE = True
except ImportError as e:
    TRANSFORMERS_AVAILABLE = False
    logging.warning(f"Transformers库未安装: {e}")

from core.translation_interface import TranslationEngine
from core.config import settings

logger = logging.getLogger(__name__)

class WhisperTransformersEngine(TranslationEngine):
    """End-to-end speech transcription/translation engine backed by a local
    Whisper checkpoint loaded through the ``transformers`` library.

    Notes:
        Whisper's built-in ``translate`` task always produces **English**
        output; the language token in the decoder prompt names the *source*
        audio language, never the target. Non-English ``target_lang`` values
        therefore still yield English text from the translate pass.
    """

    def __init__(self, model_path: str = "models/whisper-base"):
        """Eagerly load the model from ``model_path``.

        Raises:
            ImportError: the ``transformers``/``librosa`` stack is missing.
            FileNotFoundError: the checkpoint directory does not exist.
        """
        self.model_path = model_path
        self._is_loaded = False
        self.processor = None
        self.model = None
        self.pipeline = None
        # Prefer GPU when available; autoregressive generation is slow on CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # ISO-639-1 codes accepted by translate_audio / translate_text.
        self.supported_languages = ["zh", "en", "ja", "ko", "fr", "de", "es", "it", "pt", "ru", "ar"]

        # Maps API language codes to the full language names Whisper's
        # tokenizer expects. Built once here instead of per call.
        self._lang_map = {
            "auto": "chinese",  # no detection implemented; default to Chinese
            "zh": "chinese",
            "en": "english",
            "ja": "japanese",
            "ko": "korean",
            "fr": "french",
            "de": "german",
            "es": "spanish",
            "it": "italian",
            "pt": "portuguese",
            "ru": "russian",
            "ar": "arabic",
        }

        self.load_model()

    def load_model(self):
        """Load the Whisper processor, model, and ASR pipeline.

        Raises:
            ImportError: transformers is not installed.
            FileNotFoundError: ``self.model_path`` does not exist.
        """
        if not TRANSFORMERS_AVAILABLE:
            raise ImportError("Transformers库未安装")

        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"模型路径不存在: {self.model_path}")

        try:
            logger.info(f"正在加载本地Whisper模型: {self.model_path}")
            start_time = time.time()

            # Load processor and model once, then move the model to the device.
            self.processor = WhisperProcessor.from_pretrained(self.model_path)
            self.model = WhisperForConditionalGeneration.from_pretrained(self.model_path)
            self.model = self.model.to(self.device)

            # Build the pipeline from the objects already in memory. The
            # original passed model_path here, which loaded a SECOND copy of
            # the checkpoint from disk (double memory and load time).
            self.pipeline = pipeline(
                "automatic-speech-recognition",
                model=self.model,
                tokenizer=self.processor.tokenizer,
                feature_extractor=self.processor.feature_extractor,
                device=0 if self.device == "cuda" else -1,
            )

            load_time = time.time() - start_time
            logger.info(f"✅ Whisper模型加载成功，耗时: {load_time:.2f}秒")

            self._is_loaded = True

        except Exception as e:
            logger.error(f"❌ Whisper模型加载失败: {e}")
            raise

    def translate_audio(
        self,
        audio_path: str,
        source_lang: str = "auto",
        target_lang: str = "en",
        enable_translation: bool = True,
        return_segments: bool = False
    ) -> Dict:
        """Transcribe an audio file and optionally translate it.

        Args:
            audio_path: Path to the audio file (any format librosa can read).
            source_lang: Source language code, or "auto" (defaults to Chinese —
                no real detection is performed).
            target_lang: Target language code. Whisper's translate task only
                emits English, regardless of this value.
            enable_translation: When True and source != target, run the
                translate task in addition to a source-language transcription.
            return_segments: When True, include a single whole-file segment.

        Returns:
            Dict with source/translated text, language codes, timing metadata.

        Raises:
            RuntimeError: the model has not been loaded.
        """
        if not self._is_loaded:
            raise RuntimeError("模型未加载")

        try:
            start_time = time.time()

            # Whisper expects 16 kHz mono input.
            audio, sr = librosa.load(audio_path, sr=16000)
            duration = len(audio) / sr

            input_features = self.processor(audio, sampling_rate=16000, return_tensors="pt").input_features
            input_features = input_features.to(self.device)

            translating = enable_translation and source_lang != target_lang

            # BUG FIX: for the "translate" task the decoder prompt's language
            # token must name the SOURCE language; the original code passed
            # the target language, mis-prompting the model. (Whisper's
            # translate task always outputs English.)
            source_language = self._lang_map.get(source_lang, "chinese")
            task = "translate" if translating else "transcribe"

            # Primary pass: translation (to English) or plain transcription.
            forced_decoder_ids = self.processor.get_decoder_prompt_ids(language=source_language, task=task)
            predicted_ids = self.model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
            transcription = self.processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

            source_text = transcription
            translated_text = transcription

            if translating:
                # Second pass: transcribe in the source language so callers
                # receive both the original text and its translation.
                forced_decoder_ids = self.processor.get_decoder_prompt_ids(language=source_language, task="transcribe")
                source_ids = self.model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
                source_text = self.processor.batch_decode(source_ids, skip_special_tokens=True)[0]
                translated_text = transcription

            process_time = time.time() - start_time

            result = {
                "source_text": source_text,
                "translated_text": translated_text,
                "source_lang": source_lang,
                "target_lang": target_lang,
                "confidence": 0.95,  # fixed placeholder; Whisper yields no score here
                "duration": duration,
                "engine": "whisper-transformers",
                "timestamp": datetime.now().isoformat(timespec='seconds'),
                "processing_time": process_time
            }

            if return_segments:
                # No word/segment timestamps requested from the model, so
                # expose a single segment spanning the whole file.
                result["segments"] = [
                    {"id": 0, "start": 0.0, "end": duration, "text": source_text}
                ]

            logger.info(f"✅ 音频处理完成，耗时: {process_time:.2f}秒")
            return result

        except Exception as e:
            logger.error(f"❌ 音频处理失败: {e}")
            raise

    def translate_text(self, text: str, source_lang: str = "auto", target_lang: str = "en") -> Dict:
        """Return *text* unchanged — Whisper is an audio model and performs
        no text-to-text translation; this exists to satisfy the engine
        interface."""
        return {
            "source_text": text,
            "translated_text": text,
            "source_lang": source_lang,
            "target_lang": target_lang,
            "confidence": 0.8,
            "engine": "whisper-transformers",
            "timestamp": datetime.now().isoformat(timespec='seconds'),
            "note": "Whisper主要用于音频转录和翻译"
        }

    def batch_translate_text(self, texts: List[str], source_lang: str = "auto", target_lang: str = "en") -> List[Dict]:
        """Apply :meth:`translate_text` to each item, mapping failures to an
        error record instead of aborting the batch."""
        results = []
        for text in texts:
            try:
                results.append(self.translate_text(text, source_lang, target_lang))
            except Exception as e:
                results.append({
                    "source_text": text,
                    "translated_text": f"[翻译失败: {str(e)}]",
                    "source_lang": source_lang,
                    "target_lang": target_lang,
                    "confidence": 0.0,
                    "engine": "whisper-transformers",
                    "timestamp": datetime.now().isoformat(timespec='seconds')
                })
        return results

    def get_supported_languages(self) -> Dict[str, list]:
        """Return source/target language codes this engine accepts."""
        return {
            "source_languages": ["auto"] + self.supported_languages,
            "target_languages": self.supported_languages,
            "detection_supported": True
        }

    def is_ready(self) -> bool:
        """True once :meth:`load_model` has completed successfully."""
        return self._is_loaded

    # Compatibility shims for the older engine API.
    def translate(self, text: str, src_lang: str = "auto", tgt_lang: str = "en") -> Dict:
        """Alias for :meth:`translate_text` (legacy signature)."""
        return self.translate_text(text, src_lang, tgt_lang)

    def transcribe(self, audio_path: str, language: str = "auto") -> Dict:
        """Transcription-only entry point returning a flat legacy-shaped dict."""
        result = self.translate_audio(audio_path, language, language, enable_translation=False)
        return {
            "text": result["source_text"],
            "language": result["source_lang"],
            "duration": result["duration"],
            "confidence": result["confidence"]
        }
