#!/usr/bin/env python3
"""
Whisper模型API接口
"""

import os
import numpy as np
import whisper
import librosa
import torch
from typing import Dict, Any, Optional
import logging

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class WhisperAPI:
    """Wrapper around an OpenAI Whisper model.

    Loads a Whisper model once at construction time and exposes helpers to
    transcribe either audio files on disk or in-memory audio arrays.
    """

    def __init__(self, model_name: str = "base", device: Optional[str] = None, model_path: Optional[str] = None):
        """
        Initialize the Whisper API.

        Args:
            model_name: Model name (tiny, base, small, medium, large) or a local path.
            device: Device type ("cpu" or "cuda"); auto-detected when None.
            model_path: Local model path; takes precedence over model_name when set.

        Raises:
            Exception: propagated from model loading (see _load_model).
        """
        self.model_name = model_name
        self.model_path = model_path
        # Prefer CUDA when available unless the caller pinned a device.
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self._load_model()

    def _load_model(self):
        """Load the Whisper model onto the configured device.

        A local model path, when provided and existing, is preferred over the
        named model. Errors are logged and re-raised.
        """
        try:
            if self.model_path and os.path.exists(self.model_path):
                logger.info(f"正在加载本地Whisper模型: {self.model_path}")
                # Bug fix: pass device= so self.device is actually honored;
                # previously the device was logged but never applied.
                self.model = whisper.load_model(self.model_path, device=self.device)
            else:
                logger.info(f"正在加载Whisper模型: {self.model_name}")
                self.model = whisper.load_model(self.model_name, device=self.device)

            logger.info(f"Whisper模型加载成功，设备: {self.device}")
        except Exception as e:
            logger.error(f"Whisper模型加载失败: {e}")
            raise

    def load_audio(self, audio_path: str, target_sr: int = 16000) -> Optional[np.ndarray]:
        """
        Load an audio file as a mono float array.

        Args:
            audio_path: Path to the audio file.
            target_sr: Target sample rate (Whisper expects 16 kHz).

        Returns:
            The resampled audio samples, or None on failure.
        """
        # Guard clause: keep the try body to the call that can actually raise.
        if not os.path.exists(audio_path):
            logger.error(f"音频文件不存在: {audio_path}")
            return None
        try:
            # Sample rate is fixed to target_sr by librosa; the returned sr is unused.
            audio, _ = librosa.load(audio_path, sr=target_sr)
            logger.info(f"音频加载成功: {audio_path}, 长度: {len(audio)/target_sr:.2f}秒")
            return audio
        except Exception as e:
            logger.error(f"音频加载失败: {e}")
            return None

    @staticmethod
    def _format_result(result: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize a raw Whisper result into this API's response shape."""
        return {
            "text": result.get("text", ""),
            "language": result.get("language", "unknown"),
            # NOTE(review): Whisper results may not carry "duration"; 0 is the fallback.
            "duration": result.get("duration", 0),
            "segments": result.get("segments", []),
            "success": True,
        }

    def transcribe(self, audio_path: str, **kwargs) -> Dict[str, Any]:
        """
        Transcribe an audio file.

        Args:
            audio_path: Path to the audio file.
            **kwargs: Extra parameters forwarded to whisper's transcribe().

        Returns:
            A result dict with text/language/duration/segments and "success": True,
            or {"error": ..., "success": False} on failure.
        """
        try:
            audio = self.load_audio(audio_path)
            if audio is None:
                # Consistency fix: all error returns now carry "success": False.
                return {"error": "音频加载失败", "success": False}

            logger.info(f"开始转录: {audio_path}")
            result = self.model.transcribe(audio, **kwargs)

            formatted_result = self._format_result(result)
            logger.info(f"转录完成: {audio_path}")
            return formatted_result

        except Exception as e:
            logger.error(f"转录失败: {e}")
            return {"error": str(e), "success": False}

    def transcribe_audio_data(self, audio_data: np.ndarray, **kwargs) -> Dict[str, Any]:
        """
        Transcribe in-memory audio data.

        Args:
            audio_data: Audio samples (assumed mono float at 16 kHz — TODO confirm
                with callers; Whisper expects 16 kHz input).
            **kwargs: Extra parameters forwarded to whisper's transcribe().

        Returns:
            A result dict with text/language/duration/segments and "success": True,
            or {"error": ..., "success": False} on failure.
        """
        try:
            logger.info("开始转录音频数据")
            result = self.model.transcribe(audio_data, **kwargs)

            formatted_result = self._format_result(result)
            logger.info("音频数据转录完成")
            return formatted_result

        except Exception as e:
            logger.error(f"音频数据转录失败: {e}")
            return {"error": str(e), "success": False}

    def get_model_info(self) -> Dict[str, Any]:
        """Return basic information about the loaded model, or an error dict."""
        if self.model is None:
            return {"error": "模型未加载"}

        return {
            "model_name": self.model_name,
            "device": self.device,
            # Total parameter count across all model tensors.
            "parameters": sum(p.numel() for p in self.model.parameters()),
            "model_type": type(self.model).__name__,
            "cuda_available": torch.cuda.is_available(),
        }

    def get_available_models(self) -> list:
        """Return the list of known Whisper model names."""
        return [
            'tiny.en', 'tiny', 'base.en', 'base', 'small.en', 'small',
            'medium.en', 'medium', 'large-v1', 'large-v2', 'large-v3',
            'large', 'large-v3-turbo', 'turbo'
        ]

# Module-level singleton cache for the Whisper API.
_whisper_instance = None

def get_whisper_instance(model_name: str = "base") -> WhisperAPI:
    """
    Return a cached WhisperAPI instance (one per model name).

    A fresh instance is built when no instance exists yet, or when a
    different model name is requested than the one currently cached.

    Args:
        model_name: Whisper model name to load.

    Returns:
        The shared WhisperAPI instance.
    """
    global _whisper_instance
    needs_new = (
        _whisper_instance is None
        or _whisper_instance.model_name != model_name
    )
    if needs_new:
        _whisper_instance = WhisperAPI(model_name)
    return _whisper_instance

def transcribe_audio(audio_path: str, model_name: str = "base", **kwargs) -> Dict[str, Any]:
    """
    Convenience wrapper: transcribe an audio file using the cached model.

    Args:
        audio_path: Path to the audio file.
        model_name: Whisper model name.
        **kwargs: Extra transcription parameters.

    Returns:
        The transcription result dict.
    """
    api = get_whisper_instance(model_name)
    return api.transcribe(audio_path, **kwargs)

def transcribe_audio_data(audio_data: np.ndarray, model_name: str = "base", **kwargs) -> Dict[str, Any]:
    """
    Convenience wrapper: transcribe in-memory audio data using the cached model.

    Args:
        audio_data: Audio sample array.
        model_name: Whisper model name.
        **kwargs: Extra transcription parameters.

    Returns:
        The transcription result dict.
    """
    api = get_whisper_instance(model_name)
    return api.transcribe_audio_data(audio_data, **kwargs)

# Manual smoke test
def test_whisper_api():
    """Smoke-test the Whisper API; transcribes test wav files when present."""
    print("=== Whisper API 测试 ===")

    api = WhisperAPI("base")

    # Model metadata and the list of selectable model names.
    print(f"模型信息: {api.get_model_info()}")
    print(f"可用模型: {api.get_available_models()}")

    # Transcribe whichever sample files exist on disk.
    for test_file in ["test_speech_1.wav", "test_speech_2.wav"]:
        if not os.path.exists(test_file):
            continue
        print(f"\n测试文件: {test_file}")
        result = api.transcribe(test_file)
        if result.get("success"):
            print(f"转录结果: {result['text']}")
            print(f"检测语言: {result['language']}")
        else:
            print(f"转录失败: {result.get('error')}")

if __name__ == "__main__":
    # Removed a redundant local `import numpy as np` — numpy is already
    # imported at module scope and was unused inside this guard.
    test_whisper_api()
