import os
import time
import json
import uuid
from typing import Dict, Any, List, Optional, Tuple
from fastapi import HTTPException
from loguru import logger
import requests
from pydub import AudioSegment
import speech_recognition as sr

from app.models.transcription import Transcription, TranscriptionSegment
from app.utils.database import db_manager
from config import config


class ASRService:
    """Automatic speech recognition (ASR) service.

    Selects a primary and a fallback ASR backend from application config,
    transcribes audio files, and persists both the aggregate transcription
    and its per-segment details through ``db_manager`` sessions.  Only the
    Google (free SpeechRecognition) and Whisper backends are implemented;
    Azure / Baidu / Aliyun are stubs that raise when invoked.
    """

    def __init__(self):
        """Initialize recognizer state and load backend configuration."""
        self.recognizer = sr.Recognizer()

        # Backend selection, e.g. primary "whisper" with fallback "google".
        self.primary_service = config.get_primary_asr_service()
        self.fallback_service = config.get_fallback_asr_service()

        # Per-backend settings; expected shape {"enabled": bool, "config": {...}}
        # (see usage in _whisper_transcribe / _google_transcribe).
        self.google_config = config.get_asr_service_config("google")
        self.whisper_config = config.get_asr_service_config("whisper")
        self.azure_config = config.get_asr_service_config("azure")
        self.baidu_config = config.get_asr_service_config("baidu")
        self.aliyun_config = config.get_asr_service_config("aliyun")

        # Speaker diarization settings (feature currently disabled; see
        # _perform_speaker_diarization).
        self.speaker_diarization_config = config.SPEAKER_DIARIZATION

        logger.info(f"ASR服务初始化完成 - 主要服务: {self.primary_service}, 备用服务: {self.fallback_service}")

    def transcribe_audio(self, video_id: str, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe an audio file and persist the result.

        Args:
            video_id: ID of the video the audio belongs to.
            audio_path: Path to the audio file on disk.
            language: Optional language code; auto-detected when omitted.

        Returns:
            Dict with ``success``, ``transcription_id``, ``message`` and the
            merged transcription ``result``.

        Raises:
            HTTPException: 500 on any failure; the DB record (if it was
                created) is marked "failed" first.
        """
        start_time = time.time()
        # Initialized up front so the error path can reliably tell whether
        # the DB record exists (instead of probing locals()).
        transcription_id: Optional[str] = None

        try:
            # Create the DB record and mark it as in progress.
            transcription_id = self._create_transcription_record(video_id)
            self._update_transcription_status(transcription_id, "processing")

            # Duration is best-effort; 0.0 when the file cannot be decoded.
            audio_duration = self._get_audio_duration(audio_path)

            # Run the actual speech recognition.
            transcript_result = self._perform_asr(audio_path, language)

            # Speaker diarization is currently disabled; use an empty result.
            speaker_result = {"speakers": [], "segments": []}

            # Align ASR segments with speaker segments (no-op while disabled).
            final_result = self._merge_results(transcript_result, speaker_result)

            # Persist aggregate result, then the individual segments.
            self._save_transcription_result(
                transcription_id,
                final_result,
                audio_duration,
                time.time() - start_time
            )
            self._save_transcription_segments(transcription_id, final_result.get("segments", []))
            self._update_transcription_status(transcription_id, "completed")

            logger.info(f"音频转录完成: {transcription_id}")

            return {
                'success': True,
                'transcription_id': transcription_id,
                'message': '音频转录成功',
                'result': final_result
            }

        except Exception as e:
            error_msg = f"音频转录失败: {str(e)}"
            logger.error(error_msg)

            # Mark the record as failed only if it was actually created.
            if transcription_id is not None:
                self._update_transcription_status(transcription_id, "failed", error_msg)

            raise HTTPException(status_code=500, detail=error_msg)

    def get_transcription(self, transcription_id: str) -> Dict[str, Any]:
        """Fetch a stored transcription with its ordered segments.

        Args:
            transcription_id: Primary key of the transcription record.

        Returns:
            Dict with ``success`` and the ``transcription`` payload
            (record fields plus a ``segments`` list sorted by start time).

        Raises:
            HTTPException: 404 when the record does not exist, 500 on
                other failures.
        """
        try:
            with db_manager.get_session() as session:
                transcription = session.query(Transcription).filter(
                    Transcription.id == transcription_id
                ).first()

                if not transcription:
                    raise HTTPException(status_code=404, detail="转录记录不存在")

                # Segments are returned in chronological order.
                segments = session.query(TranscriptionSegment).filter(
                    TranscriptionSegment.transcription_id == transcription_id
                ).order_by(TranscriptionSegment.start_time).all()

                result = transcription.to_dict()
                result['segments'] = [segment.to_dict() for segment in segments]

                return {
                    'success': True,
                    'transcription': result
                }

        except HTTPException:
            # Preserve intentional 404s; do not wrap them as 500.
            raise
        except Exception as e:
            logger.error(f"获取转录结果失败: {e}")
            raise HTTPException(status_code=500, detail=f"获取转录结果失败: {str(e)}")

    def get_video_transcription(self, video_id: str) -> Dict[str, Any]:
        """Fetch the most recent transcription for a video.

        Args:
            video_id: ID of the video.

        Returns:
            Dict with ``success`` and the ``transcription`` payload
            (latest record by creation time, with ordered segments).

        Raises:
            HTTPException: 404 when no transcription exists for the video,
                500 on other failures.
        """
        try:
            with db_manager.get_session() as session:
                # Latest record wins when a video was transcribed repeatedly.
                transcription = session.query(Transcription).filter(
                    Transcription.video_id == video_id
                ).order_by(Transcription.created_at.desc()).first()

                if not transcription:
                    raise HTTPException(status_code=404, detail="未找到该视频的转录记录")

                segments = session.query(TranscriptionSegment).filter(
                    TranscriptionSegment.transcription_id == transcription.id
                ).order_by(TranscriptionSegment.start_time).all()

                result = transcription.to_dict()
                result['segments'] = [segment.to_dict() for segment in segments]

                return {
                    'success': True,
                    'transcription': result
                }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"获取视频转录结果失败: {e}")
            raise HTTPException(status_code=500, detail=f"获取视频转录结果失败: {str(e)}")

    def _create_transcription_record(self, video_id: str) -> str:
        """Create a new pending Transcription row and return its id."""
        transcription_id = str(uuid.uuid4())

        with db_manager.get_session() as session:
            transcription = Transcription(
                id=transcription_id,
                video_id=video_id,
                status="pending"
            )
            session.add(transcription)
            session.commit()

        return transcription_id

    def _update_transcription_status(self, transcription_id: str, status: str, error_message: Optional[str] = None):
        """Update a transcription's status (and error message, if given).

        Silently does nothing when the record does not exist.
        """
        with db_manager.get_session() as session:
            transcription = session.query(Transcription).filter(
                Transcription.id == transcription_id
            ).first()

            if transcription:
                transcription.status = status
                if error_message:
                    transcription.error_message = error_message
                session.commit()

    def _get_audio_duration(self, audio_path: str) -> float:
        """Return the audio duration in seconds; 0.0 when undecodable."""
        try:
            audio = AudioSegment.from_file(audio_path)
            # pydub reports length in milliseconds.
            return len(audio) / 1000.0
        except Exception as e:
            logger.warning(f"无法获取音频时长: {e}")
            return 0.0

    def _dispatch_asr(self, service: str, audio_path: str, language: Optional[str]) -> Optional[Dict[str, Any]]:
        """Route a transcription request to the named backend.

        Returns the backend's result dict, or None when the service name is
        unknown or the backend is disabled in config.  Centralizes the
        dispatch chain that was previously duplicated for the primary and
        fallback attempts.
        """
        if service == "google":
            # The free Google recognizer needs no enable flag.
            return self._google_transcribe(audio_path, language)
        if service == "whisper" and self.whisper_config["enabled"]:
            return self._whisper_transcribe(audio_path, language)
        if service == "azure" and self.azure_config["enabled"]:
            return self._azure_transcribe(audio_path, language)
        if service == "baidu" and self.baidu_config["enabled"]:
            return self._baidu_transcribe(audio_path, language)
        if service == "aliyun" and self.aliyun_config["enabled"]:
            return self._aliyun_transcribe(audio_path, language)
        return None

    def _perform_asr(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Run ASR with primary → fallback → Google last-resort ordering."""
        try:
            # 1) Primary service.
            try:
                result = self._dispatch_asr(self.primary_service, audio_path, language)
                if result is not None:
                    return result
                raise Exception(f"主要ASR服务 {self.primary_service} 不可用")

            except Exception as primary_error:
                logger.warning(f"主要ASR服务 {self.primary_service} 失败: {primary_error}")

                # 2) Fallback service (skip if identical to the primary).
                if self.fallback_service != self.primary_service:
                    try:
                        result = self._dispatch_asr(self.fallback_service, audio_path, language)
                        if result is not None:
                            return result
                    except Exception as fallback_error:
                        logger.error(f"备用ASR服务 {self.fallback_service} 也失败: {fallback_error}")

                # 3) Last resort: the free Google recognizer.
                logger.info("使用Google Speech Recognition作为兜底方案")
                return self._google_transcribe(audio_path, language)

        except Exception as e:
            logger.error(f"所有ASR服务都失败: {e}")
            raise

    def _whisper_transcribe(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe via the OpenAI Whisper HTTP API.

        Raises:
            Exception: when the service is disabled or the API call fails.
        """
        if not self.whisper_config["enabled"]:
            raise Exception("Whisper服务未启用")

        config_data = self.whisper_config["config"]
        headers = {
            "Authorization": f"Bearer {config_data['api_key']}"
        }

        data = {
            "model": config_data["model"],
            "response_format": config_data["response_format"],
            "timestamp_granularities": config_data["timestamp_granularities"]
        }

        if language:
            data["language"] = language

        with open(audio_path, "rb") as audio_file:
            files = {"file": audio_file}
            # A timeout is required: without one a stalled API call would
            # block this request forever. Configurable, generous default
            # because audio uploads can be large.
            response = requests.post(
                config_data["api_url"],
                headers=headers,
                data=data,
                files=files,
                timeout=config_data.get("timeout", 300),
            )

        if response.status_code != 200:
            raise Exception(f"Whisper API错误: {response.text}")

        result = response.json()

        logger.info(f"Whisper API转录成功: {len(result.get('text', ''))}字符")

        return {
            "text": result.get("text", ""),
            "language": result.get("language", language or "unknown"),
            "segments": result.get("segments", []),
            "words": result.get("words", [])
        }

    def _google_transcribe(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe via Google Speech Recognition (free, no API key)."""
        # Resolve the language first so it is always bound when the
        # UnknownValueError handler below builds the empty result.
        lang = language or config.ASR_CONFIG["default_language"]
        lang = config.get_mapped_language(lang)

        try:
            config_data = self.google_config["config"]

            with sr.AudioFile(audio_path) as source:
                # Optional ambient-noise calibration to improve accuracy.
                if config_data.get("use_ambient_noise_reduction", True):
                    self.recognizer.adjust_for_ambient_noise(source, duration=config_data.get("ambient_duration", 0.5))
                audio = self.recognizer.record(source)

            text = self.recognizer.recognize_google(audio, language=lang)

            logger.info(f"Google Speech Recognition转录成功: {len(text)}字符")

            return {
                "text": text,
                "language": lang,
                # Fixed value: this recognizer exposes no confidence score.
                "confidence": 0.8,
                # Single whole-clip segment; "end" is a character count, not
                # seconds (no word timing is available from this backend).
                "segments": [{"start": 0, "end": len(text), "text": text, "confidence": 0.8}],
                "words": []
            }

        except sr.UnknownValueError:
            # Unintelligible audio is not an error: return an empty result.
            logger.warning("Google Speech Recognition无法识别音频内容")
            return {
                "text": "",
                "language": lang,
                "confidence": 0.0,
                "segments": [],
                "words": []
            }
        except sr.RequestError as e:
            logger.error(f"Google Speech Recognition请求失败: {e}")
            raise Exception(f"Google Speech Recognition服务错误: {e}")

    def _azure_transcribe(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe via Azure Speech Services (not implemented)."""
        if not self.azure_config["enabled"]:
            raise Exception("Azure Speech Services未启用")

        # TODO: implement the Azure Speech Services API call.
        logger.info("Azure Speech Services功能待实现")
        raise Exception("Azure Speech Services功能暂未实现")

    def _baidu_transcribe(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe via Baidu speech recognition (not implemented)."""
        if not self.baidu_config["enabled"]:
            raise Exception("百度语音识别服务未启用")

        # TODO: implement the Baidu speech recognition API call.
        logger.info("百度语音识别功能待实现")
        raise Exception("百度语音识别功能暂未实现")

    def _aliyun_transcribe(self, audio_path: str, language: Optional[str] = None) -> Dict[str, Any]:
        """Transcribe via Aliyun intelligent speech (not implemented)."""
        if not self.aliyun_config["enabled"]:
            raise Exception("阿里云智能语音服务未启用")

        # TODO: implement the Aliyun intelligent speech API call.
        logger.info("阿里云智能语音功能待实现")
        raise Exception("阿里云智能语音功能暂未实现")

    def _perform_speaker_diarization(self, audio_path: str) -> Dict[str, Any]:
        """Speaker diarization placeholder (feature temporarily disabled)."""
        logger.info("说话人识别功能暂时禁用")
        return {"speakers": [], "segments": []}

    def _merge_results(self, asr_result: Dict[str, Any], speaker_result: Dict[str, Any]) -> Dict[str, Any]:
        """Merge ASR output with speaker-diarization output.

        Each ASR segment is assigned the first speaker segment whose time
        range overlaps either of its endpoints; ``speaker_id`` stays None
        when no speaker segment matches (always the case while diarization
        is disabled).
        """
        merged_segments = []

        asr_segments = asr_result.get("segments", [])
        speaker_segments = speaker_result.get("segments", [])

        for asr_seg in asr_segments:
            segment = {
                "start_time": asr_seg.get("start", 0),
                "end_time": asr_seg.get("end", 0),
                "text": asr_seg.get("text", ""),
                "confidence": asr_seg.get("confidence", 0.0),
                "speaker_id": None,
                "language": asr_result.get("language", "unknown")
            }

            # First overlapping speaker segment wins.
            for spk_seg in speaker_segments:
                if (spk_seg["start"] <= segment["start_time"] <= spk_seg["end"] or
                    spk_seg["start"] <= segment["end_time"] <= spk_seg["end"]):
                    segment["speaker_id"] = spk_seg["speaker"]
                    break

            merged_segments.append(segment)

        return {
            "text": asr_result.get("text", ""),
            "language": asr_result.get("language", "unknown"),
            "confidence": asr_result.get("confidence", 0.0),
            "speakers": speaker_result.get("speakers", []),
            "segments": merged_segments,
            # NOTE(review): whitespace split undercounts for languages
            # written without spaces (e.g. Chinese) — kept as-is.
            "word_count": len(asr_result.get("text", "").split())
        }

    def _save_transcription_result(self, transcription_id: str, result: Dict[str, Any], duration: float, processing_time: float):
        """Persist the aggregate transcription fields onto the record."""
        with db_manager.get_session() as session:
            transcription = session.query(Transcription).filter(
                Transcription.id == transcription_id
            ).first()

            if transcription:
                transcription.transcript_text = result.get("text", "")
                transcription.language_detected = result.get("language", "unknown")
                transcription.confidence_score = result.get("confidence", 0.0)
                transcription.speaker_count = len(result.get("speakers", []))
                transcription.speaker_mapping = {"speakers": result.get("speakers", [])}
                transcription.duration = duration
                transcription.word_count = result.get("word_count", 0)
                transcription.processing_time = processing_time

                session.commit()

    def _save_transcription_segments(self, transcription_id: str, segments: List[Dict[str, Any]]):
        """Persist one TranscriptionSegment row per merged segment."""
        with db_manager.get_session() as session:
            for segment_data in segments:
                # .get()'s default only applies when the key is MISSING, but
                # _merge_results always sets "speaker_id" (possibly to None),
                # so map None to "Unknown" explicitly to avoid "Speaker None".
                speaker_id = segment_data.get("speaker_id")
                speaker_label = f"Speaker {speaker_id if speaker_id is not None else 'Unknown'}"

                segment = TranscriptionSegment(
                    transcription_id=transcription_id,
                    start_time=segment_data.get("start_time", 0),
                    end_time=segment_data.get("end_time", 0),
                    text=segment_data.get("text", ""),
                    confidence=segment_data.get("confidence", 0.0),
                    speaker_id=speaker_id,
                    speaker_label=speaker_label,
                    language=segment_data.get("language", "unknown")
                )
                session.add(segment)

            session.commit()