"""
对话处理服务 - 主要业务逻辑
"""

import asyncio
import json
import logging
import time
import uuid
from pathlib import Path
from typing import Dict, Optional, Tuple, Any
from datetime import datetime
import aiofiles
import subprocess

from ..core.config import settings
from ..core.logging import setup_task_logging, get_performance_logger
from ..models.models import TaskStatus, ProcessingMode, ProcessingResult, FileInfo
from ..stages import Stage1Processor, Stage2SemanticAnalyzer, Stage3ReportGenerator

logger = logging.getLogger(__name__)
perf_logger = get_performance_logger()

class DialogueProcessingService:
    """对话处理服务"""
    
    def __init__(self):
        self.tasks: Dict[str, Dict] = {}  # 任务存储
        self.active_tasks = 0
        self.stats = {
            "total_tasks": 0,
            "completed_tasks": 0,
            "failed_tasks": 0,
            "total_processing_time": 0.0,
            "total_audio_duration": 0.0
        }
    
    async def create_task(
        self, 
        file_path: Path, 
        original_filename: str,
        processing_mode: ProcessingMode = ProcessingMode.AUTO,
        chunk_size: int = 900,
        timeout: int = 3600
    ) -> str:
        """创建处理任务"""
        task_id = str(uuid.uuid4())
        
        # 获取文件信息
        file_info = await self._get_file_info(file_path, original_filename)
        
        # 创建任务记录
        task_data = {
            "task_id": task_id,
            "status": TaskStatus.PENDING,
            "file_path": str(file_path),
            "original_filename": original_filename,
            "file_info": file_info,
            "processing_mode": processing_mode,
            "chunk_size": chunk_size,
            "timeout": timeout,
            "created_at": datetime.now(),
            "updated_at": None,
            "progress": 0.0,
            "stages": {},
            "result": None,
            "error_message": None
        }
        
        self.tasks[task_id] = task_data
        self.stats["total_tasks"] += 1
        
        logger.info(f"创建任务: {task_id}, 文件: {original_filename}")
        return task_id
    
    async def process_task(self, task_id: str) -> Dict:
        """处理任务"""
        if task_id not in self.tasks:
            raise ValueError(f"任务不存在: {task_id}")
        
        task_data = self.tasks[task_id]
        
        if task_data["status"] != TaskStatus.PENDING:
            raise ValueError(f"任务状态错误: {task_data['status']}")
        
        # 检查并发任务数
        if self.active_tasks >= settings.MAX_CONCURRENT_TASKS:
            raise ValueError("达到最大并发任务数限制")
        
        # 设置任务日志记录器
        task_logger = setup_task_logging(task_id)
        
        self.active_tasks += 1
        
        try:
            # 更新状态
            await self._update_task_status(task_id, TaskStatus.PROCESSING, 0.0)
            task_logger.info("任务开始处理")
            
            # 开始处理
            start_time = time.time()
            result = await self._execute_processing_pipeline(task_id, task_logger)
            processing_time = time.time() - start_time
            
            # 记录性能指标
            perf_logger.info(f"Task {task_id} completed in {processing_time:.2f}s")
            
            # 更新统计信息
            self.stats["completed_tasks"] += 1
            self.stats["total_processing_time"] += processing_time
            if "duration" in task_data["file_info"]:
                self.stats["total_audio_duration"] += task_data["file_info"]["duration"]
            
            # 更新任务结果
            task_data["result"] = result
            task_data["processing_time"] = processing_time
            task_data["completed_at"] = datetime.now()
            
            await self._update_task_status(task_id, TaskStatus.COMPLETED, 100.0)
            
            task_logger.info(f"任务处理完成，耗时: {processing_time:.2f}秒")
            logger.info(f"任务完成: {task_id}, 耗时: {processing_time:.2f}秒")
            return result
            
        except Exception as e:
            task_logger.error(f"任务处理失败: {str(e)}")
            logger.error(f"任务处理失败: {task_id}, 错误: {str(e)}")
            
            # 更新错误信息
            task_data["error_message"] = str(e)
            self.stats["failed_tasks"] += 1
            
            await self._update_task_status(task_id, TaskStatus.FAILED, task_data.get("progress", 0.0))
            raise
            
        finally:
            self.active_tasks -= 1
    
    async def _execute_processing_pipeline(self, task_id: str, task_logger) -> Dict:
        """执行处理流水线"""
        task_data = self.tasks[task_id]
        file_path = Path(task_data["file_path"])
        output_dir = settings.OUTPUT_DIR / task_id
        output_dir.mkdir(parents=True, exist_ok=True)
        
        stages_result = {}
        
        try:
            # Stage 1: 语音转录
            task_logger.info("开始Stage1语音转录")
            await self._update_task_progress(task_id, 10.0, "开始语音转录...")
            
            stage1_processor = Stage1Processor(file_path, output_dir)
            stage1_result = stage1_processor.process()
            stages_result["stage1"] = stage1_result
            task_data["stages"]["stage1"] = stage1_result
            
            await self._update_task_progress(task_id, 40.0, "语音转录完成")
            task_logger.info("Stage1转录完成")
            
            # Stage 2: 语义分析
            task_logger.info("开始Stage2语义分析")
            await self._update_task_progress(task_id, 50.0, "开始语义分析...")
            
            stage2_analyzer = Stage2SemanticAnalyzer(
                stage1_result, 
                output_dir, 
                mode=task_data["processing_mode"]
            )
            stage2_result = stage2_analyzer.process()
            stages_result["stage2"] = stage2_result
            task_data["stages"]["stage2"] = stage2_result
            
            await self._update_task_progress(task_id, 75.0, "语义分析完成")
            task_logger.info("Stage2分析完成")
            
            # Stage 3: 报告生成
            task_logger.info("开始Stage3报告生成")
            await self._update_task_progress(task_id, 85.0, "开始生成报告...")
            
            stage3_generator = Stage3ReportGenerator(stage2_result, output_dir)
            stage3_result = stage3_generator.process()
            stages_result["stage3"] = stage3_result
            task_data["stages"]["stage3"] = stage3_result
            
            await self._update_task_progress(task_id, 95.0, "报告生成完成")
            task_logger.info("Stage3报告生成完成")
            
            # 构建最终结果
            final_result = self._build_final_result(task_id, stages_result)
            
            await self._update_task_progress(task_id, 100.0, "处理完成")
            task_logger.info("整体处理流程完成")
            return final_result
            
        except Exception as e:
            task_logger.error(f"处理流水线失败: {str(e)}")
            logger.error(f"任务 {task_id} 处理流水线失败: {str(e)}")
            await self._update_task_progress(task_id, task_data.get("progress", 0.0), f"处理失败: {str(e)}")
            raise
    
    def _build_final_result(self, task_id: str, stages_result: Dict) -> Dict:
        """构建最终结果"""
        task_data = self.tasks[task_id]
        
        result = {
            "task_id": task_id,
            "status": TaskStatus.COMPLETED,
            "file_info": task_data["file_info"],
            "processing_time": task_data.get("processing_time", 0.0),
            "stages": stages_result,
            "created_at": task_data["created_at"],
            "completed_at": task_data.get("completed_at")
        }
        
        # Stage1 结果
        if "stage1" in stages_result:
            stage1_data = stages_result["stage1"]["data"]
            result.update({
                "transcript_file": stages_result["stage1"]["stage1_file"],
                "total_segments": stage1_data["total_segments"],
                "speaker_count": stage1_data["speaker_count"]
            })
        
        # Stage2 结果
        if "stage2" in stages_result:
            stage2_data = stages_result["stage2"]["data"]
            result.update({
                "analysis_file": stages_result["stage2"]["stage2_file"],
                "prd_file": stages_result["stage2"].get("prd_file"),
                "meeting_notes_file": stages_result["stage2"].get("meeting_notes_file"),
                "total_requirements": stage2_data["total_requirements"],
                "requirements": stage2_data["requirements"],
                "conflicts": stage2_data.get("requirement_conflicts", [])
            })
            
            # 提取说话人信息
            speakers = []
            for speaker_id, role in stage2_data["speaker_roles"].items():
                segments_count = sum(1 for req in stage2_data["requirements"] if req["speaker"] == speaker_id)
                speakers.append({
                    "speaker_id": speaker_id,
                    "role": role,
                    "segments_count": segments_count
                })
            result["speakers"] = speakers
        
        # Stage3 结果
        if "stage3" in stages_result:
            stage3_data = stages_result["stage3"]["data"]
            result.update({
                "report_files": stage3_data["generated_files"],
                "final_summary": stage3_data["summary"]
            })
        
        return result
    
    async def get_task_status(self, task_id: str, include_details: bool = False) -> Dict:
        """获取任务状态"""
        if task_id not in self.tasks:
            raise ValueError(f"任务不存在: {task_id}")
        
        task_data = self.tasks[task_id]
        
        result = {
            "task_id": task_id,
            "status": task_data["status"],
            "progress": task_data["progress"],
            "message": task_data.get("message", ""),
            "created_at": task_data["created_at"],
            "updated_at": task_data.get("updated_at")
        }
        
        if task_data["status"] == TaskStatus.FAILED:
            result["error_message"] = task_data.get("error_message")
        
        if include_details and task_data["status"] == TaskStatus.COMPLETED:
            result["result"] = task_data.get("result")
        
        return result
    
    async def get_task_result(self, task_id: str) -> Dict:
        """获取任务结果"""
        if task_id not in self.tasks:
            raise ValueError(f"任务不存在: {task_id}")
        
        task_data = self.tasks[task_id]
        
        if task_data["status"] != TaskStatus.COMPLETED:
            raise ValueError(f"任务未完成: {task_data['status']}")
        
        return task_data["result"]
    
    async def get_file_content(self, task_id: str, file_type: str) -> Tuple[str, str]:
        """获取文件内容"""
        if task_id not in self.tasks:
            raise ValueError(f"任务不存在: {task_id}")
        
        task_data = self.tasks[task_id]
        if task_data["status"] != TaskStatus.COMPLETED:
            raise ValueError(f"任务未完成: {task_data['status']}")
        
        result = task_data["result"]
        file_path = None
        
        # 根据文件类型获取路径
        if file_type == "transcript":
            file_path = result.get("transcript_file")
        elif file_type == "analysis":
            file_path = result.get("analysis_file")
        elif file_type == "prd":
            file_path = result.get("prd_file")
        elif file_type == "meeting_notes":
            file_path = result.get("meeting_notes_file")
        elif file_type in result.get("report_files", {}):
            file_path = result["report_files"][file_type]
        
        if not file_path or not Path(file_path).exists():
            raise ValueError(f"文件不存在: {file_type}")
        
        # 读取文件内容
        async with aiofiles.open(file_path, 'r', encoding='utf-8') as f:
            content = await f.read()
        
        # 确定 MIME 类型
        mime_type = "text/markdown" if file_path.endswith('.md') else "application/json"
        
        return content, mime_type
    
    async def _get_file_info(self, file_path: Path, original_filename: str) -> Dict:
        """获取文件信息"""
        file_info = {
            "filename": original_filename,
            "size": file_path.stat().st_size,
            "format": file_path.suffix.lower()
        }
        
        # 尝试获取音频时长
        try:
            duration = await self._get_audio_duration(file_path)
            file_info["duration"] = duration
        except Exception as e:
            logger.warning(f"无法获取音频时长: {str(e)}")
        
        return file_info
    
    async def _get_audio_duration(self, file_path: Path) -> float:
        """获取音频时长"""
        try:
            result = await asyncio.create_subprocess_exec(
                'ffprobe', '-v', 'error', '-show_entries', 
                'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1',
                str(file_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            stdout, stderr = await result.communicate()
            return float(stdout.decode().strip())
        except:
            # 回退方案
            import wave
            with wave.open(str(file_path), 'rb') as wav_file:
                frames = wav_file.getnframes()
                rate = wav_file.getframerate()
                return frames / float(rate)
    
    async def _update_task_status(self, task_id: str, status: TaskStatus, progress: float):
        """更新任务状态"""
        if task_id in self.tasks:
            self.tasks[task_id]["status"] = status
            self.tasks[task_id]["progress"] = progress
            self.tasks[task_id]["updated_at"] = datetime.now()
    
    async def _update_task_progress(self, task_id: str, progress: float, message: str = ""):
        """更新任务进度"""
        if task_id in self.tasks:
            self.tasks[task_id]["progress"] = progress
            self.tasks[task_id]["message"] = message
            self.tasks[task_id]["updated_at"] = datetime.now()
    
    def get_stats(self) -> Dict:
        """获取服务统计信息"""
        stats = self.stats.copy()
        stats["active_tasks"] = self.active_tasks
        
        if stats["completed_tasks"] > 0:
            stats["average_processing_time"] = stats["total_processing_time"] / stats["completed_tasks"]
        else:
            stats["average_processing_time"] = 0.0
        
        return stats
    
    def cleanup_old_tasks(self, days: int = 7):
        """清理旧任务"""
        cutoff_time = datetime.now().timestamp() - (days * 24 * 3600)
        
        to_remove = []
        for task_id, task_data in self.tasks.items():
            if task_data["created_at"].timestamp() < cutoff_time:
                to_remove.append(task_id)
        
        for task_id in to_remove:
            # 清理输出文件
            try:
                output_dir = settings.OUTPUT_DIR / task_id
                if output_dir.exists():
                    import shutil
                    shutil.rmtree(output_dir)
            except Exception as e:
                logger.warning(f"清理任务文件失败 {task_id}: {str(e)}")
            
            # 移除任务记录
            del self.tasks[task_id]
        
        logger.info(f"清理了 {len(to_remove)} 个旧任务")

# Module-level singleton: the global service instance shared by the API layer.
dialogue_service = DialogueProcessingService()