"""
研究服务
提供研究助理功能的统一接口
"""

import subprocess
import os
from typing import Dict, Any, Optional, List
import asyncio
from datetime import datetime

from src.research_core.workflow_manager import WorkflowManager
from src.research_core.cache_manager import cache_manager
from src.services.intelligent_decision_service import get_intelligent_decision_service
from src.utils.logging import get_logger
from src.config.settings import settings

logger = get_logger(__name__)

class ResearchService:
    """Unified entry point for the research-assistant features."""

    def __init__(self):
        """Wire up collaborators, the MCP process table, then launch MCP servers."""
        # Track spawned MCP child processes by server name so __del__ can
        # terminate them; must exist before _start_mcp_servers() runs.
        self.mcp_processes: Dict[str, subprocess.Popen] = {}

        self.workflow_manager = WorkflowManager()
        self.cache_manager = cache_manager
        self.intelligent_decision_service = get_intelligent_decision_service()

        # Spawn whichever MCP servers the configuration enables.
        self._start_mcp_servers()

        logger.info("研究服务初始化完成")
    
    def _start_mcp_servers(self):
        """Start every MCP server enabled in the settings.

        No-op when MCP support is globally disabled. A failure to start one
        server is logged and does not stop the remaining servers.
        """
        if not settings.MCP_ENABLED:
            return

        for raw_name in settings.MCP_SERVERS:
            server_name = raw_name.strip()
            if not server_name or not settings.is_mcp_server_enabled(server_name):
                continue
            try:
                # Dispatch on the server type; add new server kinds here.
                if server_name == "search-server":
                    self._start_search_server()
            except Exception as e:
                logger.error(f"启动MCP服务器 {server_name} 失败: {e}")
    
    def _start_search_server(self):
        """Launch the search MCP server (currently a placeholder).

        A concrete implementation should spawn the server process and record
        it for cleanup, e.g.::

            process = subprocess.Popen(["python", "path/to/search_server.py"])
            self.mcp_processes["search-server"] = process
        """
        try:
            # Intentionally a no-op until a real search server is wired in.
            pass
        except Exception as e:
            logger.error(f"启动搜索服务器失败: {e}")
    
    def __del__(self):
        """Best-effort termination of any MCP child processes still running."""
        for server_name, process in self.mcp_processes.items():
            try:
                # poll() returning None means the child has not exited yet.
                if process is not None and process.poll() is None:
                    process.terminate()
                    # Give the child up to five seconds to shut down.
                    process.wait(timeout=5)
            except Exception as e:
                logger.error(f"终止MCP进程 {server_name} 失败: {e}")
    
    async def execute_research(self, question: str, 
                             user_id: Optional[int] = None,
                             workflow_type: Optional[str] = None,
                             context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Execute a research task end to end.

        Pipeline: cache lookup -> workflow selection -> compilation ->
        timed invocation -> decision-model feedback -> cache store.

        Args:
            question: The research question.
            user_id: Optional user id; enables intelligent workflow selection
                and decision-model updates.
            workflow_type: Optional explicit workflow name. When omitted it is
                chosen intelligently (if user_id is given), else "multi-agent".
            context: Optional context forwarded to workflow selection.

        Returns:
            Dict[str, Any]: Result dict with question, answer, analysis,
            search_results, success, execution_time, workflow_used and
            timestamp. Cache hits additionally carry ``from_cache=True``;
            on failure ``success`` is False and ``answer`` holds the error.

        Raises:
            asyncio.CancelledError: Re-raised so callers can observe
                cancellation; every other exception is converted into an
                error result dict instead of propagating.
        """
        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12+;
        # kept here so the emitted timestamps stay format-compatible.
        start_time = datetime.utcnow()
        
        try:
            context = context or {}
            
            # Serve repeat (question, workflow) requests from the cache.
            cache_key = self._generate_cache_key(question, workflow_type)
            cached_result = self.cache_manager.get(cache_key)
            if cached_result:
                logger.info(f"缓存命中: {question[:50]}...")
                return {
                    **cached_result,
                    "from_cache": True,
                    "execution_time": 0
                }
            
            # Intelligently pick a workflow when none was requested.
            if not workflow_type and user_id:
                workflow_type = self.intelligent_decision_service.select_optimal_workflow(
                    user_id, question, context
                )
                logger.info(f"智能选择工作流: {workflow_type}")
            
            # Default to the multi-agent workflow.
            if not workflow_type:
                workflow_type = "multi-agent"
            
            workflow = self.workflow_manager.get_workflow(workflow_type)
            
            # Raise the recursion limit to 50 to avoid LangGraph
            # recursion-limit errors on deep agent graphs.
            from langchain_core.runnables import RunnableConfig
            config = RunnableConfig(recursion_limit=50)
            
            # The workflow may already be a CompiledStateGraph (which exposes
            # ainvoke); otherwise compile it first so it is executable.
            from langgraph.graph.state import CompiledStateGraph
            
            if isinstance(workflow, CompiledStateGraph):
                compiled_app = workflow
            else:
                compiled_app = workflow.compile()
            
            # Timeout from the environment, default 300 seconds (5 minutes).
            timeout = int(os.getenv("RESEARCH_WORKFLOW_TIMEOUT", 300))
            # Fixed: route this through the module logger instead of print()
            # so the message reaches the configured log handlers.
            logger.info(f"⏳ 工作流执行超时时间设置为: {timeout}秒")
            
            try:
                result = await asyncio.wait_for(
                    compiled_app.ainvoke({"question": question}, config=config),
                    timeout=timeout
                )
            except asyncio.TimeoutError:
                logger.warning(f"工作流执行超时: {question[:50]}...")
                # Converted into an error result by the outer handler below.
                raise Exception(f"工作流执行超时（{timeout}秒），请稍后重试或尝试更简单的问题")
            except asyncio.CancelledError:
                logger.warning(f"工作流执行被取消: {question[:50]}...")
                # Re-raise CancelledError (a BaseException in 3.8+) rather
                # than wrapping it, so callers can handle cancellation; the
                # outer "except Exception" deliberately does not catch it.
                raise
            
            execution_time = (datetime.utcnow() - start_time).total_seconds()
            
            # Normalize the workflow output into the public result shape.
            final_result = {
                "question": question,
                "answer": result.get("final_answer", result.get("answer", "")),
                "analysis": result.get("analysis_results", result.get("analysis", {})),
                "search_results": result.get("search_results", ""),
                "success": True,
                "execution_time": execution_time,
                "workflow_used": workflow_type,
                "timestamp": datetime.utcnow().isoformat()
            }
            
            # Feed the outcome back into this user's decision model.
            if user_id:
                execution_result = {
                    "success": True,
                    "result_type": "research",
                    "execution_time": execution_time,
                    "quality_score": self._evaluate_result_quality(final_result),
                    "topics": self._extract_topics(question, final_result)
                }
                
                self.intelligent_decision_service.update_decision_model(
                    user_id, question, workflow_type, execution_result
                )
            
            # Cache the successful result for subsequent identical requests.
            self.cache_manager.set(cache_key, final_result)
            
            logger.info(f"研究任务执行完成: {question[:50]}..., 耗时: {execution_time:.2f}秒")
            return final_result
            
        except Exception as e:
            execution_time = (datetime.utcnow() - start_time).total_seconds()
            logger.error(f"研究任务执行失败: {e}", exc_info=True)
            
            error_result = {
                "question": question,
                "answer": f"研究任务执行失败: {str(e)}",
                "analysis": {},
                "search_results": "",
                "success": False,
                "execution_time": execution_time,
                "workflow_used": workflow_type or "multi-agent",
                "timestamp": datetime.utcnow().isoformat()
            }
            
            # Record the failure in the decision model as well.
            if user_id:
                execution_result = {
                    "success": False,
                    "result_type": "research",
                    "execution_time": execution_time,
                    "error": str(e)
                }
                
                self.intelligent_decision_service.update_decision_model(
                    user_id, question, workflow_type or "multi-agent", execution_result
                )
            
            return error_result
    
    def _calculate_complexity_factor(self, question: str) -> float:
        """Estimate how complex a research question is.

        Args:
            question: The research question text.

        Returns:
            Complexity factor clamped to [0.5, 2.0].
        """
        # Base complexity before any adjustment.
        complexity = 1.0

        # Length-based adjustment. Fixed: the original compared against 50
        # twice, leaving the 1.2 branch unreachable; thresholds now mirror
        # _calculate_length_factor (100 / 50).
        if len(question) > 100:
            complexity *= 1.5
        elif len(question) > 50:
            complexity *= 1.2
        elif len(question) < 10:
            complexity *= 0.8

        # Keyword-based adjustment: broad/deep wording implies more work.
        complex_keywords = ["全面", "深入", "详细", "综合", "系统", "全方位", "多维度"]
        medium_keywords = ["研究", "分析", "探讨", "调查", "了解"]

        if any(keyword in question for keyword in complex_keywords):
            complexity *= 1.5
        elif any(keyword in question for keyword in medium_keywords):
            complexity *= 1.2

        # Clamp to the documented range.
        return max(0.5, min(2.0, complexity))
    
    def _calculate_length_factor(self, question: str) -> float:
        """Map question length to a timeout multiplier.

        Args:
            question: The research question text.

        Returns:
            Length factor, one of 0.8, 1.2, 1.5 or 2.0.
        """
        # (exclusive lower bound, factor), checked from longest to shortest.
        thresholds = ((100, 2.0), (50, 1.5), (20, 1.2))

        n = len(question)
        for bound, factor in thresholds:
            if n > bound:
                return factor
        return 0.8
    
    def _calculate_dynamic_timeout(self, question: str) -> int:
        """Derive a workflow timeout from question complexity and length.

        Args:
            question: The research question text.

        Returns:
            Timeout in seconds, clamped to [60, 1800] (1 to 30 minutes).
        """
        base_timeout = 300  # five minutes before any scaling

        scaled = (
            base_timeout
            * self._calculate_complexity_factor(question)
            * self._calculate_length_factor(question)
        )

        # Never below one minute, never above thirty.
        return max(60, min(1800, int(scaled)))
    
    def _generate_cache_key(self, question: str, workflow_type: Optional[str]) -> str:
        """Build a deterministic cache key for a (question, workflow) pair.

        Args:
            question: The research question (str, or UTF-8 encoded bytes).
            workflow_type: Workflow name; None falls back to 'default'.

        Returns:
            Hex MD5 digest of "question:workflow".
        """
        import hashlib

        # Normalize the question into a valid unicode string first.
        try:
            normalized = question.decode('utf-8') if isinstance(question, bytes) else str(question)
        except Exception:
            # Last resort: replace undecodable bytes rather than fail.
            normalized = str(question).encode('utf-8', errors='replace').decode('utf-8')

        material = f"{normalized}:{workflow_type or 'default'}"
        return hashlib.md5(material.encode('utf-8')).hexdigest()
    
    def _evaluate_result_quality(self, result: Dict[str, Any]) -> float:
        """Heuristically score a research result.

        Args:
            result: Result dict (keys: answer, analysis, search_results).

        Returns:
            float: Quality score clamped to [0, 1].
        """
        score = 0.0

        # Longer answers earn progressively more credit (bonuses stack).
        answer = result.get("answer", "")
        for min_len, bonus in ((100, 0.3), (500, 0.3), (1000, 0.4)):
            if len(answer) > min_len:
                score += bonus

        # A non-empty analysis dict carrying content is a strong signal.
        analysis = result.get("analysis", {})
        if analysis and isinstance(analysis, dict) and analysis.get("content"):
            score += 0.5

        # Some credit for substantive search results.
        search_results = result.get("search_results", "")
        if search_results and len(search_results) > 50:
            score += 0.2

        # Raw bonuses can exceed 1.0, so clamp.
        return min(score, 1.0)
    
    def _extract_topics(self, question: str, result: Dict[str, Any]) -> List[str]:
        """Extract topic keywords from the question.

        Args:
            question: The research question.
            result: Research result (currently unused; kept for interface
                compatibility and future result-based extraction).

        Returns:
            List[str]: Unique topic words in first-occurrence order.
        """
        import re

        # Tokenize into lowercase word runs.
        words = re.findall(r'\w+', question.lower())

        # Drop common Chinese/English stop words and single characters.
        stop_words = {'的', '是', '在', '与', '和', '或', '但', '而', '了', '着', '过', 'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
        topics = [word for word in words if word not in stop_words and len(word) > 1]

        # Fixed: de-duplicate with dict.fromkeys to keep a deterministic,
        # first-occurrence order (list(set(...)) ordering was arbitrary).
        return list(dict.fromkeys(topics))

# Module-level singleton, created eagerly at import time. NOTE: constructing
# ResearchService here also starts any configured MCP servers as a side
# effect of __init__.
research_service = ResearchService()


def get_research_service() -> ResearchService:
    """Return the shared ResearchService singleton."""
    return research_service