"""
LangGraph节点定义
包含各种工作流节点
"""
import asyncio
from datetime import datetime
from typing import Any, Dict, List

from app.core.logging import get_logger
from app.graphs.core import GraphState
from app.services.ai_service import AIService

logger = get_logger(__name__)


class GraphNodes:
    """Manager for LangGraph workflow nodes.

    Every public coroutine is a graph node with the uniform signature
    ``(state: GraphState) -> GraphState``: it reads and mutates the shared
    state dict in place and routes the workflow by writing
    ``state['next_step']``.
    """

    def __init__(self):
        # Shared AI service used to obtain provider-specific LLM clients.
        self.ai_service = AIService()

    async def validate_input(self, state: GraphState) -> GraphState:
        """Validate the incoming message list before any processing.

        On failure sets ``error_occurred``/``error_message`` and routes to
        ``error_handling``; on success routes to ``tool_selection`` (for
        'tool_enhanced' workflows) or ``generate_response``.
        """
        logger.info(f"验证输入 - 状态: {state['current_step']}")

        # Reject an empty or missing conversation outright.
        if not state.get('messages'):
            state['error_occurred'] = True
            state['error_message'] = "消息列表为空"
            state['next_step'] = "error_handling"
            return state

        # This node only makes sense when the latest turn is the user's.
        last_message = state['messages'][-1]
        if last_message.get('role') != 'user':
            state['error_occurred'] = True
            state['error_message'] = "最后一条消息不是用户消息"
            state['next_step'] = "error_handling"
            return state

        # Whitespace-only content is treated as empty.
        content = last_message.get('content', '')
        if not content.strip():
            state['error_occurred'] = True
            state['error_message'] = "消息内容为空"
            state['next_step'] = "error_handling"
            return state

        # Hard upper bound on message size (characters, not tokens).
        if len(content) > 10000:
            state['error_occurred'] = True
            state['error_message'] = "消息内容过长"
            state['next_step'] = "error_handling"
            return state

        state['current_step'] = "validate_input"
        state['next_step'] = (
            "tool_selection"
            if state.get('workflow_type') == 'tool_enhanced'
            else 'generate_response'
        )

        logger.info("输入验证通过")
        return state

    async def tool_selection(self, state: GraphState) -> GraphState:
        """Select tools via simple keyword matching on the latest message.

        Writes the chosen tool names to ``state['tools_used']`` and routes
        to ``tool_execution`` when any matched, else ``generate_response``.
        """
        logger.info("执行工具选择")

        content = state['messages'][-1].get('content', '').lower()

        # Keyword heuristics; list order fixes tool execution order.
        keyword_map = [
            ('search', ('搜索', '查找', '查询', 'search', 'find')),
            ('calculator', ('计算', '算数', 'calculate', 'math')),
            ('code_executor', ('代码', '编程', 'code', 'program')),
        ]
        tools_to_use = [
            tool for tool, keywords in keyword_map
            if any(word in content for word in keywords)
        ]

        state['tools_used'] = tools_to_use
        state['current_step'] = "tool_selection"
        state['next_step'] = "tool_execution" if tools_to_use else "generate_response"

        logger.info(f"选择的工具: {tools_to_use}")
        return state

    async def tool_execution(self, state: GraphState) -> GraphState:
        """Run every selected tool and collect per-call results.

        Failures are captured per tool (``success: False``) instead of
        aborting the node, so one broken tool does not stop the others.
        """
        logger.info("执行工具调用")

        # Dispatch table: tool name -> coroutine implementing it.
        tool_handlers = {
            'search': self._mock_search,
            'calculator': self._mock_calculator,
            'code_executor': self._mock_code_executor,
        }

        function_calls = []
        for tool in state.get('tools_used', []):
            handler = tool_handlers.get(tool)
            if handler is None:
                # Unknown tool names are skipped, matching the original
                # if/elif behavior.
                continue
            try:
                result = await handler(state)
                function_calls.append({
                    'tool': tool,
                    'result': result,
                    'success': True
                })
            except Exception as e:
                logger.error(f"工具执行失败 {tool}: {e}")
                function_calls.append({
                    'tool': tool,
                    'result': f"工具执行失败: {str(e)}",
                    'success': False
                })

        state['function_calls'] = function_calls
        state['current_step'] = "tool_execution"
        state['next_step'] = "generate_response"

        logger.info(f"工具执行完成: {len(function_calls)} 个调用")
        return state

    async def generate_response(self, state: GraphState) -> GraphState:
        """Generate the assistant reply via the configured LLM client.

        Injects successful tool results as a system message just before the
        latest user message, supports streaming and non-streaming modes,
        appends the reply to the message history, and tracks estimated
        token usage. On any exception, routes to ``error_handling``.
        """
        logger.info("生成LLM响应")

        try:
            messages = state['messages'].copy()

            # Surface successful tool outputs to the model as context.
            if state.get('function_calls'):
                tool_results = "\n".join(
                    f"工具 {call['tool']}: {call['result']}"
                    for call in state['function_calls'] if call['success']
                )

                if tool_results:
                    system_message = {
                        'role': 'system',
                        'content': f"工具调用结果:\n{tool_results}\n请基于这些信息回答用户问题。"
                    }
                    # Insert just before the final (user) message so the
                    # tool context sits next to the question it answers.
                    messages.insert(-1, system_message)

            # Bugfix: the service is stored as ``self.ai_service`` in
            # __init__; the original referenced the nonexistent
            # ``self.llm_service`` and raised AttributeError on every call.
            client = self.ai_service.get_client(state['provider'])

            if state.get('is_streaming'):
                # Streaming mode: accumulate chunks for the final reply and
                # mirror them into the stream buffer for live consumers.
                response_stream = await client.generate_response(
                    messages=messages,
                    stream=True,
                    max_tokens=state['max_tokens'],
                    temperature=state['temperature']
                )

                chunks = []
                # setdefault guards against an uninitialized buffer
                # (the original raised KeyError in that case).
                buffer = state.setdefault('stream_buffer', [])
                async for chunk in response_stream:
                    chunks.append(chunk)
                    buffer.append(chunk)

                response_content = "".join(chunks)
            else:
                # Non-streaming: a single awaited completion.
                response_content = await client.generate_response(
                    messages=messages,
                    stream=False,
                    max_tokens=state['max_tokens'],
                    temperature=state['temperature']
                )

            # Append the assistant reply to the shared history.
            state['messages'].append({
                'role': 'assistant',
                'content': response_content
            })

            # Track an estimate of tokens consumed by this response.
            estimated_tokens = await client.count_tokens(response_content)
            state['total_tokens'] += estimated_tokens

            state['current_step'] = "generate_response"
            state['next_step'] = "complete"
            state['error_occurred'] = False

            logger.info(f"响应生成成功，预计token: {estimated_tokens}")

        except Exception as e:
            logger.error(f"LLM响应生成失败: {e}")
            state['error_occurred'] = True
            state['error_message'] = str(e)
            state['next_step'] = "error_handling"

        return state

    async def error_handling(self, state: GraphState) -> GraphState:
        """Retry failed steps up to 3 times, then emit an apology reply."""
        logger.info("执行错误处理")

        if state['retry_count'] < 3:
            state['retry_count'] += 1
            # Bugfix: inspect the error message BEFORE clearing it. The
            # original nulled it first, so the token check always saw None
            # (and ``None.lower()`` raised AttributeError), meaning the
            # token_optimization route could never fire.
            error_message = state.get('error_message') or ''
            state['error_occurred'] = False
            state['error_message'] = None

            # Token-related failures go through token optimization first;
            # everything else retries generation directly.
            if "token" in error_message.lower():
                state['next_step'] = "token_optimization"
            else:
                state['next_step'] = "generate_response"

            logger.info(f"错误处理: 第 {state['retry_count']} 次重试")
        else:
            # Retries exhausted: answer with an apologetic assistant
            # message and finish the workflow. ``or`` (not a get-default)
            # also covers the key existing with value None.
            state['messages'].append({
                'role': 'assistant',
                'content': f"抱歉，系统暂时无法处理您的请求。错误信息: {state.get('error_message') or '未知错误'}"
            })
            state['next_step'] = "complete"

            logger.warning("达到最大重试次数，结束工作流")

        state['current_step'] = "error_handling"
        return state

    async def token_optimization(self, state: GraphState) -> GraphState:
        """Shrink token usage: halve max_tokens and truncate old history."""
        logger.info("执行Token优化")

        # Halve the budget but never drop below a usable floor of 500.
        state['max_tokens'] = max(500, state['max_tokens'] // 2)

        # Truncate long histories: keep all system messages plus the most
        # recent 6 non-system messages. Filtering system messages out of
        # the tail fixes the original's duplication when a system message
        # fell inside the last-6 slice.
        if len(state['messages']) > 10:
            system_messages = [
                msg for msg in state['messages'] if msg.get('role') == 'system'
            ]
            recent_messages = [
                msg for msg in state['messages'][-6:] if msg.get('role') != 'system'
            ]
            state['messages'] = system_messages + recent_messages

        state['current_step'] = "token_optimization"
        state['next_step'] = "generate_response"
        state['error_occurred'] = False

        logger.info(f"Token优化完成，新的max_tokens: {state['max_tokens']}")
        return state

    async def complete(self, state: GraphState) -> GraphState:
        """Terminal node: stamp the end time and log total duration."""
        logger.info("工作流完成")

        state['current_step'] = "complete"
        state['end_time'] = datetime.now()
        state['next_step'] = None

        # Log total elapsed time when both timestamps are available.
        if state['start_time'] and state['end_time']:
            duration = (state['end_time'] - state['start_time']).total_seconds()
            logger.info(f"工作流完成，耗时: {duration:.2f}秒")

        return state

    # --- Mock tool implementations (placeholders for real integrations) ---

    async def _mock_search(self, state: GraphState) -> str:
        """Mock search tool; sleeps briefly to simulate latency."""
        await asyncio.sleep(0.1)
        return "搜索到相关结果: 这是模拟的搜索结果"

    async def _mock_calculator(self, state: GraphState) -> str:
        """Mock calculator tool; sleeps briefly to simulate latency."""
        await asyncio.sleep(0.05)
        return "计算结果: 42"

    async def _mock_code_executor(self, state: GraphState) -> str:
        """Mock code-execution tool; sleeps briefly to simulate latency."""
        await asyncio.sleep(0.2)
        return "代码执行结果: Hello, World!"


# Single shared node instance. The original constructed a fresh GraphNodes
# (and therefore a fresh AIService) for every entry — seven instances for
# seven nodes — which wastes resources and splits state across instances.
_graph_nodes = GraphNodes()

# Map node names to their bound async handlers for graph registration.
NODE_MAPPING = {
    "validate_input": _graph_nodes.validate_input,
    "tool_selection": _graph_nodes.tool_selection,
    "tool_execution": _graph_nodes.tool_execution,
    "generate_response": _graph_nodes.generate_response,
    "error_handling": _graph_nodes.error_handling,
    "token_optimization": _graph_nodes.token_optimization,
    "complete": _graph_nodes.complete
}