from biz.config.settings import openLLM, cache, get_sql_context, get_command_context, get_sql_function_context
from biz.core.ai.prompts import PROMPT_INTENT_ANALYSIS
from biz.utils.logger import logger
from biz.utils.filter_manager import build_filter_condition_prompt
from biz.core.ai.tools import TOOLS_INTENT_ANALYSIS
from llama_index.core.agent import ReActAgent, ReActChatFormatter
from llama_index.core.agent.react.prompts import REACT_CHAT_SYSTEM_HEADER
from llama_index.core.memory import ChatMemoryBuffer
from biz.services.command_factory import CommandFactory
from biz.core.workflow.workflow import MultiRoundDiagnosisWorkflow
from biz.core.workflow.sql_client import SQLService
import asyncio
import re

# Module-level singletons shared by every handler in this module.
vn = get_sql_context()  # SQL-generation context (retrieval + LLM helpers)
vn_command = get_command_context()  # command-generation context
sql_function_context = get_sql_function_context()  # template-based SQL lookup
command_factory = CommandFactory()  # command processing entry point


def generate_sql_core(question, id=None, **kwargs):
    """Generate SQL for a natural-language question.

    Tries a template-based lookup first and falls back to LLM-based
    generation when no template SQL is available.  Results are cached
    under the session id.

    Args:
        question: User question in natural language.
        id: Session id; a new one is generated when None.
        **kwargs: Extra parameters forwarded to the LLM path
            (including filter_conditions).

    Returns:
        tuple: (id, sql) — the session id and the generated SQL.
    """
    session_id = cache.generate_id(question=question) if id is None else id

    history = cache.get(id=session_id, field='question_sql_history') or []
    # Keep the history bounded: once it grows past 10 entries, retain
    # only the 5 most recent ones.
    if len(history) > 10:
        history = history[-5:]

    # Template lookup comes first.
    template = sql_function_context.generate_sql_from_question(question=question)
    sql = template.get('sql')

    # No template hit — fall back to AI generation, passing every extra
    # parameter (filter conditions etc.) through to generate_sql.
    if sql in (None, ''):
        sql = vn.generate_sql(
            question=question,
            allow_llm_to_see_data=True,
            question_sql_history=history,
            **kwargs,
        )

    # Persist the outcome for this session.  Note: history is written
    # back unchanged — the new Q/A pair is not appended here.
    cache.set(id=session_id, field='question', value=question)
    cache.set(id=session_id, field='sql', value=sql)
    cache.set(id=session_id, field='question_sql_history', value=history)

    return session_id, sql


def generate_sql_stream(question, id=None, **kwargs):
    """Stream SQL generation for a question.

    Args:
        question: User question.
        id: Session id; a new one is generated when None.
        **kwargs: Extra parameters (including filter_conditions)
            forwarded to the LLM path.

    Yields:
        dict: Streaming payloads, in order:
            - {"type": "id", "id": str} — session id (always first)
            - {"type": "template", "sql": str} — template hit, if any
            - {"type": "chunk", "text": str} — incremental SQL fragment
            - {"type": "complete", "id": str, "sql": str} — final SQL
            - {"type": "error", "error": str} — on failure
    """
    session_id = id if id is not None else cache.generate_id(question=question)

    # The session id is always the first item on the stream.
    yield {"type": "id", "id": session_id}

    history = cache.get(id=session_id, field='question_sql_history') or []
    if len(history) > 10:
        history = history[-5:]

    # Template lookup: a hit short-circuits the LLM entirely.
    template_sql = sql_function_context.generate_sql_from_question(
        question=question
    ).get('sql')

    if template_sql and template_sql.strip():
        yield {"type": "template", "sql": template_sql}
        cache.set(id=session_id, field='question', value=question)
        cache.set(id=session_id, field='sql', value=template_sql)
        cache.set(id=session_id, field='question_sql_history', value=history)
        yield {"type": "complete", "id": session_id, "sql": template_sql}
        return

    # Otherwise stream the SQL from the LLM chunk by chunk.
    try:
        pieces = []
        for piece in _generate_sql_stream_from_llm(question, history, **kwargs):
            pieces.append(piece)
            yield {"type": "chunk", "text": piece}

        full_sql = "".join(pieces)

        cache.set(id=session_id, field='question', value=question)
        cache.set(id=session_id, field='sql', value=full_sql)
        cache.set(id=session_id, field='question_sql_history', value=history)

        yield {"type": "complete", "id": session_id, "sql": full_sql}

    except Exception as e:
        logger.error(f"流式生成SQL失败: {e}")
        yield {"type": "error", "error": str(e)}


def _generate_sql_stream_from_llm(question, question_sql_history, **kwargs):
    """Stream SQL fragments from the LLM for the given question.

    Builds the SQL-generation prompt (similar Q/A pairs, related DDL and
    documentation, an optional table-analysis pre-pass, and filter
    conditions) and streams the model's answer chunk by chunk.

    Args:
        question: User question.
        question_sql_history: Prior question/SQL pairs for context.
        **kwargs: Extra retrieval/LLM parameters (filter_conditions,
            temperature, max_tokens, ...).

    Yields:
        str: SQL text fragments as they arrive from the model.

    Raises:
        Exception: Re-raised after logging when the streaming call fails.
    """
    # Retrieval context (reuses vn's logic).
    question_sql_list = vn.get_similar_question_sql(question, **kwargs)
    ddl_list = vn.get_related_ddl(question, **kwargs)
    doc_list = vn.get_related_documentation(question, **kwargs)

    # Optional SQL-analysis pre-pass (non-streaming): ask the model which
    # tables are involved, then pull their DDL in as supplementary context.
    if vn.config is not None:
        initial_prompt = vn.config.get("sql_analysis_prompt", None)
        if initial_prompt is not None:
            prompt = vn.get_sql_analysis_prompt(
                initial_prompt=initial_prompt,
                question=question,
                question_sql_list=question_sql_list,
                ddl_list=ddl_list,
                doc_list=doc_list,
                **kwargs,
            )
            llm_response = vn.submit_prompt(prompt, **kwargs)
            table_list = vn.extract_table_names(llm_response)

            supplement_ddl_list = []
            for table_str in table_list:
                supplement_ddl_list.extend(vn.get_related_ddl(table_str, **kwargs))
            kwargs["supplement_ddl_list"] = supplement_ddl_list
            kwargs["sql_analysis_result"] = llm_response

    # Prepend filter-condition constraints to the question.
    filter_conditions_json = vn._build_filter_conditions(**kwargs)
    question_addition = build_filter_condition_prompt(filter_conditions_json)
    enhanced_question = question_addition + "\n 问题：" + question

    # Build the final SQL-generation prompt.
    initial_prompt = vn.config.get("initial_prompt", None) if vn.config else None
    prompt = vn.get_sql_prompt(
        initial_prompt=initial_prompt,
        question=enhanced_question,
        question_sql_list=question_sql_list,
        ddl_list=ddl_list,
        doc_list=doc_list,
        question_sql_history=question_sql_history,
        **kwargs
    )

    # Stream from the OpenAI-compatible chat completions API.
    try:
        response = vn.client.chat.completions.create(
            model=vn.config.get("model", "gpt-3.5-turbo") if vn.config else "gpt-3.5-turbo",
            messages=prompt,
            stream=True,
            temperature=kwargs.get("temperature", 0.7),
            max_tokens=kwargs.get("max_tokens", 2000),
        )

        for chunk in response:
            # Fix: some streamed chunks carry an empty `choices` list
            # (e.g. a trailing usage chunk or provider-side content-filter
            # events); indexing blindly raised IndexError mid-stream.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            if delta is not None and delta.content:
                yield delta.content

    except Exception as e:
        logger.error(f"LLM流式调用失败: {e}")
        raise

def generate_command_core(question, uuid=None, type=None):
    """Core command-generation logic, delegating to the CommandFactory.

    Args:
        question: User question.
        uuid: Session id, or None to have the factory create one.
        type: Command type.

    Returns:
        tuple: (uuid, type, message) — session id, type, and the
        generated message (a generic error message on failure).
    """
    try:
        return command_factory.process_command(question, uuid, type)
    except Exception as e:
        logger.error(f"命令生成失败: {e}")
        # Even on failure the caller must receive a usable session id.
        if uuid in (None, ''):
            uuid = command_factory.generate_uuid()
        return uuid, "command", "系统错误，请稍后重试"

def generate_answer_core(question, id=None):
    """Classify user intent with a ReAct agent and return its answer.

    Runs a ReAct agent over the intent-analysis tool set and maps the
    tool it invoked to a task type.  When the agent's reply lacks the
    required Action/Action Input structure (or parsing fails), the
    question is replaced with a format-correction prompt and the agent
    is retried, up to ``max_attempts`` times.

    Args:
        question: User question.
        id: Session id used to look up / store per-session chat memory.

    Returns:
        tuple: (type, response_text) where type is one of
        'sql' | 'command' | 'knowledge' | 'router' | 'other' | 'complete'.
    """
    # Reuse the per-session chat memory when present; otherwise create
    # and cache a fresh buffer.
    memory = cache.get(id=id, field='memory')
    if memory is None:
        memory = ChatMemoryBuffer.from_defaults(token_limit=500)
        cache.set(id=id, field='memory', value=memory)

    formatted_prompt = PROMPT_INTENT_ANALYSIS.format(session_id=id)

    # Maps the tool the agent invoked to the task type we report.
    tool_to_type = {
        'data_analysis': 'sql',
        'light_control': 'command',
        'knowledge_base': 'knowledge',
        'menu': 'router',
        'user_intent_analysis': 'other',
        'end_conversation': 'complete',
        'diagnosis_analysis': 'complete',
    }

    max_attempts = 5
    type = 'other'
    for _ in range(max_attempts):
        agent = ReActAgent.from_tools(
            llm=openLLM,
            tools=TOOLS_INTENT_ANALYSIS,
            react_chat_formatter=ReActChatFormatter(
                system_header=formatted_prompt + "\n" + REACT_CHAT_SYSTEM_HEADER + "\n 你回复的内容中必须包含**Action**和**Action Input**！",
            ),
            max_iterations=5,
            verbose=False,  # suppress intermediate steps so they don't leak to the user
            memory=memory
        )
        response = agent.chat(question)
        try:
            tool_name = response.sources[-1].tool_name
            if tool_name is None or tool_name == '':
                # Agent skipped the tool call — retry with a format reminder.
                question = '回复信息格式验证失败，回复内容中必须包含**Action**和**Action Input**！'
            else:
                # Unknown tool names keep the current type (defaults to 'other').
                type = tool_to_type.get(tool_name, type)
                return type, response.response
        except Exception as e:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.  An empty `sources` list
            # (IndexError) lands here and triggers a retry.
            logger.warning(f"意图识别结果解析失败，准备重试: {e}")
            question = """回复信息格式验证失败，回复内容中必须包含**Action**和**Action Input**！"""
    return type, "抱歉，系统似乎遇到了一点小问题，建议您稍后重新尝试。"


def generate_answer_core_multi_agent(question, id=None):
    """Multi-agent intent recognition and dispatch.

    Uses a coordinator pattern: a coordinator agent classifies the user
    intent and routes the question to a specialist agent with its own
    tool set, context, and memory — giving clearer responsibilities and
    better maintainability than the single-agent path.

    Args:
        question: User question.
        id: Session id, or None for a new session.

    Returns:
        tuple: (type, response) where type is one of
        'sql' | 'command' | 'knowledge' | 'router' | 'diagnosis' |
        'other' | 'complete'.
    """
    from biz.core.agents.multi_agent_system import get_multi_agent_system

    try:
        # Obtain the multi-agent system and let it classify + dispatch.
        multi_agent = get_multi_agent_system()
        task_type, answer = multi_agent.process(question, id)
        logger.info(f"多智能体处理完成 - 类型: {task_type}, 会话: {id}")
        return task_type, answer
    except Exception as e:
        logger.error(f"多智能体处理异常: {e}", exc_info=True)
        return 'other', "抱歉，系统遇到问题，请稍后重试"

def generate_diagnosis_core(question, id=None, max_rounds=3, timeout=300.0):
    """Run the multi-round diagnosis workflow for a question.

    Args:
        question: User problem description.
        id: Session id; a new one is generated when None.
        max_rounds: Maximum number of diagnosis rounds (default 3).
        timeout: Workflow timeout in seconds (default 300).

    Returns:
        tuple: (id, message) — the session id and the diagnosis
        conclusion text (or an error message on failure).
    """
    import concurrent.futures  # also needed in the timeout handler below

    # Parameter validation.
    if not question or not question.strip():
        error_msg = "问题描述不能为空"
        logger.error(error_msg)
        if id is None:
            id = cache.generate_id(question="empty_question")
        # Fix: previously returned a dict here while every other path
        # returns a message string — now consistent with the rest.
        return id, error_msg

    try:
        # Generate or reuse the session id.
        if id is None:
            id = cache.generate_id(question=question)

        logger.info(f"开始诊断分析，会话ID: {id}, 问题: {question[:100]}...")

        sql_client = SQLService()

        diagnosis_workflow = MultiRoundDiagnosisWorkflow(
            llm=openLLM,
            sql_client=sql_client,
            max_rounds=max_rounds,
            timeout=timeout
        )

        async def run_diagnosis():
            return await diagnosis_workflow.run(user_input=question)

        # Run the async workflow from this (possibly Flask worker) thread.
        try:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                # Already inside a running loop: hand off to a worker thread
                # with its own event loop.  NOTE(review): if future.result()
                # times out, the `with` block still waits for the worker on
                # exit — consider a detached executor if that matters.
                with concurrent.futures.ThreadPoolExecutor() as executor:
                    future = executor.submit(asyncio.run, run_diagnosis())
                    diagnosis_result = future.result(timeout=timeout + 10)
            else:
                diagnosis_result = loop.run_until_complete(run_diagnosis())
        except RuntimeError:
            # No event loop in this thread — create a fresh one.
            diagnosis_result = asyncio.run(run_diagnosis())

        # Interpret the DiagnosisResult object.
        if hasattr(diagnosis_result, 'status') and diagnosis_result.status == "completed":
            diagnosis = {
                "id": id,
                "question": question,
                "diagnosis_result": diagnosis_result.llm_conclusion,
                "diagnosis_path": diagnosis_result.diagnosis_path,
                "processing_time": diagnosis_result.execution_time,
                "recommendations": diagnosis_result.recommendations,
                "status": "success",
                "analysis_rounds": diagnosis_result.analysis_rounds,
                "total_queries": diagnosis_result.total_queries,
                "findings": diagnosis_result.findings,
                "data_quality_summary": diagnosis_result.data_quality_summary
            }

            logger.info(
                f"诊断完成 - 会话ID: {id}, "
                f"轮次: {diagnosis_result.analysis_rounds}/{max_rounds}, "
                f"查询数: {diagnosis_result.total_queries}, "
                f"耗时: {diagnosis_result.execution_time:.2f}秒"
            )

            logger.info("诊断报告：%s", diagnosis.get("diagnosis_result"))
            return id, diagnosis.get("diagnosis_result", "诊断完成")
        else:
            # Failure path: build an error summary from whatever the
            # result object carries.
            error_message = getattr(diagnosis_result, 'llm_conclusion', "诊断过程中发生未知错误")
            diagnosis_path = getattr(diagnosis_result, 'diagnosis_path', ["❌ 诊断执行失败"])
            diagnosis = {
                "id": id,
                "question": question,
                "diagnosis_result": f"诊断失败: {error_message}",
                "diagnosis_path": diagnosis_path,
                "status": "error",
                "error": error_message
            }
            logger.error(f"诊断失败 - 会话ID: {id}, 错误: {error_message}")
            return id, diagnosis.get("diagnosis_result", "诊断完成")
    except (asyncio.TimeoutError, concurrent.futures.TimeoutError):
        # Fix: future.result() raises concurrent.futures.TimeoutError,
        # which is distinct from asyncio.TimeoutError before Python 3.11,
        # so the old handler missed thread-pool timeouts — catch both
        # (they are aliases of the builtin TimeoutError on 3.11+).
        error_msg = f"诊断超时（>{timeout}秒）"
        logger.error(f"{error_msg} - 会话ID: {id}")
        return id, error_msg
    except Exception as e:
        error_msg = f"系统错误: {str(e)}"
        logger.error(f"generate_diagnosis_core 执行失败 - {error_msg}", exc_info=True)
        return id, error_msg
