import json
import re
import time
from typing import Dict, List, Optional, TypedDict

from langchain_community.embeddings import OllamaEmbeddings  # 新增：导入Ollama嵌入
from langchain_community.llms import Ollama
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from loguru import logger

# 本地模块导入
from faiss_rag.query import search_similar
from search import search_content


# -------------------------- 1. 模型配置（新增嵌入模型配置） --------------------------
def get_ollama_llm():
    """Build the local Ollama chat LLM used by the decision and summary chains."""
    llm_config = {
        "model": "mistral:7b",
        "temperature": 0.2,  # low temperature: decisions should be deterministic-ish
        "timeout": 60,
        "base_url": "http://localhost:11434",
    }
    return Ollama(**llm_config)


def get_embedding_model():
    """Return the lightweight local embedding model (all-minilm:latest) to avoid GPU OOM."""
    embed_config = {
        "model": "all-minilm:latest",  # already present in the local Ollama model list
        "base_url": "http://localhost:11434",
    }
    return OllamaEmbeddings(**embed_config)


# Module-level singletons shared by all nodes/tools below: one LLM for the
# decision and summary chains, one lightweight embedding model for RAG.
llm = get_ollama_llm()
embedding_model = get_embedding_model()  # passed into search_similar() by local_rag_tool


# -------------------------- 2. 工具定义（修复MCP解析+RAG显存优化） --------------------------
@tool
def local_rag_tool(query: str, call_count: List[int] = [0]) -> str:
    """Query the local entertainment hot-search library (FAISS RAG).

    Uses the lightweight module-level embedding model to keep GPU memory low.

    Args:
        query: Natural-language search text.
        call_count: Single-element list used as a shared, process-wide call
            counter via the mutable-default mechanism (deliberate).

    Returns:
        A numbered, formatted result string, or a short limit/failure message.
    """
    # NOTE(review): because the mutable default survives across invocations,
    # the 1-call limit also rejects later *unrelated* queries in the same
    # process (e.g. the second test query) -- confirm this is intended rather
    # than a per-query limit.
    call_count[0] += 1
    if call_count[0] > 1:
        logger.info(f"【LocalRAGTool】已调用1次，拒绝重复查询")
        return "RAG已查询1次，无更多结果"

    logger.info(f"【LocalRAGTool】查询：{query}（top_k=3）")
    print(f"\n【工具调用】LocalRAGTool 处理查询：{query[:30]}...")
    try:
        # Hand the lightweight embedding model through to search_similar
        # (faiss_rag/query.py must accept this keyword argument).
        rag_results = search_similar(query, top_k=3, embedding_model=embedding_model)
        if not rag_results:
            logger.warning(f"【LocalRAGTool】无匹配结果")
            return f"本地库未找到'{query[:20]}...'相关信息"

        formatted = []
        for i, res in enumerate(rag_results, 1):
            # Ternary binds last, so this is (text[:50] + "...") if long else text.
            text = res['text'][:50] + "..." if len(res['text']) > 50 else res['text']
            item = f"{i}. {text}（相似度：{round(res['score'], 2)}，来源：本地库）"
            formatted.append(item)
        return "\n".join(formatted)
    except Exception as e:
        err = f"RAG异常：{str(e)[:40]}"
        # Best-effort OOM hint; the exact CUDA error wording may differ -- verify.
        if "unable to allocate CUDA" in str(e):
            err += "（显存不足，已使用轻量嵌入模型）"
        logger.error(f"【LocalRAGTool】{err}")
        return err


@tool
def mcp_search_tool(query: str, call_count: List[int] = [0]) -> str:
    """MCP real-time entertainment search with robust JSON extraction.

    Args:
        query: User query; an embedded date (``2025-10-14`` / ``2025年10月14日``
            style, 1- or 2-digit month/day) is extracted and appended to the
            search text to sharpen results.
        call_count: Single-element list used as a shared, process-wide call
            counter via the mutable-default mechanism (limits the tool to one
            call; persists across queries in the same process).

    Returns:
        Up to three formatted title/snippet entries, or a short failure message.
    """
    call_count[0] += 1
    if call_count[0] > 1:
        logger.info(f"【BaiduMCPTool】已调用1次，拒绝重复查询")
        return "MCP已查询1次，无更多结果"

    def _extract_time(q: str) -> str:
        # Accept 1- or 2-digit month/day (e.g. 2025年1月5日) as well as 2-digit.
        time_pattern = r"(\d{4}[-年]\d{1,2}[-月]\d{1,2}日?)"
        match = re.search(time_pattern, q)
        return match.group(1) if match else ""

    logger.info(f"【BaiduMCPTool】查询：{query}")
    print(f"\n【工具调用】BaiduMCPTool 处理查询：{query[:30]}...")
    extracted_time = _extract_time(query)
    query_with_time = f"{query} 时间：{extracted_time}" if extracted_time else query

    try:
        result = search_content(query_with_time)
        logger.debug(f"【MCP原始响应】{result[:200]}")  # first 200 chars for debugging

        # Strip the non-JSON noise that search_content prepends (log prefixes,
        # success banners, separator rules) before attempting to parse.
        clean_result = re.sub(r"\[SEARCH\.PY\] .*?\| ", "", result)
        clean_result = re.sub(r"✅ .*?\n", "", clean_result)
        clean_result = re.sub(r"-------------------.*?-------------------", "", clean_result, flags=re.DOTALL)

        # Parse directly; on failure fall back to extracting the outermost {...}.
        try:
            result_json = json.loads(clean_result)
        except json.JSONDecodeError:  # was a bare `except:`; narrow to the real failure mode
            json_start = clean_result.find("{")
            json_end = clean_result.rfind("}") + 1
            if json_start == -1 or json_end == 0:
                logger.error(f"【BaiduMCPTool】未找到JSON数据")
                return "MCP无相关结果"
            clean_json = clean_result[json_start:json_end]
            logger.debug(f"【提取后JSON】{clean_json[:200]}")
            result_json = json.loads(clean_json)

        web_pages = result_json.get("data", {}).get("webPages", {}).get("value", [])
        logger.info(f"【MCP解析结果】共{len(web_pages)}条数据")

        formatted_lines = []
        for i, page in enumerate(web_pages[:3], 1):
            title = page.get("name", "无标题")[:30]
            snippet = page.get("snippet", "无摘要")[:60]
            formatted_lines.append(f"{i}. 标题：{title}")
            formatted_lines.append(f"   摘要：{snippet}")

        if formatted_lines:
            return "\n".join(formatted_lines) + "\n（来源：MCP实时搜索）"
        return "MCP无相关结果"
    except json.JSONDecodeError as e:
        logger.error(f"【BaiduMCPTool】JSON解析失败: {str(e)}")
        return "MCP无相关结果"
    except Exception as e:
        err = f"MCP异常：{str(e)[:40]}"
        logger.error(f"【BaiduMCPTool】{err}")
        return err


# -------------------------- 3. 状态定义（不变） --------------------------
class AgentState(TypedDict, total=False):
    """State dict flowing through the LangGraph workflow.

    The previous version subclassed ``Dict`` with class-level defaults, which
    does not define a state schema: the "defaults" became plain class
    attributes, and ``messages: List = add_messages`` mistakenly bound the
    reducer *function* itself as a class attribute.  A ``TypedDict`` with
    ``total=False`` (all keys optional) expresses the intended schema while
    remaining an ordinary dict at runtime, so existing callers are unaffected.
    """
    query: str                  # raw user query
    rag_result: Optional[str]   # output of call_rag ("未调用RAG工具" when skipped)
    mcp_result: Optional[str]   # output of call_mcp ("未调用MCP工具" when skipped)
    messages: List              # answer accumulator; summarize appends the final reply
    use_rag: bool               # decision flag set by decide_tools
    use_mcp: bool               # decision flag set by decide_tools


# -------------------------- 4. 决策节点（修复规则执行错误） --------------------------
def decide_tools(state: AgentState) -> AgentState:
    """Decide which tools to run for the query; set the use_rag/use_mcp flags.

    Fixes:
    - The two example lines in the prompt used single braces, which
      ``from_template``'s f-string templating treats as template variables,
      raising at invoke time; the broad except then silently forced both tools
      on for every query.  The literal JSON braces are now escaped ``{{ }}``.
    - Flag extraction uses ``.get()`` with safe defaults so a partially-formed
      LLM answer no longer raises ``KeyError``.

    Returns:
        The input state with ``use_rag`` / ``use_mcp`` set; on any failure the
        safe default is to enable both tools.
    """
    query = state["query"]
    # Spell out the current date and rule examples so the LLM cannot misjudge
    # the 30-day window.  Literal braces must be doubled for from_template().
    prompt = ChatPromptTemplate.from_template("""
    你是严格的工具决策器，当前日期：2025-10-19，按以下规则输出JSON（无多余内容）：
    规则：
    1. 含具体日期：日期在2025-09-19至2025-10-19之间（过去30天）→ use_rag=true, use_mcp=true；
       日期超30天→ use_rag=true, use_mcp=false；
    2. 无具体日期：含"实时/当天"→ use_rag=false, use_mcp=true；含"过去"→ use_rag=true, use_mcp=false；
    3. 无法判断→ use_rag=true, use_mcp=true。
    示例：
    - 查询"2025年10月14日明星动态"→ {{"use_rag":true,"use_mcp":true}}
    - 查询"某顶流歌手演唱会"→ {{"use_rag":true,"use_mcp":true}}
    用户查询：{query}
    输出格式：{{"use_rag": true/false, "use_mcp": true/false}}
    """)

    chain = prompt | llm | JsonOutputParser()
    try:
        decision = chain.invoke({"query": query})
        use_rag = bool(decision.get("use_rag", True))
        use_mcp = bool(decision.get("use_mcp", True))
        logger.info(f"【决策结果】use_rag={use_rag}, use_mcp={use_mcp}")
        # Hard guard for the known in-window demo date: rule 1 demands both tools.
        if "2025年10月14日" in query and not (use_rag and use_mcp):
            logger.warning(f"【决策修正】按规则强制改为两者都用")
            use_rag, use_mcp = True, True
        return {**state, "use_rag": use_rag, "use_mcp": use_mcp}
    except Exception as e:
        logger.error(f"【决策异常】{str(e)}，默认两者都用")
        return {**state, "use_rag": True, "use_mcp": True}


# -------------------------- 5. 工具调用/总结/工作流（不变，仅适配RAG参数） --------------------------
def call_rag(state: AgentState) -> Dict:
    """Run the local RAG tool when the decision node enabled it; return its partial state."""
    if not state["use_rag"]:
        return {"rag_result": "未调用RAG工具"}
    return {"rag_result": local_rag_tool.invoke(state["query"])}


def call_mcp(state: AgentState) -> Dict:
    """Run the MCP search tool when the decision node enabled it; return its partial state."""
    if not state["use_mcp"]:
        return {"mcp_result": "未调用MCP工具"}
    return {"mcp_result": mcp_search_tool.invoke(state["query"])}


def summarize_results(state: AgentState) -> AgentState:
    """Merge the RAG and MCP tool outputs into one short final answer via the LLM.

    Appends the answer (or the fallback string on LLM failure) to ``messages``.
    """
    user_query = state["query"]
    rag_res = state["rag_result"] or "无"
    mcp_res = state["mcp_result"] or "无"

    template = """
    整合以下结果回答用户查询'{query}'，要求：
    1. 优先用MCP实时信息（标注来源），次用RAG历史信息（标注来源）；
    2. 无有效信息则返回'未找到相关娱乐资讯'；
    3. 简洁口语化，不超过100字。
    MCP实时结果：{mcp_res}
    RAG历史结果：{rag_res}
    """
    summarize_chain = ChatPromptTemplate.from_template(template) | llm

    try:
        answer = summarize_chain.invoke(
            {"query": user_query, "mcp_res": mcp_res, "rag_res": rag_res}
        )
    except Exception as e:
        logger.error(f"【总结异常】{str(e)}")
        answer = "未找到相关娱乐资讯"
    return {**state, "messages": [*state["messages"], answer]}


def build_search_agent_graph():
    """Wire and compile the workflow: decide_tools -> (call_rag, call_mcp) -> summarize -> END."""
    workflow = StateGraph(AgentState)

    # Register all nodes from a name -> callable table.
    node_table = {
        "decide_tools": decide_tools,
        "call_rag": call_rag,
        "call_mcp": call_mcp,
        "summarize": summarize_results,
    }
    for node_name, node_fn in node_table.items():
        workflow.add_node(node_name, node_fn)

    workflow.set_entry_point("decide_tools")

    # Fan out from the decision node to both tools, then fan in to the summary.
    for src, dst in (
        ("decide_tools", "call_rag"),
        ("decide_tools", "call_mcp"),
        ("call_rag", "summarize"),
        ("call_mcp", "summarize"),
        ("summarize", END),
    ):
        workflow.add_edge(src, dst)

    return workflow.compile()


# -------------------------- 6. 测试入口（不变） --------------------------
if __name__ == "__main__":
    # --- Preflight: verify the Ollama service is up and required models exist ---
    banner = "=" * 50
    print("\n" + banner)
    print("【初始化】检查Ollama服务...")
    print(banner)
    try:
        import requests

        resp = requests.get("http://localhost:11434/api/tags", timeout=10)
        if resp.status_code != 200:
            raise Exception(f"Ollama服务未启动（状态码：{resp.status_code}）")

        installed = resp.json().get("models", [])
        for required in ("mistral:7b", "all-minilm:latest"):
            if not any(required in entry["name"] for entry in installed):
                raise Exception(f"未找到模型 {required}，请检查本地模型列表")

        print("Ollama服务检查通过（所需模型均就绪）")
    except requests.exceptions.ConnectionError:
        print("Ollama服务未启动！请先打开终端执行：ollama serve")
        raise SystemExit(1)
    except Exception as e:
        print(f"Ollama检查失败：{str(e)}")
        raise SystemExit(1) from e

    # --- Run the agent graph over the demo queries ---
    agent_graph = build_search_agent_graph()

    demo_queries = [
        "某顶流歌手演唱会",
        "2025年10月14日明星最新动态"
    ]
    for idx, user_query in enumerate(demo_queries, 1):
        print(f"\n" + "=" * 80)
        print(f"========== 开始测试{idx}：{user_query} ==========")
        print(f"测试开始时间：{time.strftime('%Y-%m-%d %H:%M:%S')}")
        print("=" * 80)

        started = time.time()
        try:
            final_state = agent_graph.invoke({
                "query": user_query,
                "rag_result": None,
                "mcp_result": None,
                "messages": []
            })
            elapsed = round(time.time() - started, 2)

            print(f"\n" + "-" * 50)
            print(f"测试{idx}完成 | 总耗时：{elapsed}秒")
            print(f"最终回答：\n{final_state['messages'][-1]}")
        except Exception as e:
            print(f"\n测试{idx}失败：{str(e)}")
            logger.error(f"【测试{idx}异常】{str(e)}")