from langgraph.graph import END, StateGraph
from typing import TypedDict, Annotated
from datetime import datetime
import operator
import sys
import os
from langchain.schema import HumanMessage
# Absolute path of the project-local 'customize' directory.
user_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'customize')
# Add it to sys.path so the project-local package can be found.
# NOTE(review): the import below is `from customize.get_ollama import ...`,
# which requires the *parent* of 'customize' on sys.path, not 'customize'
# itself — confirm this append is actually what makes the import work.
sys.path.append(user_dir)


from customize.get_ollama import GetOllama
# Build the two chat models via the project factory:
# r1 for deep reasoning, qwen for conversational replies.
# (presumably GetOllama(...)() returns a LangChain-style runnable — verify)
r1 = GetOllama(model_type=0, model_name="deepseek-r1:14b")()
qwen = GetOllama(model_type=1, model_name="qwen2.5:14b")()



# In-memory state shared by every node in the graph.
class AgentState(TypedDict):
    # Latest user utterance fed into the graph.
    input_text: str
    # Conversation log; the reducer concatenates each node's returned delta.
    dialog_history: Annotated[list, operator.add]
    # The agent's current beliefs about the debate.
    belief_state: dict
    # Timestamp of the most recent state mutation.
    last_updated: datetime

# Talker node: generates the conversational reply.
def talker_node(state: AgentState):
    """Generate a reply from recent dialog context and current beliefs.

    Returns a partial state update. ``dialog_history`` carries only the
    *new* entry: the channel's reducer (``a + b``) appends the returned
    list to the stored history. Returning the full concatenated list —
    as the original code did — would duplicate every prior entry on
    each turn.
    """
    # Use the last 3 dialog turns as context for the model.
    context = "\n".join(state['dialog_history'][-3:])

    # Ask the chat model for a reply conditioned on context and beliefs.
    response = qwen.invoke(
        prompt=f"辩论上下文：{context}\n当前信念状态：{state['belief_state']}\n生成回应："
    )

    return {
        # Delta only — the reducer concatenates it onto the stored history.
        "dialog_history": [f"Agent: {response}"],
        "last_updated": datetime.now()
    }

# Reasoner node: deep analysis + belief-state update.
def reasoner_node(state: AgentState):
    # NOTE(review): `needs_external_data`, `search_engine` and
    # `update_belief` are not defined anywhere in this file — calling this
    # node as-is raises NameError unless they are supplied elsewhere.
    # Also, the returned "analysis_result" key is not declared in
    # AgentState; confirm the graph schema accepts it or add the field.
    # Complex-reasoning step: ask the r1 model to analyse the argument.
    analysis = r1.invoke(
        prompt=f"分析论点：{state['input_text']}\n当前信念：{state['belief_state']}"
    )

    # Knowledge-retrieval step (pseudo-code in the original; the helpers
    # used here are undefined in this file — presumably project-provided).
    if needs_external_data(analysis):
        search_results = search_engine.query(analysis['keywords'])
        analysis.update({"external_data": search_results})

    # Fold the analysis into the belief state.
    updated_belief = update_belief(state['belief_state'], analysis)

    return {
        "belief_state": updated_belief,
        "analysis_result": analysis
    }

# Assemble the interaction graph: a memory hub plus two worker nodes.
workflow = StateGraph(AgentState)


def _passthrough(state):
    """Memory-update node: returns the state unchanged."""
    return state


workflow.add_node("talker", talker_node)
workflow.add_node("reasoner", reasoner_node)
workflow.add_node("update_memory", _passthrough)

# Decision helper used by the routing condition below.
def requires_deep_analysis(text: str) -> bool:
    """Decide whether *text* warrants the deep-reasoning path.

    Combines cheap heuristics: a long interrogative sentence, the
    presence of two or more categories of logical connectives, or
    phrases that call for fact checking / data verification.
    """
    words = text.split()
    is_question = any(m in text for m in ("为什么", "如何", "怎样", "是否", "难道"))

    # Count how many distinct categories of logical connectives appear.
    connective_groups = (
        ("因此", "所以", "导致"),   # causal
        ("但是", "然而", "尽管"),   # adversative
        ("如果", "假设", "要是"),   # conditional
    )
    groups_hit = sum(
        any(m in text for m in group) for group in connective_groups
    )

    # Semantic-depth analysis (pseudo-code placeholder, kept from original):
    # sentiment_intensity = abs(sentiment_model.predict(text).score) > 0.7

    if len(words) > 15 and is_question:  # long interrogative sentence
        return True
    if groups_hit >= 2:                  # compound logical structure
        return True
    if "最新研究" in text:               # needs fact checking
        return True
    return "据统计" in text              # needs data verification

def route_condition(state):
    """Route complex input to the reasoner, everything else to the talker."""
    return "reasoner" if requires_deep_analysis(state['input_text']) else "talker"


# 构建交互流程
workflow.add_conditional_edges(
    "update_memory",
    route_condition,
    {
        "reasoner": "reasoner",
        "talker": "talker"
    }
)

workflow.add_edge("talker", "update_memory")
workflow.add_edge("reasoner", "update_memory")
workflow.set_entry_point("update_memory")

# 初始化agent
debate_agent = workflow.compile()
