from re import search
from typing import TypedDict
from langchain_core import tools
from langgraph.graph import StateGraph, START, END
from question_analyzer import QuestionAnalyzer
from openai import OpenAI


# State structure shared by every node in the graph.
class State(TypedDict):
    question: str        # user question; may be rewritten by answer_review on retry
    need_more_data: str  # analyzer verdict; generate_answer branches on the value "no"
    answer: str          # latest generated answer (empty until generate_answer runs)
    mcp_data: str        # data gathered via MCP tools — currently never populated
    review_result: str   # lowercase yes/no verdict from the review LLM call
    retry_count: int     # number of question rewrites performed so far

# Maximum number of question-rewrite retries before the graph gives up.
MAX_RETRY = 3

# Node functions
def analyze_question(state: State):
    """Ask the analyzer whether the question requires extra (MCP) data.

    Returns a partial state update containing the ``need_more_data`` verdict.
    """
    need_more = QuestionAnalyzer().need_more_data(state.get("question", ""))
    print(f"大模型判断是否需要更多数据: {need_more}")
    return {"need_more_data": need_more}

# Answer-review node: verdict first, question rewrite only when needed.
def answer_review(state: State):
    """Review whether the current answer is relevant and correct.

    Asks the LLM for a strict yes/no verdict. On a failing verdict, while
    retries remain, a second LLM call rewrites the question so the next
    generation attempt is more likely to succeed.
    """
    analyzer = QuestionAnalyzer()
    question = state.get("question", "")
    answer = state.get("answer", "")
    attempts = state.get("retry_count", 0)

    # Prompt forcing a bare yes/no answer so the verdict is machine-checkable.
    check_prompt = (
        "请判断下面的答案是否和问题相关且正确。只回答 'yes' 或 'no'，不要输出其它内容。\n"
        f"问题：{question}\n答案：{answer}"
    )
    print("审核答案和问题相关性与正确性...")
    check_response = analyzer.client.chat.completions.create(
        model=analyzer.model,
        messages=[{"role": "user", "content": check_prompt}],
        max_tokens=3,
        temperature=0,
    )
    review_result = check_response.choices[0].message.content.strip().lower()
    print(f"审核结果: {review_result}")

    # Guard clause: passed review, or retries exhausted — no rewrite.
    if review_result == "yes" or attempts >= MAX_RETRY:
        return {"review_result": review_result, "retry_count": attempts}

    # Failing verdict with retries left: have the model rewrite the question.
    rewrite_prompt = (
        "请根据下述原始问题和答案，改写一个更容易获得正确答案的新问题。\n"
        f"原始问题：{question}\n原始答案：{answer}"
    )
    print("答案不正确，调用大模型改写问题...")
    rewrite_response = analyzer.client.chat.completions.create(
        model=analyzer.model,
        messages=[{"role": "user", "content": rewrite_prompt}],
        max_tokens=128,
        temperature=0.7,
    )
    new_question = rewrite_response.choices[0].message.content.strip()
    print(f"改写后新问题: {new_question}")
    return {
        "review_result": review_result,
        "question": new_question,
        "retry_count": attempts + 1,
    }



def generate_answer(state: State):
    """Generate an answer for the question held in *state*.

    When the analyzer decided no extra data is needed, the answer comes
    straight from ``QuestionAnalyzer.generate_answer``. Otherwise an LLM
    call checks whether the model wants to invoke an MCP tool; the tool
    itself is not executed yet, so ``answer`` and ``mcp_data`` stay empty
    on that path — TODO: wire up the actual MCP tool call.

    Returns a partial state update with ``answer`` and ``mcp_data``.
    """
    analyzer = QuestionAnalyzer()
    question = state.get("question", "")
    need_more = state.get("need_more_data", "")
    mcp_data = ""
    answer = ""

    if need_more == "no":
        print("直接让大模型生成答案...")
        answer = analyzer.generate_answer(question)
    else:
        print("需要额外数据，调用 MCP 工具...")
        # Build the client lazily: only this branch needs it.
        # NOTE(review): base URL is hard-coded — consider moving to config.
        client = OpenAI(api_key="sk-local", base_url="http://192.168.210.9:7303/v1")
        # Let the LLM parse tool-call parameters from the question.
        response = client.chat.completions.create(
            model="TT-14B-R1-0223",
            messages=[{"role": "user", "content": question}],
        )
        msg = response.choices[0].message

        if msg.function_call:
            print("function_call.name:" + msg.function_call.name)
        else:
            print("没有function_call")

    return {"answer": answer, "mcp_data": mcp_data}

# Build the StateGraph.
builder = StateGraph(State)
# Nodes must be registered before they are wired up with edges.
for _node_name, _node_fn in (
    ("analyze_question", analyze_question),
    ("generate_answer", generate_answer),
    ("answer_review", answer_review),
):
    builder.add_node(_node_name, _node_fn)
# Linear flow: analyze -> generate -> review.
builder.add_edge(START, "analyze_question")
builder.add_edge("analyze_question", "generate_answer")
builder.add_edge("generate_answer", "answer_review")

# Conditional routing after the review node.
def review_condition(state: State):
    """Route to "end" when the answer passed review or retries ran out."""
    passed = state.get("review_result", "") == "yes"
    exhausted = state.get("retry_count", 0) >= MAX_RETRY
    return "end" if passed or exhausted else "retry"

# A failing review loops back to answer generation; success terminates.
builder.add_conditional_edges(
    "answer_review",
    review_condition,
    {
        "end": END,
        "retry": "generate_answer",
    },
)
# Compile the wiring into a runnable graph.
graph = builder.compile()

def main():
    """Run the graph once with a sample question and print the final state."""
    initial_state = {
        "question": "有哪些可用的工具？",
        "need_more_data": "",
        "answer": "",
        "mcp_data": "",
        "review_result": "",
        "retry_count": 0,
    }
    final_state = graph.invoke(initial_state)
    print("最终状态：", final_state)


if __name__ == "__main__":
    main()