import json
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# ----------------------------
# Step 1: Load a small local model (e.g. Qwen or DialoGPT)
# ----------------------------

# NOTE: first run downloads the model from the Hugging Face hub (network + disk).
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Build a text-generation pipeline wrapping the model and tokenizer;
# used by agent_query() below to produce the assistant's raw reply.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# ----------------------------
# Step 2: Define tool functions
# ----------------------------

def get_weather(location="北京"):
    """Simulated weather API: report a fixed 25°C reading for *location*.

    Bug fix: the ``location`` argument used to be ignored and the reply was
    hard-coded to Beijing. It is now reflected in the answer; the default is
    "北京" so the no-argument call returns exactly the same string as before.
    """
    # Mock API call — temperature is always 25°C in this demo.
    return f"{location}当前温度为 25°C"

def calculate(expr):
    """Evaluate an arithmetic expression string and return a formatted result.

    Returns "表达式有误，请重新输入" for any expression that fails to evaluate.

    SECURITY NOTE(review): ``eval`` executes arbitrary Python — do not expose
    this tool to untrusted input; replace with ``ast.literal_eval`` or a real
    expression parser before production use.
    """
    try:
        result = eval(expr)
        return f"{expr} 的结果是 {result}"
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # narrow it to Exception so those can still propagate.
    except Exception:
        return "表达式有误，请重新输入"

# Tool registry: maps the name used in a tool-call payload to its callable.
tools = dict(
    get_weather=get_weather,
    calculate=calculate,
)

# ----------------------------
# Step 3: Tool-call interpreter (runtime)
# ----------------------------

def execute_tool_call(tool_call_json):
    """Parse a JSON tool-call payload and dispatch it to a registered tool.

    Args:
        tool_call_json: JSON string shaped like
            ``{"name": "<tool>", "arguments": {...}}``.

    Returns:
        dict with ``status`` ("success" or "error") and ``content`` (the tool's
        return value, or an error message). Never raises: any failure is
        reported through the error dict.
    """
    try:
        tool_call = json.loads(tool_call_json)
        tool_name = tool_call.get("name")
        # A missing/null "arguments" field used to be splatted as **None,
        # surfacing a cryptic TypeError; default to an empty mapping instead.
        args = tool_call.get("arguments") or {}

        if tool_name in tools:
            result = tools[tool_name](**args)
            return {"status": "success", "content": result}
        else:
            return {"status": "error", "content": f"未识别的工具: {tool_name}"}
    except Exception as e:
        return {"status": "error", "content": str(e)}

# ----------------------------
# Step 4: LLM inference + tool-call detection
# ----------------------------

def agent_query(user_input, history=""):
    """Run one agent turn: generate a reply, executing any embedded tool call.

    The model's raw output is scanned for a 'tool_call:' marker; when present,
    the payload after the marker is dispatched via execute_tool_call() and the
    tool result is appended to the reply.
    """
    prompt = f"{history}\n用户: {user_input}\n助手:"

    outputs = generator(prompt, max_new_tokens=200, num_return_sequences=1)
    response = outputs[0]['generated_text']

    # Debug aid: show the raw model output before tool-call processing.
    print("原始响应:", response)

    # No tool call requested — the raw reply is the final answer.
    if 'tool_call:' not in response:
        return response

    # Everything after the last marker is treated as the JSON payload.
    tool_call_str = response.split('tool_call:')[-1].strip()
    try:
        result = execute_tool_call(tool_call_str)
        return f"{response}\n工具结果: {result['content']}"
    except Exception as e:
        return f"{response}\n工具调用失败: {str(e)}"

# ----------------------------
# Step 5: Main conversation loop
# ----------------------------

if __name__ == "__main__":
    print("欢迎使用 MCP Agent！输入 exit 退出对话。")
    # Running transcript passed back into the model each turn.
    history = ""
    while True:
        user_input = input("你: ")
        # "exit"/"quit" (any case) ends the session.
        if user_input.lower() in ("exit", "quit"):
            break
        response = agent_query(user_input, history)
        print(f"AI: {response}")
        history += f"\n用户: {user_input}\n助手: {response}"