from transformers import AutoModelForCausalLM, AutoTokenizer
import requests

# Load the pretrained DialoGPT model and its tokenizer.
# NOTE: this runs at import time and may download weights on first use.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_response(input_text, context="", max_length=1000):
    """Generate a model reply for *input_text*, optionally prefixed with context.

    Args:
        input_text: The user's current message.
        context: Prior dialogue text prepended to the prompt (may be "").
        max_length: Upper bound on total token length (prompt + reply).
            Previously hard-coded to 1000; kept as the default for
            backward compatibility.

    Returns:
        The decoded reply text with the prompt tokens stripped off.
    """
    # Build the prompt dynamically; EOS marks the end of the user turn
    # (DialoGPT uses EOS as its turn separator).
    prompt = f"{context}{input_text}"
    inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors='pt')

    # pad_token_id is set explicitly because GPT-2-family tokenizers
    # define no pad token, which would otherwise emit a warning.
    outputs = model.generate(inputs, max_length=max_length, pad_token_id=tokenizer.eos_token_id)
    # Slice off the prompt tokens so only the newly generated reply is decoded.
    response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    return response

def external_tool(query):
    """Simulate calling an external tool or service based on *query*.

    A weather lookup is triggered when the query mentions "天气" (weather);
    any other query receives a fixed apology string.

    Args:
        query: Free-form text to inspect for tool triggers.

    Returns:
        A Chinese-language answer string; on any lookup failure the same
        apology string as the no-tool case is returned.
    """
    if "天气" in query:
        # Placeholder weather API call — "your_api_key" must be replaced
        # with a real key before this branch can succeed.
        try:
            # timeout prevents the assistant from hanging forever on a
            # dead or slow endpoint (requests has NO default timeout).
            response = requests.get(
                "http://api.weatherapi.com/v1/current.json",
                params={"key": "your_api_key", "q": "Beijing"},
                timeout=10,
            )
            response.raise_for_status()
            weather_data = response.json()
            return f"当前北京的温度是{weather_data['current']['temp_c']}度 Celsius。"
        except (requests.RequestException, KeyError, ValueError):
            # Network failure, bad HTTP status, or unexpected payload:
            # degrade gracefully instead of crashing the chat loop.
            return "对不起，我不确定如何回答这个问题。"
    else:
        return "对不起，我不确定如何回答这个问题。"

def smart_assistant(input_text, history=None):
    """Answer *input_text* using dialogue history as context.

    Generates a model reply; if the reply mentions a lookup ("查找"/"查询"),
    the external tool is consulted and its output is appended. Both turns
    are then appended to *history* in place.

    Args:
        input_text: The user's current message.
        history: Mutable list of prior "用户: ..."/"助手: ..." lines; mutated
            in place. Defaults to a fresh list per call (the original
            ``history=[]`` default was a shared mutable object, silently
            accumulating state across unrelated calls).

    Returns:
        The assistant's reply text (including any tool output).
    """
    # Avoid the shared-mutable-default pitfall: each call without an
    # explicit history gets its own fresh list.
    if history is None:
        history = []

    # Build the context from prior turns, one per line.
    context = "\n".join(history) + "\n" if history else ""
    response = generate_response(input_text, context=context)

    # If the reply signals it needs extra information, consult the tool.
    if "查找" in response or "查询" in response:
        tool_response = external_tool(response)
        response += f"\n工具返回: {tool_response}"

    # Record both turns so subsequent calls can use them as context.
    history.append(f"用户: {input_text}")
    history.append(f"助手: {response}")
    return response

if __name__ == "__main__":
    # Simple interactive REPL: keep chatting until the user types 'exit'.
    print("开始与智能助手对话（输入'exit'退出）：")
    history = []
    while True:
        user_text = input("你: ")
        if user_text.lower() == 'exit':
            break
        answer = smart_assistant(user_text, history)
        print(f"AI: {answer}")