from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.llms import HuggingFacePipeline
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# ----------------------------
# Step 1: Set up the LLM (choose OpenAI or a Hugging Face model)
# ----------------------------

# Option 1: OpenAI's GPT-3.5 / GPT-4 chat model.
# temperature=0 keeps tool-using agent output deterministic, which makes
# the ReAct output easier for the agent's parser to handle.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# Option 2: a local Hugging Face model (e.g. DialoGPT-medium).
# NOTE(review): this alternative is kept as dead code in a string literal;
# uncomment it (and comment out Option 1) to run fully offline.
"""
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,
    device=0 if torch.cuda.is_available() else -1
)
llm = HuggingFacePipeline(pipeline=pipe)
"""

# ----------------------------
# Step 2: Load tools
# ----------------------------

# "ddg-search" gives the agent DuckDuckGo web search; "llm-math" wraps
# the LLM in a calculator chain, which is why `llm` must be passed here.
tools = load_tools(
    ["ddg-search", "llm-math"],
    llm=llm
)

# More custom tools can be added (e.g. database queries, weather APIs).
# NOTE(review): example kept as dead code in a string literal below.
"""
custom_tool = Tool(
    name="custom_tool",
    func=my_custom_function,
    description="描述你的工具作用"
)
tools.append(custom_tool)
"""

# ----------------------------
# Step 3: Initialize the agent
# ----------------------------

# memory_key must match the chat-history placeholder in the agent's prompt;
# return_messages=True stores the history as message objects, which is what
# chat-model-based agents expect.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# BUG FIX: CHAT_ZERO_SHOT_REACT_DESCRIPTION has no `chat_history` slot in
# its prompt, so the ConversationBufferMemory above was never injected into
# the conversation (and can trigger a prompt-input mismatch error).
# CHAT_CONVERSATIONAL_REACT_DESCRIPTION is the chat agent designed to work
# with conversation memory.
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    verbose=True,
    memory=memory,
    # Retry gracefully when the LLM output does not parse as a ReAct step,
    # instead of raising OutputParserException.
    handle_parsing_errors=True,
)

# ----------------------------
# Step 4: Run the agent (interactive chat loop)
# ----------------------------

if __name__ == "__main__":
    print("欢迎使用 LangChain Agent！输入 exit 退出。")
    while True:
        try:
            user_input = input("你: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dumping a traceback.
            break
        # Normalize before matching so " Exit " also quits.
        command = user_input.strip().lower()
        if command in ("exit", "quit"):
            break
        if not command:
            # Skip blank lines rather than sending them to the agent.
            continue
        try:
            response = agent.run(user_input)
        except Exception as exc:
            # A single failed tool call or parse error should not kill
            # the whole conversation loop — report it and keep going.
            print(f"[错误] {exc}")
            continue
        print(f"AI: {response}")