# NOTE(review): `from gc import callbacks` is unused in this file — almost
# certainly an accidental IDE auto-import; safe to delete.
from gc import callbacks

from langchain.agents import initialize_agent, AgentType,create_react_agent,AgentExecutor
from langchain.agents import Tool
from langchain_core.prompts import PromptTemplate
from langchain_ollama import ChatOllama
from langchain_community.tools.tavily_search import TavilySearchResults
import requests
from langchain.tools import StructuredTool

import os

# SECURITY: API key is hard-coded and committed to source. Load it from the
# environment / a secrets manager instead, and rotate this key.
os.environ["TAVILY_API_KEY"] = "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"


def get_weather(location: str) -> str:
    """Fetch the current weather for *location* from weatherapi.com.

    Args:
        location: City name (or any query string weatherapi.com accepts).

    Returns:
        A string embedding the raw weather JSON on success, or an error
        message if the HTTP request fails.
    """
    # SECURITY: API key is hard-coded — move it to an environment variable
    # and rotate this key.
    api_url = (
        "https://api.weatherapi.com/v1/current.json"
        f"?key=264efe8419d84d64ab371844251707&q={location}"
    )
    try:
        # A timeout is essential: requests.get() without one can hang the
        # agent loop indefinitely on a stalled connection.
        response = requests.get(api_url, timeout=10)
        response.raise_for_status()
        data = response.json()
    except requests.RequestException as exc:
        # Return the failure as text so the agent can observe it instead of
        # crashing the whole run with an unhandled exception.
        return f"{location} 天气信息获取失败：{exc}"
    return f"{location} 天气信息：{data}"


def get_calc(num1, num2) -> str:
    """Return the difference ``num1 - num2`` as a string.

    Agent tool-calls frequently deliver arguments as strings, which made the
    original ``num1 - num2`` raise ``TypeError``; coercing through ``float``
    accepts ints, floats, and numeric strings alike. The result is truncated
    to an integer via ``int()``, matching the original behavior.

    Args:
        num1: Minuend — int, float, or numeric string.
        num2: Subtrahend — int, float, or numeric string.

    Returns:
        The truncated integer difference, as a string.
    """
    return str(int(float(num1) - float(num2)))


def get_web(query: str):
    """Run a Tavily web search for *query* and return the raw results."""
    return TavilySearchResults().run(query)


# Custom ReAct-style prompt (Chinese). Two fixes versus the original:
#  * "Action Input" used a fullwidth colon (：) in the format example, which
#    teaches the model to emit a token the ReAct output parser will not match
#    ("Action Input:" with an ASCII colon is required).
#  * The "Action Input" description was a copy of the "Action" line; it now
#    describes the input to the action, not the action itself.
# NOTE(review): a full ReAct prompt normally also needs {tools}, {tool_names}
# and {agent_scratchpad} placeholders; with initialize_agent below, confirm
# this template is actually applied (see the note on the agent setup).
my_prompt = PromptTemplate.from_template("""
尽可能回答用户的问题，你可以使用下面的工具:
"WeatherFetcher":"获得指定城市的天气JSON数据信息"
"CalcFetcher":"接收两个参数，用于计算两个数字的差"
"WebSearch":"通过Web搜索获取指定问题的信息"

严格遵守并使用下面的格式:
Question: 用户提出的问题
Thought: 思考应该如何解决问题
Action: 制定接下来要执行的动作
Action Input: 执行动作所需要的输入
Observation: 观察执行动作返回的结果是否能解决问题
...（Thought/Action/Action Input/Observation的过程有可能会重复多次）
Thought: 我现在可以得到最终答案了
Final Answer: 给用户输出最终答案

现在让我们开始吧!
Question: {input}
""")

# --- Tool wrappers exposed to the agent -------------------------------------

# Single-argument tool: plain Tool is sufficient.
weather_tool = Tool(
    func=get_weather,
    name="WeatherFetcher",
    description="用于获取指定城市的天气信息",
)

# StructuredTool infers an argument schema, so the two-parameter
# calculator can receive (num1, num2) as separate fields.
calc_tool = StructuredTool.from_function(
    func=get_calc,
    name="CalcFetcher",
    description="接收两个参数，用于计算两个数字的差",
)

web_tool = Tool(
    func=get_web,
    name="WebSearch",
    description="通过Web搜索获取指定问题的信息",
)

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler


class StreamingCallbackHandler(StreamingStdOutCallbackHandler):
    """Stream handler that echoes each LLM token to stdout as it arrives
    while keeping the full response accumulated in ``self.output``."""

    def __init__(self):
        super().__init__()
        # Running transcript of every token seen so far.
        self.output = ""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Echo immediately (unbuffered) so the user sees streaming output,
        # then append to the transcript.
        print(token, end="", flush=True)
        self.output += token


# 2. 初始化 LLM
llm = ChatOllama(base_url="http://10.0.2.114:11434", model="qwen3:8b", temperature=1.0,
                 callbacks=[StreamingCallbackHandler()])

# 3. Initialize the agent: a structured-chat ReAct agent over the three tools.
# NOTE(review): `initialize_agent` is deprecated in recent LangChain releases
# in favor of `create_react_agent` + `AgentExecutor` (a draft of that approach
# is in the commented-out code below).
# NOTE(review): the structured-chat agent builds its own prompt from
# prefix/suffix/format_instructions kwargs; passing {"prompt": my_prompt}
# through `agent_kwargs` may be silently ignored or rejected — confirm the
# custom prompt is actually in effect.
agent = initialize_agent(
    tools=[weather_tool, calc_tool, web_tool],
    llm=llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,  # log the Thought/Action/Observation trace to stdout
    handle_parsing_errors=True,  # re-prompt on malformed LLM output instead of raising
    agent_kwargs={"prompt": my_prompt},
)

# # 定义 ReAct 提示模板
# prompt_template = PromptTemplate(
#     input_variables=["input", "agent_scratchpad", "tools", "tool_names"],
#     template="""
#     You are an AI assistant. You can use the following tools:
#
#     {tools}
#
#     Available tool names: {tool_names}
#
#     You have access to the following tools:
#     {tools}
#
#     Use the following format:
#     Thought: Do I need to use a tool? Yes
#     Action: [tool name]
#     Action Input: [tool input]
#     Observation: [tool output]
#     Thought: Do I need to use another tool? No
#
#     {agent_scratchpad}
#
#     User's Input: {input}
#     """
# )
#
# agent = create_react_agent(
#     tools=[weather_tool, calc_tool, web_tool],
#     llm=llm,
#     prompt=prompt_template
# )
#
# # 创建代理执行器
# agent_executor = AgentExecutor(agent=agent, tools=[weather_tool, calc_tool, web_tool])


# for chunk in agent_executor.stream({"input": "What is the capital of France?"}):
#     print(chunk,end="|",flush=True)

# for chunk in agent.stream(input={"input": "讲个20字以内的冷笑话"}):
#     print(chunk, end="|", flush=True)

# # 4. 使用 Agent
# # 天气相关问题
# result = agent.run({"input": "北京今天的天气怎么样,沈阳今天的天气又怎么样，它们两个的温度差又是多少"})
# result = agent.run({"input": "北京今天的天气怎么样"})
#
# # 非天气相关问题
# result = agent.run("介绍一下深度学习")

# result=agent.run("100-20-30-41是多少")
# print(result)
