# -*- coding: utf-8 -*-
# time: 2025/5/14 16:54
# file: ch01.py
# author: hanson
"""
https://www.langchain.com.cn/docs/how_to/tool_results_pass_to_model/

"""
import os

from langchain_community.chat_models import ChatTongyi
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_ollama import ChatOllama


@tool
def add(a: int, b: int) -> int:
    """2个数相加"""
    # NOTE: the docstring above is the tool description sent to the LLM,
    # so it is kept verbatim rather than translated.
    total = a + b
    print(f"日志==》正在计算：{a} + {b}")  # trace log so tool invocation is visible
    return total


@tool
def power(a: int, b: int) -> int:
    """1个数幂次方"""
    # Fixes: (1) added the @tool decorator so this matches `add` and is a real
    # LangChain tool when placed in the `tools` list; (2) the log previously
    # printed "{a} + {b}" even though this computes exponentiation.
    # The docstring is the tool description sent to the LLM — kept verbatim.
    print(f"日志==》正在计算数幂次方：{a} ** {b}")
    return a ** b

llm = ChatTongyi(
    model_name="qwen-max",
    # SECURITY FIX: the API key was hard-coded in source (a leaked secret).
    # Read it from the environment instead; set DASHSCOPE_API_KEY before running.
    dashscope_api_key=os.getenv("DASHSCOPE_API_KEY"),
    temperature=0.3,  # lower randomness for more deterministic tool calls
)


# Local-model alternative, kept for reference (quality was too poor in testing):
# llm = ChatOllama(model="qwen2.5:1.5b", temperature=0.3)

# Tools the model may call; schemas are derived from signatures + docstrings.
tools = [add, power]

# Prompt template: system instruction telling the model to use the tools,
# plus the human-supplied expression. (Prompt text is runtime behavior —
# left verbatim.)
prompt_template = ChatPromptTemplate.from_messages([
    ("system", "请根据下面的工具，计算出表达式的值。严格使用工具执行计算。"),
    ("human", "表达式：\n{input}"),
])

# Chain: render the prompt, then send it to the LLM with the tools bound.
chain = (
    prompt_template
    | llm.bind_tools(tools)  # attach tool schemas to the LLM request
)

# The response is an AIMessage; any tool calls appear in `tool_calls`.
response = chain.invoke({"input": "计算3+4 并返回结果"})
print(response)