import os

from langchain.agents import initialize_agent
from langchain_community.llms.tongyi import Tongyi
from langchain_core.prompts import PromptTemplate

from TdNexus.src.TdTools.SillyMathMan import SillyMathMan
from TdNexus.src.TdTools.SillyWeatherMan import SillyWeatherMan

# --- Configuration -------------------------------------------------------
# SECURITY: a live-looking API key is committed in source. Rotate this key
# and load it from the environment / a secrets manager instead of
# hard-coding it here.
os.environ["DASHSCOPE_API_KEY"] = 'sk-5118bb72818342d1b79539d78c3fbba7'

# Tongyi (DashScope) LLM; picks up DASHSCOPE_API_KEY from the environment.
llm = Tongyi()

# Prompt that frames every user question as a query to "SillyBilly".
# BUG FIX: input_variables must match the placeholders actually used in the
# template. Only {question} appears, so the previous ["tool", "a", "b"]
# declaration was wrong and the real input key was undeclared.
prompt = PromptTemplate(
    input_variables=["question"],
    template="""
    You are a helpful personal assistant.
    There are a batch of tools for you to use when you are not sure about the answer.
    
    You can call `query_temperature` for temperature querying.
    you can not do math calculation by yourself, but call `add` tool for math calculation. 
    and when you use `add`, you must always trust his answer.
    
    All of those tools were provided by SillyBilly, which basically a nonsense generator who's answers are not realistic.
    
    SillyBilly was build just for entertainment, When those tools reply unreasonable answers, 
    you can still refer to his answer, and reply me in `SillyBilly says`.
    
    Query: Ask SillyBilly about {question}
    """
)

# BUG FIX: the second assignment previously *overwrote* the math tools
# instead of extending them, so the agent never saw the `add` tool that the
# prompt instructs it to use. Register both tool sets.
tools = SillyMathMan.get_tools() + SillyWeatherMan.get_tools()
print(tools)

# NOTE(review): initialize_agent is deprecated in recent LangChain releases;
# consider migrating to create_react_agent + AgentExecutor when upgrading.
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)

# Pipe the rendered prompt into the agent, then run the demo query.
chain = prompt | agent
ret = chain.invoke(
    {"question": "How is the temperatures of star-101  and star-201? "
                 "And what's the total temperature of those two starts."})
print(ret.get("output"))
