import ChatGLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain.tools import Tool 
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_community.tools import ArxivQueryRun
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent 
from langchain.agents import AgentType 
from langchain.chains import LLMChain 
from langchain.prompts import PromptTemplate
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
from langchain.tools import BaseTool
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
from langchain.agents import load_tools
from math import pi
from typing import Union
from typing import Optional, Type
from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools import BaseTool
from math import pi
from typing import Union
from typing import Optional
from math import sqrt, cos, sin
from langchain.chains import LLMMathChain
from langchain.agents import Tool

from langchain_core.runnables import RunnableLambda

def add_five(value):
    """Return *value* incremented by five.

    Used as the first stage of the demo chain below, where it receives
    the raw input passed to ``chain.invoke``.
    """
    result = value + 5
    return result

def multiply_by_two(value):
    """Return the fixed prompt-input dict ``{"topic": "beijing"}``.

    NOTE(review): the name is misleading — this function ignores *value*
    entirely and does not multiply anything. It exists only to produce the
    ``topic`` key consumed by the ``ChatPromptTemplate`` in the chain below;
    renaming it (e.g. ``to_prompt_input``) would be clearer but changes the
    public name, so the behavior is kept exactly as-is.
    """
    del value  # input is deliberately discarded
    return {"topic": "beijing"}

# Wrap the plain functions with RunnableLambda so they can be composed into
# an LCEL pipeline with the "|" operator. NOTE: this rebinds the module-level
# names — after these two lines `add_five` / `multiply_by_two` are Runnables,
# not the original plain functions.
add_five = RunnableLambda(add_five)
multiply_by_two = RunnableLambda(multiply_by_two)
# The template expects a dict with a "topic" key, which is exactly what
# multiply_by_two produces (it always yields {"topic": "beijing"}).
prompt = ChatPromptTemplate.from_template("tell me the weather of {topic}")
# ChatGLM is a project-local module; presumably ChatGLM_LLM() returns a
# LangChain-compatible LLM runnable — TODO confirm against that module.
llm = ChatGLM.ChatGLM_LLM()

# Data flow for invoke(5): 5 -> add_five -> 10 -> multiply_by_two (input
# discarded) -> {"topic": "beijing"} -> prompt -> llm.
chain = add_five | multiply_by_two | prompt | llm



print(chain.invoke(5))






