from langchain_community.llms import Ollama  
from langchain_core.messages import AIMessage, HumanMessage,SystemMessage
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import (
    BaseChatMessageHistory,
    InMemoryChatMessageHistory,
)
from langchain_core.prompts import ChatPromptTemplate


# Translate a short text with a locally served Ollama model.
#
# Builds an LCEL pipeline (prompt_template | llm): the chat prompt renders
# the system + user messages from template variables, and the rendered
# messages are sent to the model for completion.
llm = Ollama(
    model="qwen:latest"
)  # requires a running Ollama server with the model pulled: `ollama pull qwen`

# Two template variables: {language} = target language, {text} = text to translate.
system_template = "Translate the following into {language}:"
prompt_template = ChatPromptTemplate.from_messages(
    [("system", system_template), ("user", "{text}")]
)

# LCEL composition: invoking the chain renders the prompt, then calls the LLM.
chain = prompt_template | llm

print(chain.invoke({"language": "italian", "text": "hi"}))
