from langchain_ollama import OllamaLLM, ChatOllama
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema import HumanMessage
import asyncio
# Base URL of the Ollama web API. NOTE(review): the original comment said
# "deployed locally on port 11434", but the host below is a LAN address
# (172.26.167.51) — confirm which server is intended.
ollama_url = "http://172.26.167.51:11434"
# Completion-style client: invoke() takes a raw string, returns a string.
llm = OllamaLLM(base_url=ollama_url, model="qwen2.5")
# Chat-style client: invoke() takes a message list, returns a message object.
chat = ChatOllama(base_url=ollama_url, model="qwen2.5")
# async def get_completion():
#     inputtext = "ollama is an ai company that "
#     completion = llm.invoke(inputtext)
#     print(completion)
#     return completion
#
#
# asyncio.run(get_completion())


# template = """Question: {question}
#
# Answer: Let's think step by step."""
#
# prompt = ChatPromptTemplate.from_template(template)
#
# chain = prompt | llm
#
# result = chain.invoke({"question": "什么是LangChain?"})
# print(result)

# Prompt (Chinese): "Come up with a name for a company that makes cups."
text = "给生产杯子的公司取一个名字。"
# Same prompt wrapped as a chat message list for the chat-style client.
message = [HumanMessage(content=text)]

if __name__ == "__main__":
    # Completion interface: plain string in, plain string out.
    completion = llm.invoke(text)
    print(completion)

    # Chat interface: message list in, message object out; the model's
    # reply text lives on the .content attribute.
    reply = chat.invoke(message)
    print(reply.content)
    print("")