'''
Minimal demo: query the Qwen 0.5B model through a locally running Ollama
server, using LangChain's community Ollama wrapper.
'''

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms.ollama import Ollama

# Module-level LLM handle shared by the helpers below.
# NOTE(review): assumes an Ollama daemon is listening on localhost:11434 — confirm.
llm = Ollama(
    base_url="http://localhost:11434",
    model="qwen:0.5b",
)

def get_completion_ollama(prompt):
    """Send *prompt* to the module-level Ollama LLM and return its completion.

    Thin convenience wrapper around ``llm.invoke``; the return value is
    whatever the underlying LLM produces (a string for this wrapper).
    """
    completion = llm.invoke(prompt)
    return completion

def main():
    """Run a demo prompt through the model and print the completion.

    Guarded behind ``__main__`` so importing this module no longer
    triggers a network call to the Ollama server as a side effect.
    """
    prompt = "帮我写一篇宣传稿"  # "Write a promotional article for me"
    res = get_completion_ollama(prompt=prompt)
    print(res)


if __name__ == "__main__":
    main()
