from langchain_ollama import ChatOllama

# Connect to a locally running Ollama server and wrap it as a LangChain chat model.
chat_model = ChatOllama(
    model="qwen3:0.6b",                 # model tag as registered with Ollama
    base_url="http://localhost:11434",  # default endpoint of a local Ollama deployment
    temperature=0.0,                    # deterministic output — sensible for translation
    # other params...
)

# Conversation: one system instruction followed by a single user turn.
system_turn = (
    "system",
    "You are a helpful assistant that translates English to Chinese. Translate the user sentence.",
)
user_turn = ("human", "I love programming.")

# Send the conversation and print the model's reply text.
response = chat_model.invoke([system_turn, user_turn])
print(response.content)