from langsmith import traceable
from customize.get_ollama import GetOllama
# Module-level chat-model handle shared by every pipeline() call.
# NOTE(review): ip, model_type=1, and "llama3.1" are hard-coded — presumably
# model_type=1 selects a chat-style model; confirm against GetOllama's docs.
model = GetOllama(ip=GetOllama.ailab_linux_ip, model_type=1, model_name="llama3.1")()

@traceable  # Auto-trace this function
def pipeline(user_input: str):
    """Send *user_input* to the model as one user-role message; return the reply text."""
    message = {"role": "user", "content": user_input}
    response = model.invoke([message])
    return response.content

# Guard the demo call so merely importing this module does not hit the model.
if __name__ == "__main__":
    print(pipeline("Hello, world!"))