from langchain_ollama import ChatOllama
from langchain_ollama import OllamaEmbeddings

def get_my_llm(model: str = "qwen2.5:14b", temperature: float = 0):
    """Build a ChatOllama chat model.

    Args:
        model: Ollama model tag to use. Defaults to "qwen2.5:14b",
            matching the previous hard-coded value.
        temperature: Sampling temperature. Defaults to 0 for
            deterministic output.

    Returns:
        A configured ChatOllama instance.
    """
    llm = ChatOllama(
        model=model,
        temperature=temperature,
    )
    return llm

def get_my_structured_llm(schema):
    """Return the default chat model wrapped for structured output.

    Args:
        schema: Schema (e.g. a Pydantic model or dict) describing the
            structure the model's responses must conform to.

    Returns:
        A runnable that yields outputs matching *schema*.
    """
    return get_my_llm().with_structured_output(schema=schema)

def get_my_embeddings(model: str = "qwen2.5:14b"):
    """Build an OllamaEmbeddings instance.

    Args:
        model: Ollama model tag to embed with. Defaults to
            "qwen2.5:14b" to preserve the previous behavior.

    Returns:
        A configured OllamaEmbeddings instance.
    """
    # NOTE(review): "qwen2.5:14b" is a chat model, not a dedicated
    # embedding model (e.g. "nomic-embed-text"). Ollama will serve
    # embeddings from it, but quality is typically poor — confirm this
    # model choice is intentional.
    embeddings = OllamaEmbeddings(
        model=model,
    )
    return embeddings