from langchain_ollama import OllamaEmbeddings

from common_config import CHAT_OLLAMA_MODEL

# Embedding client backed by a local Ollama server; the model name is
# shared with the chat examples via common_config.
embeddings = OllamaEmbeddings(model=CHAT_OLLAMA_MODEL)

# Embed the text of a sample script as a single query vector.
# A context manager guarantees the file handle is closed even if reading
# raises; an explicit encoding avoids platform-dependent decoding of the
# UTF-8 source file.
with open("../tool_call/langchain_ollama_get_time_with_think.py", "r", encoding="utf-8") as f:
    content = f.read()

input_text = content

# embed_query returns one vector (list of floats); show the first few dims.
vector = embeddings.embed_query(input_text)
print(vector[:3])

# embed_documents embeds a batch of texts, returning one vector per input.
input_texts = ["Document 1...", "Document 2..."]
vectors = embeddings.embed_documents(input_texts)
print(len(vectors))

# Preview the leading dimensions of each document's vector.
for doc_vector in vectors:
    print(doc_vector[:3])