from langchain_core.documents import Document
import ollama

# Toy corpus for the vector-store demo.  Each Document pairs a short
# page_content string with a "source" metadata tag usable for filtering.
# NOTE: the original list contained the dogs document twice (an apparent
# copy-paste slip); the duplicate is removed so each fact is embedded once
# and similarity scores aren't skewed toward the repeated text.
documents = [
    Document(
        page_content="Dogs are great companions, known for their loyalty and friendliness.",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        page_content="Cats are independent pets that often enjoy their own space.",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        page_content="Goldfish are popular pets for beginners, requiring relatively simple care.",
        metadata={"source": "fish-pets-doc"},
    ),
    Document(
        page_content="Parrots are intelligent birds capable of mimicking human speech.",
        metadata={"source": "bird-pets-doc"},
    ),
    Document(
        page_content="Rabbits are social animals that need plenty of space to hop around.",
        metadata={"source": "mammal-pets-doc"},
    ),
]

from langchain_chroma import Chroma

from langchain_ollama import OllamaEmbeddings

# Use a dedicated embedding model.  The original code passed
# "qwen:latest", which is a chat/completion model, not an embedding
# model; "all-minilm:latest" (named in the original comment) is a small
# sentence-embedding model served by Ollama and is the intended choice.
embeddings = OllamaEmbeddings(
    model="all-minilm:latest",
)

# Build an in-memory Chroma collection, embedding every document above.
vectorstore = Chroma.from_documents(
    documents,
    embedding=embeddings,
)

# Text query: returns (Document, score) pairs; for the default distance
# metric a lower score means a closer match.
print(vectorstore.similarity_search_with_score("cat"))

# Equivalent query via a precomputed embedding vector.
embedding = embeddings.embed_query("cat")

print(vectorstore.similarity_search_by_vector(embedding))

# from typing import List

# from langchain_core.documents import Document
# from langchain_core.runnables import RunnableLambda

# retriever = RunnableLambda(vectorstore.similarity_search).bind(k=1)  # select top result

# retriever.batch(["cat", "shark"])

# from langchain_core.prompts import ChatPromptTemplate
# from langchain_core.runnables import RunnablePassthrough
# from langchain_ollama import OllamaLLM  # langchain_community.llms.Ollama is deprecated; use the langchain_ollama package already imported above

# message = """
# Answer this question using the provided context only.

# {question}

# Context:
# {context}
# """

# prompt = ChatPromptTemplate.from_messages([("human", message)])

# llm = OllamaLLM(
#     model="qwen:latest"
# )

# rag_chain = {"context": retriever, "question": RunnablePassthrough()} | prompt | llm

# response = rag_chain.invoke("tell me about cats")

# print(response)  # the Ollama LLM wrapper returns a plain str, so there is no .content attribute