# Demo script: index a web page into Milvus and answer a similarity-search
# query over it, using Ollama-served embeddings via LangChain.
#
#https://python.langchain.com/docs/use_cases/question_answering/

#https://milvus.io/docs/integrate_with_langchain.md
# from os import environ
print("Hello, Langchain!")


from langchain.vectorstores import Milvus
from langchain.document_loaders import WebBaseLoader
# from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.ollama import OllamaEmbeddings

# Pipeline overview:
# 1. Prepare the documents you want the LLM to peek at when it thinks.
# 2. Set up an embedding model to convert documents into vector embeddings.
# 3. Set up a vector store used to save the vector embeddings.

# Fetch the source document(s) to index (currently just the Milvus overview page).
web_loader = WebBaseLoader([
    # "https://gitee.com/beyond-prototype/mistral/blob/master/README.md",
    "https://milvus.io/docs/overview.md",
])

docs = web_loader.load()

# Report how many documents were fetched and the size of the first one.
print("Load completed:")
print(len(docs))
print(len(docs[0].page_content))

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Chunk the pages so each embedding covers a bounded span of text; the
# overlap keeps some context shared across neighbouring chunks.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = splitter.split_documents(docs)

import os

# Embedding model served by Ollama. The base URL is overridable via the
# environment so the same script works inside and outside docker-compose;
# the defaults are identical to the previous hard-coded values.
OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "http://ollama:11434")
embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model="mistral:instruct")

# Milvus connection parameters (defaults match the docker-compose service name).
MILVUS_HOST = os.environ.get("MILVUS_HOST", "standalone")
MILVUS_PORT = os.environ.get("MILVUS_PORT", "19530")

# Embed every chunk and persist the resulting vectors into Milvus.
vector_store = Milvus.from_documents(
    docs,
    embedding=embeddings,
    connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT},
)

# query = "What is mistral?"
query = "What is Milvus?"

# Retrieve the chunks most similar to the query. Bind the results to a
# distinct name so they don't shadow the indexed corpus in `docs`.
hits = vector_store.similarity_search(query)

print(hits)

# print("Hello, OllamaEmbeddings @_@!")

# from langchain.chains.qa_with_sources import load_qa_with_sources_chain
# from langchain.llms import Ollama
# chain = load_qa_with_sources_chain(Ollama(base_url="http://127.0.0.1:11434",model="mistral:instruct"), chain_type="map_reduce", return_intermediate_steps=True)
# query = "What is Milvus?"
# chain({"input_documents": docs, "question": query}, return_only_outputs=True)
