from langchain.chains import RetrievalQA
from langchain.llms import OpenAI


def text_split(state_of_the_union=None):
    """Split text into chunks, index them in Chroma, and print the best match
    for a sample query.

    Args:
        state_of_the_union: Raw document text to index. When None, the text
            is read from '../../state_of_the_union.txt'. (The original
            version shadowed and ignored this parameter entirely.)

    Returns:
        The list of documents returned by the similarity search.
    """
    # Local imports keep heavyweight dependencies off module import time.
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import Chroma

    # Bug fix: only fall back to the file when no text was supplied,
    # instead of unconditionally overwriting the argument.
    if state_of_the_union is None:
        # encoding made explicit for consistency with the script section below
        with open('../../state_of_the_union.txt', encoding='utf8') as f:
            state_of_the_union = f.read()

    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_text(state_of_the_union)

    embeddings = OpenAIEmbeddings()
    docsearch = Chroma.from_texts(texts, embeddings)

    query = "What did the president say about Ketanji Brown Jackson"
    docs = docsearch.similarity_search(query)
    print(docs[0].page_content)
    # Return the matches so callers can inspect them (original printed only).
    return docs


def vector_store_retriever():
    """Load the state-of-the-union text, index it in FAISS, and retrieve
    documents relevant to a sample query.

    Returns:
        The list of relevant documents. (The original computed them and
        silently discarded the result, returning None.)
    """
    from langchain.document_loaders import TextLoader

    # encoding made explicit for consistency with the script section below
    loader = TextLoader('../../../state_of_the_union.txt', encoding='utf8')

    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import FAISS
    from langchain.embeddings import OpenAIEmbeddings

    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    db = FAISS.from_documents(texts, embeddings)

    retriever = db.as_retriever()
    docs = retriever.get_relevant_documents("what did he say about ketanji brown jackson")
    # Bug fix: return the retrieved documents instead of dropping them.
    return docs


# End-to-end RetrievalQA pipeline: load -> split -> embed -> index -> query.

# 1. load the text
from langchain.document_loaders import TextLoader

loader = TextLoader('../state_of_the_union.txt', encoding='utf8')
documents = loader.load()

# 2. split the loaded text into ~1000-character chunks (no overlap)
from langchain.text_splitter import CharacterTextSplitter

text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)

# 3. set the embedding generator (requires OPENAI_API_KEY in the environment)
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()

# 4. index the text and vectors into db and get the retriever
from langchain.vectorstores import Chroma

db = Chroma.from_documents(texts, embeddings)
retriever = db.as_retriever()

# 5. build the chain with LLM and retriever; "stuff" packs all retrieved
#    chunks into a single prompt
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=retriever)

# 6. run the query and show the answer
# Bug fix: the original discarded qa.run()'s return value, so the script
# produced no visible output for its one query.
query = "What did the president say about Ketanji Brown Jackson"
print(qa.run(query))

# Alternative one-liner for steps 2-4 above:
#   index = VectorstoreIndexCreator().from_loaders([loader])
# The string below shows how to customize the creator's components.
'''
from langchain.indexes import VectorstoreIndexCreator
index_creator = VectorstoreIndexCreator(
    vectorstore_cls=Chroma,
    embedding=OpenAIEmbeddings(),
    text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
)
'''
