# %%
# pip install unstructured tiktoken pinecone-client pypdf

# import dependencies
import openai
import pinecone
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Pinecone
from langchain_openai.llms import OpenAI
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings

import os

# Credentials are loaded from a local .env file via python-dotenv.
# Never hard-code API keys in source; keep them in .env (gitignored).
# os.environ['OPENAI_API_KEY'] = "sk"
# os.environ['OPENAI_API_BASE'] = "https://"
# os.environ['HUGGINGFACEHUB_API_TOKEN'] = ''
from dotenv import load_dotenv
load_dotenv()

# %% load documents
# function to read documents
def load_docs(directory):
    """Load every PDF under *directory*; each page becomes one Document."""
    return PyPDFDirectoryLoader(directory).load()


# passing the directory to the 'load_docs' function
directory = 'Docs/'
documents = load_docs(directory)
print(len(documents))  # 3 — each PDF page counts as one document


# %% transform documents
# split docs into chunks
def split_docs(documents, chunk_size=1000, chunk_overlap=20):
    """Split documents into overlapping character chunks.

    :param documents: list of Documents to split
    :param chunk_size: maximum characters per chunk
    :param chunk_overlap: number of characters shared between adjacent chunks
    :return: list of chunked Documents
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return text_splitter.split_documents(documents)


docs = split_docs(documents)
print(len(docs))  # 7 chunks produced from the 3 pages above

# %% generate text embedding
embeddings = OpenAIEmbeddings()
# NOTE(review): the local SentenceTransformer alternative failed with
# "remote host closed the connection" while downloading the model — kept
# commented out for reference.
# embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# Sanity check: embed a short query and inspect the raw vector.
query_result = embeddings.embed_query("hello buddy")
print(query_result)

# %% vector store
'''
AttributeError: init is no longer a top-level attribute of the pinecone package.

Please create an instance of the Pinecone class instead.
'''
# Pinecone's v3 client replaced pinecone.init() with a Pinecone class; the
# old code below is kept for reference only.
# SECURITY: never commit real API keys — a live key was previously pasted
# here; it has been redacted and should be rotated. Load keys from .env.
# pinecone.init(
#     api_key="<PINECONE_API_KEY>",  # redacted
#     # environment=
# )
#
# index_name = "mcq-creator"
#
# index = Pinecone.from_documents(docs, embeddings, index_name=index_name)

db = Chroma.from_documents(docs, embeddings)  # embed each chunk and store in Chroma
# Peek at the stored vectors (numeric representation of the chunks).
# NOTE(review): _collection is a private Chroma attribute — may break
# across chromadb versions.
print(db._collection.get(include=['embeddings']))


# %% Retrieve Answers
# Fetch the top-k most relevant chunks from the vector store (Chroma).
def get_similiar_docs(query, k=2):
    """Return the *k* chunks most similar to *query* from the vector store.

    :param query: natural-language search string
    :param k: number of documents to return (default 2)
    :return: list of the k most relevant Documents
    """
    # NOTE: function name keeps the original (misspelled) spelling so
    # existing callers don't break.
    retriever = db.as_retriever(search_kwargs={"k": k})
    # get_relevant_documents() is deprecated since LangChain 0.1.46;
    # retriever.invoke() is the supported replacement with identical output.
    return retriever.invoke(query)


# %% Creating Structured Output
from langchain.chains.question_answering import load_qa_chain
# Alternative LLM via HuggingFace Hub (unused, kept for reference):
# from langchain import HuggingFaceHub

# llm=HuggingFaceHub(repo_id="bigscience/bloom", model_kwargs={"temperature":1e-10})
# llm
llm = OpenAI()
# "stuff" chain: concatenates all retrieved docs into one prompt for the LLM.
chain = load_qa_chain(llm, chain_type="stuff")


# Answer *query* with retrieval-augmented QA over the vector store.
def get_answer(query):
    """Fetch the most relevant chunks for *query* and run the QA chain on them."""
    context_docs = get_similiar_docs(query)
    print(context_docs)  # debug: show which chunks were retrieved
    return chain.run(input_documents=context_docs, question=query)


# Demo queries: one answerable from the indexed docs, one likely out of scope.
our_query = "How is India's economy?"
answer = get_answer(our_query)
print(answer)

our_query = "Akihabara?"
answer = get_answer(our_query)
print(answer)
