# Hugging Face Space: question answering over an uploaded PDF (LangChain + Gradio)
import os
import tempfile

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
def qa(file, openaikey, query, chain_type, k):
    """Answer ``query`` against the uploaded PDF using a RetrievalQA chain.

    Args:
        file: Uploaded file object; ``file.name`` is the path to the PDF on disk.
        openaikey: OpenAI API key, used by both the embeddings and the LLM.
        query: Natural-language question to answer.
        chain_type: RetrievalQA combine strategy ("stuff", "map_reduce",
            "refine" or "map_rerank").
        k: Number of most-similar chunks to retrieve from the vector store.

    Returns:
        The answer string produced by the chain.
    """
    # The langchain OpenAI wrappers read the key from the environment.
    # Strip whitespace: keys pasted into a textbox often carry a stray
    # trailing newline/space, which makes authentication fail opaquely.
    os.environ["OPENAI_API_KEY"] = openaikey.strip()
    # Load the PDF into per-page documents.
    loader = PyPDFLoader(file.name)
    documents = loader.load()
    # Split the documents into fixed-size chunks for embedding.
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    # Embed the chunks and index them in an in-memory Chroma vector store.
    embeddings = OpenAIEmbeddings()
    db = Chroma.from_documents(texts, embeddings)
    # Expose the index as a retriever returning the top-k similar chunks.
    # int(k) guards against a float arriving from the UI slider.
    retriever = db.as_retriever(
        search_type="similarity", search_kwargs={"k": int(k)})
    # Build the question-answering chain and run the query.
    chain = RetrievalQA.from_chain_type(
        llm=OpenAI(), chain_type=chain_type, retriever=retriever,
        return_source_documents=True)
    result = chain({"query": query})
    print(result["result"])  # echo to server logs for debugging
    return result["result"]
# Gradio UI wiring. NOTE: the legacy `gr.inputs.*` namespace and the
# `default=` slider argument were removed in Gradio 3.x — use the
# top-level component classes with `value=` instead.
iface = gr.Interface(
    fn=qa,
    inputs=[
        gr.File(label="Upload PDF"),
        # Mask the API key so it is not shown on screen.
        gr.Textbox(label="OpenAI API Key", type="password"),
        gr.Textbox(label="Your question"),
        gr.Dropdown(
            choices=["stuff", "map_reduce", "refine", "map_rerank"],
            label="Chain type"),
        # step=1 keeps k an integer, as the retriever expects.
        gr.Slider(minimum=1, maximum=5, value=2, step=1,
                  label="Number of relevant chunks"),
    ],
    outputs="text",
    title="Question Answering with your PDF file",
    description="Upload a PDF file, enter OpenAI API key, type a question and get your answer.",
)
iface.launch()