import os

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma

def qa(file, openaikey, query, chain_type, k):
    os.environ["OPENAI_API_KEY"] = openaikey
    # load the PDF document
    loader = PyPDFLoader(file.name)
    documents = loader.load()
    # split the document into chunks
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    # select which embeddings we want to use
    embeddings = OpenAIEmbeddings()
    # create the vector store to use as the index
    db = Chroma.from_documents(texts, embeddings)
    # expose this index in a retriever interface
    retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
    # create a chain to answer questions over the retrieved chunks
    qa_chain = RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type=chain_type,
        retriever=retriever,
        return_source_documents=True,
    )
    result = qa_chain({"query": query})
    return result["result"]
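
# A minimal sketch of exercising qa() directly, outside the Gradio UI,
# e.g. for testing. The _Upload class and "example.pdf" are hypothetical;
# only a .name attribute is needed, since the loader above reads file.name,
# mirroring Gradio's uploaded-file wrapper:
#
#   class _Upload:
#       name = "example.pdf"
#
#   answer = qa(_Upload(), "sk-...", "What is this document about?", "stuff", 2)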

iface = gr.Interface(
    fn=qa,
    inputs=[
        gr.inputs.File(label="Upload PDF"),
        gr.inputs.Textbox(label="OpenAI API Key"),
        gr.inputs.Textbox(label="Your question"),
        gr.inputs.Dropdown(
            choices=["stuff", "map_reduce", "refine", "map_rerank"],
            label="Chain type",
        ),
        gr.inputs.Slider(minimum=1, maximum=5, default=2, label="Number of relevant chunks"),
    ],
    outputs="text",
    title="Question Answering with your PDF file",
    description="Upload a PDF file, enter your OpenAI API key, type a question, and get your answer.",
)

iface.launch()
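
# Note: this script targets the legacy LangChain (pre-0.1) and Gradio (pre-4.0)
# APIs imported above. A sketch of one environment where it should run as
# written (the version pins are an assumption, not tested):
#
#   pip install "langchain<0.1" "gradio<4" chromadb pypdf openai tiktoken
#
# Running `python app.py` then serves the UI (http://127.0.0.1:7860 by default).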