import gradio as gr
import os
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.schema import AIMessage, HumanMessage
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain import HuggingFaceHub

# Read the Hugging Face Hub API key from the environment
API_KEY = os.environ["API_KEY"]
pdf_path = './Adventure Works Analysis Report.pdf'

# Global vector index, built once at startup and queried by chat()
index = None
def load_file(pdf_path):
    global index
    pdf_loader = UnstructuredPDFLoader(pdf_path)
    index = VectorstoreIndexCreator(
        embedding=HuggingFaceEmbeddings(),
        text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
    ).from_loaders([pdf_loader])
    return "DONE ✅"
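
# Build the index once at startup so chat() can answer immediately.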
load_file(pdf_path)

def chat(message, history):
    global index
    # Convert the Gradio chat history into LangChain message objects.
    # (Kept for context; the RetrievalQA chain below does not consume it.)
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))

    # Answer the question against the indexed PDF with a hosted LLM
    llm2 = HuggingFaceHub(
        repo_id="declare-lab/flan-alpaca-large",
        model_kwargs={"temperature": 0, "max_length": 512},
        huggingfacehub_api_token=API_KEY,
    )
    chain = RetrievalQA.from_chain_type(
        llm=llm2,
        chain_type="stuff",
        retriever=index.vectorstore.as_retriever(),
        input_key="question",
    )
    # Perform question-answering on the uploaded PDF with the user's question
    gpt_response = chain.run("Based on the file you have processed, provide a related answer to this question: " + message)
    return gpt_response
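
# Quick sanity check outside the UI (hypothetical question for this PDF):
# print(chat("What are the key findings of the report?", []))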


# Create a Gradio interface for chat; the theme is set once on the Blocks container
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        with gr.Column():
            # text = gr.Textbox(load_file, [pdf_path], label="Status")
            chat_interface = gr.ChatInterface(chat)

demo.queue().launch(inline=False)
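
# Gradio serves on http://127.0.0.1:7860 by default; pass share=True to
# launch() if a temporary public link is needed.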