lekkalar commited on
Commit
3a51c5e
1 Parent(s): 02ded34

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import time  # needed by bot() for the typing-effect delay

import gradio as gr

from langchain.chains import ConversationalRetrievalChain  # for conversing with chatGPT
from langchain.chat_models import ChatOpenAI  # the LLM model we'll use (ChatGPT)
from langchain.document_loaders import OnlinePDFLoader  # for loading the pdf
from langchain.embeddings import OpenAIEmbeddings  # for creating embeddings
from langchain.vectorstores import Chroma  # for the vectorization part
def loading_pdf():
    """Return the interim status message shown while a PDF is being processed."""
    return "Loading..."
def pdf_changes(pdf_doc, open_ai_key):
    """Build the conversational QA chain for an uploaded PDF.

    Args:
        pdf_doc: uploaded file object; must expose ``.name`` (the path),
            as the value of the ``gr.File`` component does.
        open_ai_key: the user's OpenAI API key.

    Returns:
        A status string for the UI: "Ready" on success, or a prompt to
        supply a key.

    Side effects:
        Sets ``OPENAI_API_KEY`` in the environment and stores the chain in
        the module-level global ``pdf_qa`` consumed by ``infer()``.
    """
    # Guard clause. Reject both None and the empty string a blank textbox
    # yields. (The original tested the misspelled name ``openai_key``,
    # which raised NameError on every call.)
    if not open_ai_key:
        return "Please provide an OpenAI API key"

    os.environ['OPENAI_API_KEY'] = open_ai_key

    # Load the pdf file and split it into pages
    loader = OnlinePDFLoader(pdf_doc.name)
    pages = loader.load_and_split()

    # OpenAIEmbeddings generates the embeddings for the page text
    embeddings = OpenAIEmbeddings()

    # Vectorize the pages into a Chroma store using those embeddings
    vectordb = Chroma.from_documents(pages, embedding=embeddings)

    # A ConversationalRetrievalChain is like a RetrievalQAChain except it
    # also accepts a chat history, which allows follow-up questions.
    global pdf_qa
    pdf_qa = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(temperature=0, model_name="gpt-4"),
        vectordb.as_retriever(),
        return_source_documents=False,
    )

    return "Ready"
def add_text(history, text):
    """Append the user's message as a new (user, pending-bot) pair.

    Returns the extended history plus "" so the textbox is cleared.
    The input ``history`` list is not mutated; a new list is returned.
    """
    return history + [(text, None)], ""
def bot(history):
    """Stream the answer into the last history entry, one character per yield.

    Generator used as a Gradio event handler: each yield re-renders the
    chatbot, producing a typing effect (0.05 s per character).
    """
    answer = infer(history[-1][0], history)
    history[-1][1] = ""
    for end in range(len(answer)):
        # Grow the visible reply by one character each step.
        history[-1][1] = answer[: end + 1]
        time.sleep(0.05)
        yield history
def infer(question, history):
    """Ask the global ``pdf_qa`` chain a question with the prior chat turns.

    Args:
        question: the user's latest question.
        history: list of (human, ai) pairs; the final entry is the
            in-progress turn and is excluded from the chain's history.

    Returns:
        The chain's answer string.
    """
    # Completed turns only. (The original appended to a misspelled ``res``
    # list and then queried an undefined ``qa`` — both NameErrors; the
    # chain global is ``pdf_qa``, set by pdf_changes().)
    chat_history = [(human, ai) for human, ai in history[:-1]]
    print(chat_history)

    result = pdf_qa({"question": question, "chat_history": chat_history})
    print(result)
    return result["answer"]
# Page CSS: constrain the main Gradio column to a centered 700px width.
css="""
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""
# HTML banner rendered at the top of the app via gr.HTML(title).
title = """
<div style="text-align: center;max-width: 700px;">
    <h1>Chat with PDF • OpenAI</h1>
    <p style="text-align: center;">Upload a .PDF, click the "Load PDF to LangChain" button, <br />
    when everything is ready, go ahead and start typing your questions <br />
    This version is set to store chat history, and uses gpt-4 as LLM</p>
</div>
"""
# Assemble the Gradio UI: key/PDF inputs, a status row, and the chatbot.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)

        with gr.Column():
            openai_key = gr.Textbox(label="Your OpenAI API key", type="password")
            pdf_doc = gr.File(label="Load a pdf", file_types=['.pdf'], type="file")
            with gr.Row():
                langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
                load_pdf = gr.Button("Load pdf to langchain")

        chatbot = gr.Chatbot([], elem_id="chatbot").style(height=350)
        question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
        submit_btn = gr.Button("Send Message")

        # First click handler flashes "Loading..." immediately; the second
        # runs the slow pdf_changes and overwrites the status when done.
        # (The original wired ``loader.load_and_split()_pdf`` here — a
        # syntax error; the intended callback is loading_pdf.)
        load_pdf.click(loading_pdf, None, langchain_status, queue=False)
        load_pdf.click(pdf_changes, inputs=[pdf_doc, openai_key], outputs=[langchain_status], queue=False)

        # Enter key and Send button share the same add_text -> bot pipeline.
        question.submit(add_text, [chatbot, question], [chatbot, question]).then(
            bot, chatbot, chatbot
        )
        submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
            bot, chatbot, chatbot
        )

demo.launch()