chomakov committed on
Commit
7db9219
1 Parent(s): 858aca3

changed to .py

Files changed (2)
  1. Dockerfile +2 -1
  2. GPT-4_PDF_summary.py +156 -0
Dockerfile CHANGED
@@ -10,7 +10,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade -r requirements.txt
 
  COPY . .
 
- CMD ["panel", "serve", "/GPT-4_PDF_summary.ipynb", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "chomakov-gpt-4-pdf-summary.hf.space:7860", "--allow-websocket-origin", "0.0.0.0:7860"]
+ CMD ["panel", "serve", "/GPT-4_PDF_summary.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "chomakov-gpt-4-pdf-summary.hf.space:7860", "--allow-websocket-origin", "0.0.0.0:7860"]
+
 
  RUN mkdir /.cache
  RUN chmod 777 /.cache
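Note on the change above: the CMD now points panel serve at the exported GPT-4_PDF_summary.py script instead of the old notebook file; the flags are unchanged. For anyone testing outside the Docker image, the equivalent invocation would look roughly like this (assuming the script sits in the current directory rather than at the image root):

panel serve GPT-4_PDF_summary.py --address 0.0.0.0 --port 7860 --allow-websocket-origin chomakov-gpt-4-pdf-summary.hf.space:7860 --allow-websocket-origin 0.0.0.0:7860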
GPT-4_PDF_summary.py ADDED
@@ -0,0 +1,156 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[ ]:
+
+
+ # get_ipython().system('pip install langchain openai chromadb tiktoken pypdf panel')  # disabled: get_ipython() is not defined when this runs as a plain script under panel serve; dependencies come from requirements.txt
+
+
+ # In[ ]:
+
+
+ import os
+ from langchain.chains import RetrievalQA
+ from langchain.llms import OpenAI
+ from langchain.document_loaders import TextLoader
+ from langchain.document_loaders import PyPDFLoader
+ from langchain.indexes import VectorstoreIndexCreator
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ import panel as pn
+ import tempfile
+
+
+ # In[ ]:
+
+
+ pn.extension('texteditor', template="bootstrap", sizing_mode='stretch_width')
+ pn.state.template.param.update(
+     main_max_width="690px",
+     header_background="#F08080",
+ )
+
+
+ # In[3]:
+
+
+ file_input = pn.widgets.FileInput(width=300)
+
+ openaikey = pn.widgets.PasswordInput(
+     value="", placeholder="Enter your OpenAI API Key here...", width=300
+ )
+ prompt = pn.widgets.TextEditor(
+     value="", placeholder="Enter your questions here...", height=160, toolbar=False
+ )
+ run_button = pn.widgets.Button(name="Run!")
+
+ select_k = pn.widgets.IntSlider(
+     name="Number of relevant chunks", start=1, end=5, step=1, value=2
+ )
+ select_chain_type = pn.widgets.RadioButtonGroup(
+     name='Chain type',
+     options=['stuff', 'map_reduce', "refine", "map_rerank"]
+ )
+
+ widgets = pn.Row(
+     pn.Column(prompt, run_button, margin=5),
+     pn.Card(
+         "Chain type:",
+         pn.Column(select_chain_type, select_k),
+         title="Advanced settings", margin=10
+     ), width=600
+ )
+
+
+ # In[4]:
+
+
+ def qa(file, query, chain_type, k):
+     # load document
+     loader = PyPDFLoader(file)
+     documents = loader.load()
+     # split the documents into chunks
+     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
+     texts = text_splitter.split_documents(documents)
+     # select which embeddings we want to use
+     embeddings = OpenAIEmbeddings()
+     # create the vectorstore to use as the index
+     db = Chroma.from_documents(texts, embeddings)
+     # expose this index in a retriever interface
+     retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
+     # create a chain to answer questions
+     qa = RetrievalQA.from_chain_type(
+         llm=OpenAI(), chain_type=chain_type, retriever=retriever, return_source_documents=True)
+     result = qa({"query": query})
+     print(result['result'])
+     return result
+
+
+ # In[6]:
+
+
+ convos = []  # store all panel objects in a list
+
+ def qa_result(_):
+     os.environ["OPENAI_API_KEY"] = openaikey.value
+
+     # save pdf file to a temp file
+     if file_input.value is not None:
+         file_input.save("/.cache/temp.pdf")
+
+         prompt_text = prompt.value
+         if prompt_text:
+             result = qa(file="/.cache/temp.pdf", query=prompt_text, chain_type=select_chain_type.value, k=select_k.value)
+             convos.extend([
+                 pn.Row(
+                     pn.panel("\U0001F60A", width=10),
+                     prompt_text,
+                     width=600
+                 ),
+                 pn.Row(
+                     pn.panel("\U0001F916", width=10),
+                     pn.Column(
+                         result["result"],
+                         "Relevant source text:",
+                         pn.pane.Markdown('\n--------------------------------------------------------------------\n'.join(doc.page_content for doc in result["source_documents"]))
+                     )
+                 )
+             ])
+     #return convos
+     return pn.Column(*convos, margin=15, width=575, min_height=400)
+
+
+ # In[7]:
+
+
+ qa_interactive = pn.panel(
+     pn.bind(qa_result, run_button),
+     loading_indicator=True,
+ )
+
+
+ # In[8]:
+
+
+ output = pn.WidgetBox('*Output will show up here:*', qa_interactive, width=630, scroll=True)
+
+
+ # In[9]:
+
+
+ # layout
+ pn.Column(
+     pn.pane.Markdown("""
+     ## \U0001F60A! Question Answering with your PDF file
+
+     1) Upload a PDF. 2) Enter your OpenAI API key (API calls are billed; set up billing at [OpenAI](https://platform.openai.com/account)). 3) Type a question and click "Run".
+
+     """),
+     pn.Row(file_input, openaikey),
+     output,
+     widgets
+
+ ).servable()
+
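For reviewers who want to exercise the retrieval pipeline without the Panel UI, a minimal sketch along these lines should work with the qa() helper added above. The query string is hypothetical and not part of the commit; it assumes the definitions above are available in the session, OPENAI_API_KEY is set in the environment, and a PDF has already been saved to /.cache/temp.pdf.

# Hypothetical standalone check of the qa() helper defined in GPT-4_PDF_summary.py (not part of this commit).
result = qa(
    file="/.cache/temp.pdf",               # the same temp path the app writes uploads to
    query="What is this document about?",  # example question
    chain_type="stuff",                    # any of: stuff, map_reduce, refine, map_rerank
    k=2,                                   # number of retrieved chunks, mirroring the slider default
)
print(result["result"])                    # the generated answer
for doc in result["source_documents"]:     # the k chunks retrieved as supporting context
    print(doc.page_content[:200])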