standardteam chomakov committed on
Commit
1dee7f2
0 Parent(s):

Duplicate from chomakov/GPT-4_PDF_summary

Browse files

Co-authored-by: Ivan Chomakov <chomakov@users.noreply.huggingface.co>

Files changed (5) hide show
  1. .gitattributes +34 -0
  2. Dockerfile +14 -0
  3. GPT-4_PDF_summary.py +155 -0
  4. README.md +12 -0
  5. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# Copy only the requirements first so this pip layer is cached across
# source-code changes; combine the two pip steps into one layer.
COPY ./requirements.txt /requirements.txt
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir --upgrade -r /requirements.txt

COPY . .

# World-writable scratch dirs for the non-root Spaces runtime:
# /.cache holds the uploaded PDF, .chroma holds the vector index.
RUN mkdir -p /.cache .chroma && chmod 777 /.cache .chroma

# CMD goes last by convention (it only sets image metadata, but placing
# RUN steps after it is confusing). Serve the Panel app on port 7860,
# the port Hugging Face Spaces exposes.
CMD ["panel", "serve", "GPT-4_PDF_summary.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "chomakov-gpt-4-pdf-summary.hf.space", "--allow-websocket-origin", "0.0.0.0:7860"]
GPT-4_PDF_summary.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # !pip install langchain openai chromadb tiktoken pypdf panel
5
+ # In[ ]:
6
+
7
+
8
+ import os
9
+ from langchain.chains import RetrievalQA
10
+ from langchain.llms import OpenAI
11
+ from langchain.document_loaders import TextLoader
12
+ from langchain.document_loaders import PyPDFLoader
13
+ from langchain.indexes import VectorstoreIndexCreator
14
+ from langchain.text_splitter import CharacterTextSplitter
15
+ from langchain.embeddings import OpenAIEmbeddings
16
+ from langchain.vectorstores import Chroma
17
+ import panel as pn
18
+ import tempfile
19
+
20
+
21
+ # In[ ]:
22
+
23
+
24
# --- Panel page configuration ---------------------------------------------
pn.extension('texteditor', template="bootstrap", sizing_mode='stretch_width')
pn.state.template.param.update(
    header_background="#F08080",
    main_max_width="690px",
)

# --- Input widgets ---------------------------------------------------------
# PDF upload, OpenAI API key, question editor, and the run trigger.
file_input = pn.widgets.FileInput(width=300)
run_button = pn.widgets.Button(name="Run!")

openaikey = pn.widgets.PasswordInput(
    value="", placeholder="Enter your OpenAI API Key here...", width=300
)
prompt = pn.widgets.TextEditor(
    value="", placeholder="Enter your questions here...", height=160, toolbar=False
)

# --- Advanced retrieval settings ------------------------------------------
select_chain_type = pn.widgets.RadioButtonGroup(
    name='Chain type',
    options=['stuff', 'map_reduce', "refine", "map_rerank"]
)
select_k = pn.widgets.IntSlider(
    name="Number of relevant chunks", start=1, end=5, step=1, value=2
)

# Prompt + run button on the left, collapsible advanced settings on the right.
widgets = pn.Row(
    pn.Column(prompt, run_button, margin=5),
    pn.Card(
        "Chain type:",
        pn.Column(select_chain_type, select_k),
        title="Advanced settings", margin=10
    ), width=600
)
60
+
61
+
62
+ # In[4]:
63
+
64
+
65
def qa(file, query, chain_type, k):
    """Answer *query* against the PDF stored at path *file*.

    Builds a one-off retrieval pipeline: load the PDF, split it into
    1000-character chunks, embed the chunks into an in-memory Chroma
    index, then run a RetrievalQA chain of the requested *chain_type*
    over the *k* most similar chunks.

    Parameters
    ----------
    file : str
        Filesystem path to a PDF document.
    query : str
        Natural-language question to answer.
    chain_type : str
        One of 'stuff', 'map_reduce', 'refine', 'map_rerank'.
    k : int
        Number of similar chunks retrieved as context.

    Returns
    -------
    dict
        Contains 'result' (the answer text) and 'source_documents'
        (the retrieved chunks), since return_source_documents=True.
    """
    loader = PyPDFLoader(file)
    documents = loader.load()
    # Split into chunks small enough for the model's context window.
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    # Ephemeral vector store rebuilt on every call (simple, but re-embeds
    # the document each time a question is asked).
    db = Chroma.from_documents(texts, embeddings)
    retriever = db.as_retriever(
        search_type="similarity", search_kwargs={"k": k})
    # Renamed from `qa`: the original local shadowed this function's name.
    qa_chain = RetrievalQA.from_chain_type(
        llm=OpenAI(), chain_type=chain_type, retriever=retriever,
        return_source_documents=True)
    # Debug print of the raw answer removed; the caller renders the result.
    result = qa_chain({"query": query})
    return result
85
+
86
+
87
+ # In[6]:
88
+
89
+
90
# Conversation history: alternating question/answer Panel rows, appended to
# on every run and re-rendered as one column.
convos = []


def qa_result(_):
    """Button callback: run the QA pipeline and render the conversation.

    Saves the uploaded PDF to a fixed cache path, forwards the question
    to qa() with the selected chain type and chunk count, appends the
    question/answer pair to `convos`, and returns the full conversation
    as a Panel column. If no question has been typed yet, returns the
    (possibly empty) existing conversation unchanged.
    """
    # The langchain OpenAI clients read the key from the environment.
    os.environ["OPENAI_API_KEY"] = openaikey.value

    # Single source of truth for the temp path (was duplicated inline).
    pdf_path = "/.cache/temp.pdf"

    # Persist the uploaded PDF so PyPDFLoader can read it from disk.
    if file_input.value is not None:
        file_input.save(pdf_path)

    prompt_text = prompt.value
    if prompt_text:
        result = qa(file=pdf_path, query=prompt_text,
                    chain_type=select_chain_type.value, k=select_k.value)
        convos.extend([
            pn.Row(
                pn.panel("\U0001F60A", width=10),
                prompt_text,
                width=600
            ),
            pn.Row(
                pn.panel("\U0001F916", width=10),
                pn.Column(
                    result["result"],
                    "Relevant source text:",
                    pn.pane.Markdown('\n--------------------------------------------------------------------\n'.join(
                        doc.page_content for doc in result["source_documents"]))
                )
            )
        ])
    return pn.Column(*convos, margin=15, width=575, min_height=400)
122
+
123
+
124
+ # In[7]:
125
+
126
+
127
# Re-evaluate qa_result each time the Run button is clicked, showing a
# loading spinner while the pipeline runs.
qa_interactive = pn.panel(
    pn.bind(qa_result, run_button),
    loading_indicator=True,
)

# Scrollable box holding the rendered conversation.
output = pn.WidgetBox('*Output will show up here:*',
                      qa_interactive, width=630, scroll=True)

# Page layout: instructions, then inputs, then output, then the prompt
# area with advanced settings. .servable() registers it with `panel serve`.
pn.Column(
    pn.pane.Markdown("""
## \U0001F60A! Question Answering with your PDF file

1) Upload a PDF. 2) Enter OpenAI API key. This costs $. Set up billing at [OpenAI](https://platform.openai.com/account). 3) Type a question and click "Run".

"""),
    pn.Row(file_input, openaikey),
    output,
    widgets
).servable()
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: GPT-4 PDF Summary
3
+ emoji: 💩
4
+ colorFrom: yellow
5
+ colorTo: indigo
6
+ sdk: docker
7
+ pinned: false
8
+ license: creativeml-openrail-m
9
+ duplicated_from: chomakov/GPT-4_PDF_summary
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ langchain
2
+ openai
3
+ chromadb
4
+ pypdf
5
+ tiktoken
6
+ panel
7
+ notebook