Raghav001 committed on
Commit
72671db
0 Parent(s):

Duplicate from Raghav001/ReviewTwo

Browse files
Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +251 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ChatPDF
3
+ emoji: 💻
4
+ colorFrom: gray
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.20.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: Raghav001/ReviewTwo
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+ import gradio as gr
4
+ # from concurrent.futures import ThreadPoolExecutor
5
+ import pdfplumber
6
+ import pandas as pd
7
+ import langchain
8
+ import time
9
+ from cnocr import CnOcr
10
+
11
+ # from langchain.document_loaders import PyPDFLoader
12
+ from langchain.document_loaders import UnstructuredWordDocumentLoader
13
+ from langchain.document_loaders import UnstructuredPowerPointLoader
14
+ # from langchain.document_loaders.image import UnstructuredImageLoader
15
+
16
+
17
+
18
+
19
# Local sentence-embedding model: MiniLM transformer body + CLS-token pooling
# (built manually rather than loading the ready-made SentenceTransformer so
# that pooling_mode can be set to 'cls' instead of the default mean pooling).
from sentence_transformers import SentenceTransformer, models, util
word_embedding_model = models.Transformer('sentence-transformers/all-MiniLM-L6-v2', do_lower_case=True)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model])
ocr = CnOcr()  # OCR engine used on images embedded inside PDF pages
# chat_url = 'https://Raghav001-API.hf.space/sale'
chat_url = 'https://Raghav001-API.hf.space/chatpdf'  # remote chat-completion endpoint
headers = {
    'Content-Type': 'application/json',
}
# thread_pool_executor = ThreadPoolExecutor(max_workers=4)
history_max_len = 500  # max characters of chat history included per request
all_max_len = 3000     # max characters total (question + history + retrieved doc lines)
32
+
33
+
34
def get_emb(text):
    """POST *text* to the remote embeddings endpoint and return its embedding.

    Returns the embedding vector (``result['data'][0]['embedding']``) on
    success, or ``None`` when the request fails or the response has an
    unexpected shape.
    """
    emb_url = 'https://Raghav001-API.hf.space/embeddings'
    data = {"content": text}
    # Pre-bind so the error path can reference it safely: the original code
    # read `result` inside `except`, which raised NameError whenever
    # requests.post itself failed.
    result = None
    try:
        result = requests.post(url=emb_url,
                               data=json.dumps(data),
                               headers=headers
                               )
        print("--------------------------------Embeddings-----------------------------------")
        # Parse once and reuse instead of calling result.json() twice.
        embedding = result.json()['data'][0]['embedding']
        print(embedding)
        return embedding
    except Exception as e:
        body = result.text if result is not None else '<no response>'
        print('get_emb failed:', e, 'data', data, 'result body', body)
        return None
47
+
48
+
49
def doc_emb(doc: str):
    """Split *doc* into lines, embed each line locally, and reveal the chat UI.

    doc: the full extracted document text (one paragraph/line per '\n').
    Returns the 6-tuple wired to the Gradio outputs: (texts, embeddings,
    msg-textbox update, clear-button update, status-markdown update,
    chatbot update).
    """
    texts = doc.split('\n')
    # Embed every line with the local SentenceTransformer (no remote call).
    emb_list = embedder.encode(texts)
    print('\n'.join(texts))
    # NOTE: the original also had a bare `gr.Textbox.update(value="")`
    # statement here; its return value was discarded, making it a no-op,
    # so it has been removed.
    return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update(
        value="""success ! Let's talk"""), gr.Chatbot.update(visible=True)
61
+
62
+
63
def get_response(msg, bot, doc_text_list, doc_embeddings):
    """Answer *msg* with retrieval-augmented chat via the remote API.

    msg: the user's new question.
    bot: Gradio chat history — a list of [user, assistant] pairs.
    doc_text_list: the document's lines (produced by doc_emb).
    doc_embeddings: embeddings of those lines (produced by doc_emb).
    Returns the last (up to) 3 turns of the updated history for the Chatbot.
    """
    # future = thread_pool_executor.submit(get_emb, msg)
    gr.Textbox.update(value="")  # NOTE(review): return value discarded — no-op
    now_len = len(msg)
    req_json = {'question': msg}
    # Walk the history backwards, keeping as many recent turns as fit
    # within history_max_len characters; his_bg marks the oldest kept turn.
    his_bg = -1
    for i in range(len(bot) - 1, -1, -1):
        if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len:
            break
        now_len += len(bot[i][0]) + len(bot[i][1])
        his_bg = i
    req_json['history'] = [] if his_bg == -1 else bot[his_bg:]
    # query_embedding = future.result()
    query_embedding = embedder.encode([msg])
    # Rank every document line by cosine similarity to the question.
    cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0]
    score_index = [[score, index] for score, index in zip(cos_scores, [i for i in range(len(cos_scores))])]
    score_index.sort(key=lambda x: x[0], reverse=True)
    print('score_index:\n', score_index)
    # NOTE(review): this prints the module-level gr.State `doc_emb_state`
    # defined in the UI section, not this function's doc_embeddings argument.
    print('doc_emb_state', doc_emb_state)
    # Greedily take the best-scoring lines — plus each line's immediate
    # neighbours, since a paragraph may have been split across lines —
    # until the all_max_len character budget is exhausted.
    index_set, sub_doc_list = set(), []
    for s_i in score_index:
        doc = doc_text_list[s_i[1]]
        if now_len + len(doc) > all_max_len:
            break
        index_set.add(s_i[1])
        now_len += len(doc)
        # Maybe the paragraph is truncated wrong, so add the upper and lower paragraphs
        if s_i[1] > 0 and s_i[1] - 1 not in index_set:
            doc = doc_text_list[s_i[1] - 1]
            if now_len + len(doc) > all_max_len:
                break
            index_set.add(s_i[1] - 1)
            now_len += len(doc)
        if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set:
            doc = doc_text_list[s_i[1] + 1]
            if now_len + len(doc) > all_max_len:
                break
            index_set.add(s_i[1] + 1)
            now_len += len(doc)

    # Reassemble the selected lines in original document order.
    index_list = list(index_set)
    index_list.sort()
    for i in index_list:
        sub_doc_list.append(doc_text_list[i])
    req_json['doc'] = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list)
    data = {"content": json.dumps(req_json)}
    print('data:\n', req_json)
    result = requests.post(url=chat_url,
                           data=json.dumps(data),
                           headers=headers
                           )
    res = result.json()['content']
    bot.append([msg, res])
    # Only the last 3 turns are returned to the Chatbot widget.
    return bot[max(0, len(bot) - 3):]
117
+
118
+
119
def up_file(fls):
    """Extract text from uploaded files and show it in the result textbox.

    fls: list of Gradio file objects (pdf / docx / pptx).
    Returns (textbox update with the extracted text, submit-button update,
    status-markdown update) matching the `file.change` outputs.

    PDFs get full treatment: page text, OCR of embedded images, and tables
    serialized row-by-row as JSON. Word and PowerPoint files are loaded
    whole via their langchain loaders.
    """
    doc_text_list = []

    names = []
    for f in fls:
        names.append(str(f.name))

    # Bucket files by extension (original scheme kept: last 3/4 characters,
    # anything that is not pdf/docx is treated as pptx).
    pdf_names = []
    doc_names = []
    ppt_names = []
    for name in names:
        if name[-3:] == "pdf":
            pdf_names.append(name)
        elif name[-4:] == "docx":
            doc_names.append(name)
        else:
            ppt_names.append(name)

    # --- PDF extraction ---
    for path in pdf_names:
        # Renamed the context variable: the original wrote
        # `with pdfplumber.open(file) as pdf:` which shadowed the very list
        # being iterated.
        with pdfplumber.open(path) as pdf_doc:
            for i, page in enumerate(pdf_doc.pages):
                # extract_text() returns None for image-only pages; the
                # original crashed here with AttributeError on .split.
                text = page.extract_text()
                res_list = text.split('\n')[:-1] if text else []

                # OCR every embedded image: dump its binary stream to a
                # uniquely-named PNG, then run CnOcr over it.
                for j, img in enumerate(page.images):
                    file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j))
                    with open(file_name, mode='wb') as f:
                        f.write(img['stream'].get_data())
                    try:
                        res = ocr.ocr(file_name)
                    except Exception:
                        res = []
                    if len(res) > 0:
                        res_list.append(' '.join([item['text'] for item in res]))

                # Tables: first row is used as the header; each record is
                # appended as one JSON line (fallback: str(df)).
                for table in page.extract_tables():
                    df = pd.DataFrame(table[1:], columns=table[0])
                    try:
                        records = json.loads(df.to_json(orient="records", force_ascii=False))
                        for rec in records:
                            res_list.append(json.dumps(rec, ensure_ascii=False))
                    except Exception:
                        res_list.append(str(df))

                # Accumulate per page so every page's text is kept.
                doc_text_list += res_list

    # --- PowerPoint extraction ---
    for path in ppt_names:
        loader = UnstructuredPowerPointLoader(path)
        doc_text_list.append(loader.load())

    # --- Word extraction ---
    for path in doc_names:
        loader = UnstructuredWordDocumentLoader(path)
        doc_text_list.append(loader.load())

    # Drop empty fragments and coerce everything (incl. loader Documents)
    # to stripped strings.
    doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0]
    return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update(
        visible=True), gr.Markdown.update(
        value="Processing")
220
+
221
+
222
+
223
+
224
+
225
# Gradio UI: left column uploads + extracted-text preview, right column chat.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            file = gr.File(file_types=['.pptx', '.docx', '.pdf'], label='Click to upload Document', file_count='multiple')
            doc_bu = gr.Button(value='Submit', visible=False)

            # Extracted document text, revealed after upload.
            txt = gr.Textbox(label='result', visible=False)

            # Per-session state: document lines and their embeddings,
            # filled by doc_emb and consumed by get_response.
            doc_text_state = gr.State([])
            doc_emb_state = gr.State([])

        with gr.Column():
            md = gr.Markdown("Please Upload the PDF")
            chat_bot = gr.Chatbot(visible=False)
            msg_txt = gr.Textbox(visible=False)
            chat_bu = gr.Button(value='Clear', visible=False)

    # upload -> extract text; Submit -> embed + reveal chat; submit msg -> answer.
    file.change(up_file, [file], [txt, doc_bu, md])  # hiding the text
    doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot])
    msg_txt.submit(get_response, [msg_txt, chat_bot, doc_text_state, doc_emb_state], [chat_bot], queue=False)
    # Clear button resets the Chatbot by returning None into it.
    chat_bu.click(lambda: None, None, chat_bot, queue=False)

if __name__ == "__main__":
    demo.queue().launch(show_api=False)
    # demo.queue().launch(share=False, server_name='172.22.2.54', server_port=9191)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ pdfplumber
2
+ sentence_transformers
3
+ cnocr
4
+ langchain
5
+ unstructured