stephenz007 Raghav001 committed on
Commit
fe2477c
β€’
0 Parent(s):

Duplicate from Raghav001/Experiment

Browse files

Co-authored-by: Raghavan <Raghav001@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +247 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ChatPDF
3
+ emoji: πŸ’»
4
+ colorFrom: gray
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.20.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: Raghav001/Experiment
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+ import gradio as gr
4
+ # from concurrent.futures import ThreadPoolExecutor
5
+ import pdfplumber
6
+ import pandas as pd
7
+ import langchain
8
+ import time
9
+ from cnocr import CnOcr
10
+
11
+ # from langchain.document_loaders import PyPDFLoader
12
+ from langchain.document_loaders import UnstructuredWordDocumentLoader
13
+ from langchain.document_loaders import UnstructuredPowerPointLoader
14
+ # from langchain.document_loaders.image import UnstructuredImageLoader
15
+
16
+
17
+
18
+
19
from sentence_transformers import SentenceTransformer, models, util

# Local sentence embedder: a MiniLM transformer body followed by a pooling
# layer, assembled manually rather than via SentenceTransformer(name).
# NOTE(review): 'cls' pooling is unusual for all-MiniLM-L6-v2, which is
# distributed with mean pooling — confirm this choice is intentional.
word_embedding_model = models.Transformer('sentence-transformers/all-MiniLM-L6-v2', do_lower_case=True)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model])

# OCR engine used for images embedded inside uploaded PDFs.
ocr = CnOcr()

# Remote chat-completion endpoint and the JSON headers sent with every request.
chat_url = 'https://souljoy-my-api.hf.space/chatpdf'
headers = {
    'Content-Type': 'application/json',
}

# Character budgets: how much chat history may be replayed, and the total
# prompt size (history + question + retrieved document paragraphs).
history_max_len = 500
all_max_len = 3000
32
+
33
+
34
def get_emb(text):
    """Fetch an embedding for *text* from the remote embeddings endpoint.

    Args:
        text: The string to embed.

    Returns:
        The embedding vector (a list of floats) on success, or ``None`` when
        the HTTP request or response parsing fails.
    """
    emb_url = 'https://souljoy-my-api.hf.space/embeddings'
    data = {"content": text}
    result = None  # keep a handle so the except block can report safely
    try:
        result = requests.post(url=emb_url,
                               data=json.dumps(data),
                               headers=headers
                               )
        return result.json()['data'][0]['embedding']
    except Exception as e:
        # BUG FIX: the original printed result.json() unconditionally in this
        # handler, which raises UnboundLocalError when the POST itself failed
        # (or ValueError when the body is not JSON), masking the real error.
        print('get_emb failed:', e, 'data:', data,
              'response:', None if result is None else result.text)
        return None
45
+
46
+
47
def doc_emb(doc: str):
    """Split *doc* into newline-delimited paragraphs and embed each locally.

    Gradio callback: takes the extracted document text and returns the
    paragraph list, their embeddings, and UI updates that reveal the chat
    widgets.

    Args:
        doc: The full document text, one paragraph per line.

    Returns:
        (texts, embeddings, textbox-update, button-update, markdown-update,
        chatbot-update) in the order the Gradio wiring expects.
    """
    texts = doc.split('\n')
    emb_list = embedder.encode(texts)
    print('\n'.join(texts))
    # BUG FIX: the original called gr.Textbox.update(value="") here and
    # discarded the result — update() only builds a dict, so the statement
    # was a no-op and has been removed.
    return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update(
        value="""success ! Let's talk"""), gr.Chatbot.update(visible=True)
59
+
60
+
61
def get_response(msg, bot, doc_text_list, doc_embeddings):
    """Answer *msg* using recent chat history plus the most relevant paragraphs.

    Builds a request for the remote chat endpoint containing (a) as many of
    the most recent chat turns as fit in ``history_max_len`` characters and
    (b) the document paragraphs most similar to the question (plus their
    immediate neighbours), capped at ``all_max_len`` total characters.

    Args:
        msg: The user's question.
        bot: Chat history as a list of [user, assistant] pairs (mutated:
            the new turn is appended).
        doc_text_list: Document paragraphs, parallel to *doc_embeddings*.
        doc_embeddings: Embeddings for each paragraph.

    Returns:
        The last (at most) three chat turns, as the Chatbot widget expects.
    """
    # BUG FIX: removed the original's discarded gr.Textbox.update(value="")
    # call — update() only builds a dict, so the bare statement was a no-op.
    now_len = len(msg)
    req_json = {'question': msg}
    # Walk history newest-to-oldest, keeping turns while they fit the budget.
    his_bg = -1
    for i in range(len(bot) - 1, -1, -1):
        if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len:
            break
        now_len += len(bot[i][0]) + len(bot[i][1])
        his_bg = i
    req_json['history'] = [] if his_bg == -1 else bot[his_bg:]

    # Rank paragraphs by cosine similarity to the question.
    query_embedding = embedder.encode([msg])
    cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0]
    score_index = [[score, index] for index, score in enumerate(cos_scores)]
    score_index.sort(key=lambda x: x[0], reverse=True)
    print('score_index:\n', score_index)

    # Greedily take the best paragraphs until the prompt budget is spent.
    index_set, sub_doc_list = set(), []
    for s_i in score_index:
        doc = doc_text_list[s_i[1]]
        if now_len + len(doc) > all_max_len:
            break
        index_set.add(s_i[1])
        now_len += len(doc)
        # The paragraph split may cut context mid-thought, so also pull in
        # the previous and next paragraphs when they fit.
        if s_i[1] > 0 and s_i[1] - 1 not in index_set:
            doc = doc_text_list[s_i[1] - 1]
            if now_len + len(doc) > all_max_len:
                break
            index_set.add(s_i[1] - 1)
            now_len += len(doc)
        if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set:
            doc = doc_text_list[s_i[1] + 1]
            if now_len + len(doc) > all_max_len:
                break
            index_set.add(s_i[1] + 1)
            now_len += len(doc)

    # Re-emit the chosen paragraphs in document order.
    index_list = sorted(index_set)
    for i in index_list:
        sub_doc_list.append(doc_text_list[i])
    req_json['doc'] = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list)

    data = {"content": json.dumps(req_json)}
    print('data:\n', req_json)
    result = requests.post(url=chat_url,
                           data=json.dumps(data),
                           headers=headers
                           )
    res = result.json()['content']
    bot.append([msg, res])
    return bot[max(0, len(bot) - 3):]
114
+
115
+
116
def up_file(fls):
    """Extract text from uploaded files (PDF, DOCX, PPTX).

    Gradio callback for the File widget. PDFs are parsed page by page with
    pdfplumber (text, OCR'd images, and tables flattened to JSON records);
    DOCX and PPTX files are loaded through their Unstructured loaders.

    Args:
        fls: The list of uploaded file objects (each has a ``.name`` path).

    Returns:
        (textbox-update with the extracted text, button-update, markdown-update)
        in the order the Gradio wiring expects.
    """
    doc_text_list = []

    names = [str(f.name) for f in fls]

    # Route each upload by file extension (anything else falls to pptx,
    # matching the widget's allowed types).
    pdf_paths = []
    docx_paths = []
    pptx_paths = []
    for name in names:
        if name[-3:] == "pdf":
            pdf_paths.append(name)
        elif name[-4:] == "docx":
            docx_paths.append(name)
        else:
            pptx_paths.append(name)

    # --- PDF extraction ---------------------------------------------------
    for path in pdf_paths:
        # BUG FIX: the original rebound its loop source `pdf` to the
        # pdfplumber handle (`as pdf`), shadowing the list of paths;
        # a distinct name keeps the two roles separate.
        with pdfplumber.open(path) as pdf_doc:
            for i in range(len(pdf_doc.pages)):
                page = pdf_doc.pages[i]
                res_list = page.extract_text().split('\n')[:-1]

                # OCR every embedded image: dump its stream to a PNG, then
                # run CnOcr over it; OCR failures are treated as "no text".
                for j in range(len(page.images)):
                    img = page.images[j]
                    file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j))
                    with open(file_name, mode='wb') as f:
                        f.write(img['stream'].get_data())
                    try:
                        res = ocr.ocr(file_name)
                    except Exception:
                        res = []
                    if len(res) > 0:
                        res_list.append(' '.join([re['text'] for re in res]))

                # Flatten each table: first row becomes the header, every
                # other row becomes one JSON record per line.
                tables = page.extract_tables()
                for table in tables:
                    df = pd.DataFrame(table[1:], columns=table[0])
                    try:
                        records = json.loads(df.to_json(orient="records", force_ascii=False))
                        for rec in records:
                            res_list.append(json.dumps(rec, ensure_ascii=False))
                    except Exception:
                        res_list.append(str(df))

                doc_text_list += res_list

    # --- PPTX extraction --------------------------------------------------
    for path in pptx_paths:
        loader = UnstructuredPowerPointLoader(path)
        # BUG FIX: the original commented out `data = loader.load()` but
        # still appended `data`, raising NameError for any pptx upload.
        data = loader.load()
        doc_text_list.append(data)

    # --- DOCX extraction --------------------------------------------------
    for path in docx_paths:
        loader = UnstructuredWordDocumentLoader(path)
        # BUG FIX: same undefined-`data` bug as the pptx branch.
        data = loader.load()
        doc_text_list.append(data)

    # Drop empty fragments and stringify everything for the result textbox.
    doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0]
    return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update(
        visible=True), gr.Markdown.update(
        value="Processing")
217
+
218
+
219
+
220
+
221
+
222
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks(css=".gradio-container {background-color: #f7f377}, footer {visibility: hidden}") as demo:
    with gr.Row():
        with gr.Column():
            # Upload side: the submit button and result box start hidden and
            # are revealed once files have been processed.
            file = gr.File(file_types=['.pptx', '.docx', '.pdf'], label='Click to upload Document', file_count='multiple')
            doc_bu = gr.Button(value='Submit', visible=False)
            txt = gr.Textbox(label='result', visible=False)
            # Per-session state: extracted paragraphs and their embeddings.
            doc_text_state = gr.State([])
            doc_emb_state = gr.State([])
        with gr.Column():
            # Chat side: hidden until the document has been embedded.
            md = gr.Markdown("Please Upload the PDF")
            chat_bot = gr.Chatbot(visible=False)
            msg_txt = gr.Textbox(visible=False)
            chat_bu = gr.Button(value='Clear', visible=False)

    # Wiring: upload -> extract text; Submit -> embed paragraphs;
    # message -> answer; Clear -> wipe the chat window.
    file.change(up_file, [file], [txt, doc_bu, md])
    doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot])
    msg_txt.submit(get_response, [msg_txt, chat_bot, doc_text_state, doc_emb_state], [chat_bot], queue=False)
    chat_bu.click(lambda: None, None, chat_bot, queue=False)

if __name__ == "__main__":
    demo.queue().launch(show_api=False)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ pdfplumber
2
+ sentence_transformers
3
+ cnocr
4
+ langchain
5
+ unstructured