alphayomega committed on
Commit
f740148
0 Parent(s):

Duplicate from alphayomega/PDF15

Files changed (3)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +198 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: PDF15
+ emoji: 🐢
+ colorFrom: gray
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.23.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: alphayomega/PDF15
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,198 @@
+ import subprocess
+
+ # Install dependencies at startup (this Space ships no requirements.txt)
+ subprocess.call(['pip', 'install', 'PyMuPDF', 'gradio', 'numpy==1.23.3', 'scikit-learn', 'tensorflow', 'tensorflow-hub', 'openai==0.10.2', '--user'])
+
+ import urllib.request
+ import fitz  # PyMuPDF
+ import re
+ import numpy as np
+ import tensorflow_hub as hub
+ import openai
+ import gradio as gr
+ import os
+ from sklearn.neighbors import NearestNeighbors
+
+ def download_pdf(url, output_path):
+     urllib.request.urlretrieve(url, output_path)
+
+
+ def preprocess(text):
+     text = text.replace('\n', ' ')
+     text = re.sub(r'\s+', ' ', text)
+     return text
+
+
+ def pdf_to_text(path, start_page=1, end_page=None):
+     doc = fitz.open(path)
+     total_pages = doc.page_count
+
+     if end_page is None:
+         end_page = total_pages
+
+     text_list = []
+
+     for i in range(start_page-1, end_page):
+         text = doc.load_page(i).get_text("text")
+         text = preprocess(text)
+         text_list.append(text)
+
+     doc.close()
+     return text_list
+
+
+ def text_to_chunks(texts, word_length=150, start_page=1):
+     text_toks = [t.split(' ') for t in texts]
+     chunks = []
+
+     for idx, words in enumerate(text_toks):
+         for i in range(0, len(words), word_length):
+             chunk = words[i:i+word_length]
+             # If the last chunk of a page runs short, carry it over into the next page
+             if (i+word_length) > len(words) and (len(chunk) < word_length) and (
+                     len(text_toks) != (idx+1)):
+                 text_toks[idx+1] = chunk + text_toks[idx+1]
+                 continue
+             chunk = ' '.join(chunk).strip()
+             chunk = f'[{idx+start_page}]' + ' ' + '"' + chunk + '"'
+             chunks.append(chunk)
+     return chunks
+
+
+ class SemanticSearch:
+
+     def __init__(self):
+         self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
+         self.fitted = False
+
+     def fit(self, data, batch=1000, n_neighbors=5):
+         self.data = data
+         self.embeddings = self.get_text_embedding(data, batch=batch)
+         n_neighbors = min(n_neighbors, len(self.embeddings))
+         self.nn = NearestNeighbors(n_neighbors=n_neighbors)
+         self.nn.fit(self.embeddings)
+         self.fitted = True
+
+     def __call__(self, text, return_data=True):
+         inp_emb = self.use([text])
+         neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
+
+         if return_data:
+             return [self.data[i] for i in neighbors]
+         else:
+             return neighbors
+
+     def get_text_embedding(self, texts, batch=1000):
+         embeddings = []
+         for i in range(0, len(texts), batch):
+             text_batch = texts[i:(i+batch)]
+             emb_batch = self.use(text_batch)
+             embeddings.append(emb_batch)
+         embeddings = np.vstack(embeddings)
+         return embeddings
+
+
+ def load_recommender(path, start_page=1):
+     global recommender
+     texts = pdf_to_text(path, start_page=start_page)
+     chunks = text_to_chunks(texts, start_page=start_page)
+     recommender.fit(chunks)
+     return 'Corpus Loaded.'
+
+
+ def generate_text(openAI_key, prompt, engine="text-davinci-003"):
+     openai.api_key = openAI_key
+     completions = openai.Completion.create(
+         engine=engine,
+         prompt=prompt,
+         max_tokens=1024,
+         n=1,
+         stop=None,
+         temperature=0.7,
+     )
+     message = completions.choices[0].text
+     return message
+
+
+ def generate_answer(question, openAI_key):
+     topn_chunks = recommender(question)
+     prompt = ""
+     prompt += 'search results:\n\n'
+     for c in topn_chunks:
+         prompt += c + '\n\n'
+
+     prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
+               "Cite each reference using [number] notation (every result has this number at the beginning). "\
+               "Citation should be done at the end of each sentence. If the search results mention multiple subjects "\
+               "with the same name, create separate answers for each. Only include information found in the results and "\
+               "don't add any additional information. Make sure the answer is correct and don't output false content. "\
+               "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\
+               "search results which have nothing to do with the question. Only answer what is asked. The "\
+               "answer should be short and concise.\n\n"
+
+     prompt += f"Query: {question}\nAnswer:"
+     answer = generate_text(openAI_key, prompt, "text-davinci-003")
+     return answer
+
+
+ def question_answer(url, file, question, openAI_key):
+     if openAI_key.strip() == '':
+         return '[ERROR]: Please enter your OpenAI key. Get your key here: https://platform.openai.com/account/api-keys'
+     if url.strip() == '' and file is None:
+         return '[ERROR]: Both URL and PDF are empty. Provide at least one.'
+
+     if url.strip() != '' and file is not None:
+         return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).'
+
+     if url.strip() != '':
+         glob_url = url
+         download_pdf(glob_url, 'corpus.pdf')
+         load_recommender('corpus.pdf')
+
+     else:
+         old_file_name = file.name
+         file_name = file.name
+         # Drop the 8-character suffix Gradio inserts before the extension in temp file names
+         file_name = file_name[:-12] + file_name[-4:]
+         os.rename(old_file_name, file_name)
+         load_recommender(file_name)
+
+     if question.strip() == '':
+         return '[ERROR]: Question field is empty'
+
+     return generate_answer(question, openAI_key)
+
+
+ recommender = SemanticSearch()
+
+ title = 'Deep Learning'
+ description = """ <p style="text-align:center">An automated organizational-productivity system for increasing efficiency, effectiveness, and efficacy in the organization's internal processes.</p>
+
+ <p style="text-align:center"><strong>By Manget Impact LLC - Miguel Angel Gil.</strong></p>
+ """
+
+ with gr.Blocks() as demo:
+
+     gr.Markdown(f'<center><h1>{title}</h1></center>')
+     gr.Markdown(description)
+
+     with gr.Row():
+
+         with gr.Group():
+             gr.Markdown('<p style="text-align:center">Get your OpenAI key <a href="https://platform.openai.com/account/api-keys">here</a></p>')
+             openAI_key = gr.Textbox(label='Enter your OpenAI API key here')
+             url = gr.Textbox(label='URL of the PDF to process')
+             gr.Markdown("<center><h4>OR</h4></center>")
+             file = gr.File(label='Upload a PDF to process', file_types=['.pdf'])
+             question = gr.Textbox(label='Type your query here:')
+             btn = gr.Button(value='Query')
+             btn.style(full_width=True)
+
+         with gr.Group():
+             answer = gr.Textbox(label='Answer to the query:')
+
+     btn.click(question_answer, inputs=[url, file, question, openAI_key], outputs=[answer])
+ # openai.api_key = os.getenv('Your_Key_Here')
+ demo.launch()
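
For readers of this commit: the retrieval flow in app.py can be exercised without the Gradio UI. The sketch below is illustrative only; it assumes demo.launch() is moved behind an `if __name__ == "__main__"` guard so importing app does not start the server, and 'sample.pdf' and the query string are hypothetical.

```python
# Minimal sketch of the retrieval pipeline from app.py, without the UI.
# Assumes app.py guards demo.launch() so that importing it is side-effect
# free apart from loading the encoder; 'sample.pdf' is a hypothetical file.
from app import pdf_to_text, text_to_chunks, SemanticSearch

texts = pdf_to_text('sample.pdf')       # one preprocessed string per page
chunks = text_to_chunks(texts)          # ~150-word chunks prefixed with [page]
searcher = SemanticSearch()             # loads Universal Sentence Encoder v4
searcher.fit(chunks)                    # embed chunks and build the k-NN index
for chunk in searcher('What is deep learning?'):  # 5 nearest chunks by embedding
    print(chunk[:80])
```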