# Import required modules
import os
import gradio as gr
import urllib.request
import fitz  # PyMuPDF, used for PDF text extraction
import re
import numpy as np
import tensorflow_hub as hub
from sklearn.neighbors import NearestNeighbors
from transformers import AutoTokenizer
import transformers
import torch

# Load the Falcon model
model = "tiiuae/falcon-40b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
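# Note (not in the original app): falcon-40b-instruct is large (roughly 80 GB
# of weights in bfloat16), so device_map="auto" will shard it across available
# GPUs. On smaller hardware, "tiiuae/falcon-7b-instruct" should be a drop-in
# alternative with the same pipeline arguments; this swap is a suggestion only.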
# Define chat function: an alternative Blocks-based UI with retry/delete/clear
# controls (not launched by default; the demo at the bottom of this file is)
def chat():
    with gr.Blocks(title="Falcon-PDF Chatbot") as interface:
        gr.Markdown("A chatbot that can read and answer questions about a PDF document using the Falcon model")
        with gr.Row():
            chatbot = gr.Chatbot(label="Chat History", elem_id="chatbot")
        with gr.Row():
            url = gr.Textbox(placeholder="Enter PDF URL here", label="URL")
            file = gr.File(label="Or upload your PDF here")
            question = gr.Textbox(placeholder="Enter your question here", label="Question")
        chat_button = gr.Button("Chat")
        chat_button.click(question_answer, inputs=[chatbot, url, file, question], outputs=[chatbot])
        with gr.Row():
            retry_button = gr.Button("♻️ Retry last turn")
            delete_turn_button = gr.Button("🧽 Delete last turn")
            clear_chat_button = gr.Button("✨ Delete all history")
        retry_button.click(retry_last_turn, inputs=[chatbot], outputs=[chatbot])
        delete_turn_button.click(delete_last_turn, inputs=[chatbot], outputs=[chatbot])
        clear_chat_button.click(clear_chat_history, inputs=[chatbot], outputs=[chatbot])
    # Launch the Gradio interface
    interface.launch()

def retry_last_turn(chat_history):
    """Handles retrying the last turn."""
    if len(chat_history) > 0:
        # Get the last question from the chat history
        last_question = chat_history[-1][0]
        # Remove the last turn from the chat history
        chat_history = chat_history[:-1]
        # Re-ask the last question against the already-loaded corpus
        chat_history.append([last_question, generate_answer(last_question)])
    else:
        print("Chat history is empty.")
    return chat_history

def delete_last_turn(chat_history):
    """Handles deleting the last turn."""
    if len(chat_history) > 0:
        # Remove the last turn from the chat history
        chat_history = chat_history[:-1]
    else:
        print("Chat history is empty.")
    return chat_history

def clear_chat_history(chat_history):
    """Handles clearing the chat history."""
    # Clear the chat history
    chat_history = []
    return chat_history

def download_pdf(url, output_path):
    """Download a PDF from a URL and save it to the specified output path."""
    urllib.request.urlretrieve(url, output_path)

def preprocess(text):
    """Preprocess a text by replacing newline characters with spaces and reducing multiple spaces to single spaces."""
    text = text.replace('\n', ' ')
    text = re.sub(r'\s+', ' ', text)  # raw string avoids an invalid-escape warning
    return text
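# Illustrative example (not in the original source):
#   preprocess('line one\nline   two')  ->  'line one line two'
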
def pdf_to_text(path, start_page=1, end_page=None):
    """Extract text from a PDF file from the specified start page to the end page."""
    doc = fitz.open(path)
    total_pages = doc.page_count
    if end_page is None:
        end_page = total_pages
    text_list = []
    for i in range(start_page-1, end_page):
        text = doc.load_page(i).get_text("text")
        text = preprocess(text)
        text_list.append(text)
    doc.close()
    return text_list
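# Illustrative example (not in the original source): pdf_to_text('corpus.pdf')
# returns one cleaned string per page, e.g. ['Abstract ...', '1 Introduction ...'].
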
def text_to_chunks(texts, word_length=150, start_page=1):
    """Split a list of texts into chunks with the specified word length."""
    text_toks = [t.split(' ') for t in texts]
    chunks = []
    for idx, words in enumerate(text_toks):
        for i in range(0, len(words), word_length):
            chunk = words[i:i+word_length]
            if (i+word_length) > len(words) and (len(chunk) < word_length) and (len(text_toks) != (idx+1)):
                # Carry a short trailing chunk over to the next page instead of emitting it
                text_toks[idx+1] = chunk + text_toks[idx+1]
                continue
            chunk = ' '.join(chunk).strip()
            chunk = f'[Page no. {idx+start_page}]' + ' ' + '"' + chunk + '"'
            chunks.append(chunk)
    return chunks
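# Illustrative example (not in the original source): each chunk carries its page
# marker, e.g. '[Page no. 3] "...150 words of page text..."', which is what lets
# the model cite page numbers in its answers.
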
class SemanticSearch:
    """A class for performing semantic search using the Universal Sentence Encoder."""
    def __init__(self):
        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
        self.fitted = False

    def fit(self, data, batch=1000, n_neighbors=5):
        """Fit the model to the data."""
        self.data = data
        self.embeddings = self.get_text_embedding(data, batch=batch)
        n_neighbors = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        """Find the nearest neighbors to a text."""
        inp_emb = self.use([text])
        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
        if return_data:
            return [self.data[i] for i in neighbors]
        else:
            return neighbors

    def get_text_embedding(self, texts, batch=1000):
        """Get the embeddings of a list of texts."""
        embeddings = []
        for i in range(0, len(texts), batch):
            text_batch = texts[i:(i+batch)]
            emb_batch = self.use(text_batch)
            embeddings.append(emb_batch)
        embeddings = np.vstack(embeddings)
        return embeddings
# Load the PDF-GPT recommender (instantiated here, after SemanticSearch is
# defined; instantiating it earlier in the file would raise a NameError)
recommender = SemanticSearch()

def load_recommender(path, start_page=1):
    """Load a recommender model with a PDF file."""
    global recommender
    texts = pdf_to_text(path, start_page=start_page)
    chunks = text_to_chunks(texts, start_page=start_page)
    recommender.fit(chunks)
    return 'Corpus Loaded.'
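# Illustrative flow (not in the original source; assumes a local 'corpus.pdf'):
#   load_recommender('corpus.pdf')
#   top_chunks = recommender('What is multi-head attention?')  # top-matching chunks
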
def generate_answer(question):
    """Generate an answer to a question from the top-matching chunks."""
    topn_chunks = recommender(question)
    prompt = ""
    prompt += 'search results:\n\n'
    for c in topn_chunks:
        prompt += c + '\n\n'
    prompt += f"Query: {question}\nAnswer:"
    sequences = pipeline(
        prompt,
        max_new_tokens=200,  # bound only the new tokens; max_length would count the prompt too
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        return_full_text=False,  # return just the answer, not the echoed prompt
    )
    return sequences[0]['generated_text']
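# Illustrative usage (not in the original source; requires a loaded corpus):
#   answer = generate_answer('What did the study investigate?')
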
def question_answer(chat_history, url, file, question):
    try:
        if url.strip() == '' and file is None:
            raise ValueError('Both URL and PDF are empty. Provide at least one.')
        if url.strip() != '' and file is not None:
            raise ValueError('Both URL and PDF are provided. Please provide only one (either URL or PDF).')
        if url.strip() != '':
            glob_url = url
            download_pdf(glob_url, 'corpus.pdf')
            load_recommender('corpus.pdf')
        else:
            old_file_name = file.name
            file_name = file.name
            # Strip the random characters Gradio adds to uploaded file names, keeping the extension
            file_name = file_name[:-12] + file_name[-4:]
            os.rename(old_file_name, file_name)
            load_recommender(file_name)
        if question.strip() == '':
            raise ValueError('Question field is empty.')
        topn_chunks = recommender(question)
        prompt = ""
        prompt += 'search results:\n\n'
        for c in topn_chunks:
            prompt += c + '\n\n'
        prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
                  "Cite each reference using [Page Number] notation (every result has this number at the beginning). "\
                  "Citation should be done at the end of each sentence. If the search results mention multiple subjects "\
                  "with the same name, create separate answers for each. Only include information found in the results and "\
                  "don't add any additional information. Make sure the answer is correct and don't output false content. "\
                  "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\
                  "search results which have nothing to do with the question. Only answer what is asked. The "\
                  "answer should be short and concise.\n\n"
        prompt += f"Query: {question}\nAnswer:"
        sequences = pipeline(
            prompt,
            max_new_tokens=200,  # bound only the new tokens; max_length would count the prompt too
            do_sample=True,
            top_k=10,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            return_full_text=False,  # return just the answer, not the echoed prompt
        )
        answer = sequences[0]['generated_text']
        chat_history.append([question, answer])
        return chat_history
    except Exception as e:
        # Surface errors inside the chat window so the Chatbot output stays well-formed
        chat_history.append([question, f'[ERROR]: {str(e)}'])
        return chat_history
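# Illustrative direct call (not in the original source; downloads the PDF):
#   history = question_answer([], 'https://arxiv.org/pdf/1706.03762.pdf', None,
#                             'What is the attention mechanism?')
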
questions = [
    "What did the study investigate?",
    "Can you provide a summary of this document?",
    "What are the methodologies used in this study?",
    "What are the data intervals used in this study? Give me the start dates and end dates.",
    "What are the main limitations of this study?",
    "What are the main shortcomings of this study?",
    "What are the main findings of the study?",
    "What are the main results of the study?",
    "What are the main contributions of this study?",
    "What is the conclusion of this paper?",
    "What are the input features used in this study?",
    "What is the dependent variable in this study?",
]
title = 'PDF GPT Turbo'
description = """PDF GPT Turbo allows you to chat with your PDF file using the Universal Sentence Encoder and Falcon. It grounds its answers in document embeddings, which helps reduce hallucinated responses compared with other tools. The returned response can even cite the page number in square brackets ([]) where the information is located, adding credibility to the responses and helping to locate pertinent information quickly."""
with gr.Blocks(css="""#chatbot { font-size: 14px; min-height: 800px; }""") as demo:
    gr.Markdown(f'<center><h1>{title}</h1></center>')
    gr.Markdown(description)
    with gr.Row():
        with gr.Group():
            url = gr.Textbox(label='Enter PDF URL here (Example: https://arxiv.org/pdf/1706.03762.pdf )')
            gr.Markdown("<center><h4>OR<h4></center>")
            file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
            question = gr.Textbox(label='Enter your question here')
            gr.Examples(
                [[q] for q in questions],
                inputs=[question],
                label="PRE-DEFINED QUESTIONS: Click on a question to auto-fill the input box, then press Enter!",
            )
            btn = gr.Button(value='Submit')
            btn.style(full_width=True)
        with gr.Group():
            chatbot = gr.Chatbot(label="Chat History", elem_id="chatbot")
    # Bind the click event of the button to the question_answer function
    btn.click(
        question_answer,
        inputs=[chatbot, url, file, question],
        outputs=[chatbot],
    )

demo.launch()