Update app.py
app.py
CHANGED
@@ -1,16 +1,170 @@
 import gradio as gr
-from huggingface_hub import InferenceApi
-from duckduckgo_search import DDGS
 import requests
-import
 from typing import List
 from pydantic import BaseModel, Field
-import

 # Environment variables and configurations
 huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")

-# Function to perform a DuckDuckGo search
 def duckduckgo_search(query):
     with DDGS() as ddgs:
         results = ddgs.text(query, max_results=5)
@@ -22,82 +176,140 @@ class CitingSources(BaseModel):
         description="List of sources to cite. Should be a URL of the source."
     )

-def
-
     search_results = duckduckgo_search(query)
-
-    # Use the search results as context for the model
     context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
                         for result in search_results if 'body' in result)

-    # Prompt formatted for Mistral-7B-Instruct
     prompt = f"""<s>[INST] Using the following context:
 {context}
 Write a detailed and complete research document that fulfills the following user request: '{query}'
 After writing the document, please provide a list of sources used in your response. [/INST]"""

-
-    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
-
-    # Headers
-    headers = {"Authorization": f"Bearer {huggingface_token}"}

-    #
-
-
-        "parameters": {
-            "max_new_tokens": 1000,
-            "temperature": 0.7,
-            "top_p": 0.95,
-            "top_k": 40,
-            "repetition_penalty": 1.1
-        }
-    }

-    #
-

-
-
-
-
-
-
-
-
-
-
-            # Split the response into main content and sources
-            parts = generated_text.split("Sources:", 1)
-            main_content = parts[0].strip()
-            sources = parts[1].strip() if len(parts) > 1 else ""
-
-            return main_content, sources
-        else:
-            return f"Unexpected response format: {result}", ""
     else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )

 if __name__ == "__main__":
-
+import os
+import json
+import re
 import gradio as gr
 import requests
+from duckduckgo_search import DDGS
 from typing import List
 from pydantic import BaseModel, Field
+from tempfile import NamedTemporaryFile
+from langchain_community.vectorstores import FAISS
+from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from llama_parse import LlamaParse
+from langchain_core.documents import Document
+from huggingface_hub import InferenceClient
+import inspect

 # Environment variables and configurations
 huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
+llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+CLOUDFLARE_ACCOUNT_ID = os.environ.get("CLOUDFLARE_ACCOUNT_ID")
+CLOUDFLARE_AUTH_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
+
+MODELS = [
+    "Qwen/Qwen2-72B-Instruct",
+    "google/gemma-2-9b",
+    "microsoft/Phi-3-mini-4k-instruct",
+    "Qwen/Qwen2-7B-Instruct",
+    "mistralai/Mistral-Nemo-Instruct-2407",
+    "mistralai/Mistral-7B-Instruct-v0.3",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "cloudflare/llama-3.1-8b-instruct"  # Added Cloudflare Llama 3.1 model
+]
+
+# Initialize LlamaParse
+llama_parser = LlamaParse(
+    api_key=llama_cloud_api_key,
+    result_type="markdown",
+    num_workers=4,
+    verbose=True,
+    language="en",
+)
+
+def load_document(file: NamedTemporaryFile, parser: str = "pypdf") -> List[Document]:
+    """Loads and splits the document into pages."""
+    if parser == "pypdf":
+        loader = PyPDFLoader(file.name)
+        return loader.load_and_split()
+    elif parser == "llamaparse":
+        try:
+            documents = llama_parser.load_data(file.name)
+            return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
+        except Exception as e:
+            print(f"Error using Llama Parse: {str(e)}")
+            print("Falling back to PyPDF parser")
+            loader = PyPDFLoader(file.name)
+            return loader.load_and_split()
+    else:
+        raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
+
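Note: a quick way to exercise load_document outside the UI (a sketch; sample.pdf is a hypothetical local file, and any object exposing a .name attribute satisfies the signature):

    from types import SimpleNamespace
    pages = load_document(SimpleNamespace(name="sample.pdf"), parser="pypdf")
    print(f"Loaded {len(pages)} page chunks")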
+def get_embeddings():
+    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+
+def update_vectors(files, parser):
+    if not files:
+        return "Please upload at least one PDF file."
+
+    embed = get_embeddings()
+    total_chunks = 0
+
+    all_data = []
+    for file in files:
+        data = load_document(file, parser)
+        all_data.extend(data)
+        total_chunks += len(data)
+
+    if os.path.exists("faiss_database"):
+        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+        database.add_documents(all_data)
+    else:
+        database = FAISS.from_documents(all_data, embed)
+
+    database.save_local("faiss_database")
+
+    return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
+
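Note: reading the saved index back mirrors what get_response_from_pdf does further down in this file; a minimal sketch, assuming update_vectors has already created the faiss_database folder (the query string is hypothetical):

    embed = get_embeddings()
    db = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    docs = db.as_retriever().get_relevant_documents("What is this document about?")
    print(len(docs), "matching chunks")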
+def generate_chunked_response(prompt, model, max_tokens=1000, max_chunks=5, temperature=0.7):
+    if model == "cloudflare/llama-3.1-8b-instruct":
+        return generate_cloudflare_response(prompt, max_tokens, temperature)
+
+    client = InferenceClient(
+        model,
+        token=huggingface_token,
+    )
+
+    full_response = ""
+    messages = [{"role": "user", "content": prompt}]
+
+    try:
+        for message in client.chat_completion(
+            messages=messages,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            stream=True,
+        ):
+            chunk = message.choices[0].delta.content
+            if chunk:
+                full_response += chunk
+
+    except Exception as e:
+        print(f"Error in generating response: {str(e)}")
+
+    # Clean up the response
+    clean_response = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', full_response, flags=re.DOTALL)
+    clean_response = clean_response.replace("Using the following context:", "").strip()
+    clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()
+
+    return clean_response
+
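Note: a direct-call sketch of the streaming helper above (the prompt is illustrative; the model name is taken from the MODELS list):

    text = generate_chunked_response(
        "Explain vector search in two sentences.",  # hypothetical prompt
        "mistralai/Mistral-7B-Instruct-v0.3",
        max_tokens=200,
        temperature=0.3,
    )
    print(text)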
+def generate_cloudflare_response(prompt, max_tokens, temperature):
+    try:
+        response = requests.post(
+            f"https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/run/@cf/meta/llama-3.1-8b-instruct",
+            headers={"Authorization": f"Bearer {CLOUDFLARE_AUTH_TOKEN}"},
+            json={
+                "messages": [
+                    {"role": "system", "content": "You are a friendly assistant"},
+                    {"role": "user", "content": prompt}
+                ],
+                "max_tokens": max_tokens,
+                "temperature": temperature
+            }
+        )
+
+        # Check if the request was successful
+        response.raise_for_status()
+
+        result = response.json()
+        if not result:
+            raise ValueError("Empty response from Cloudflare API")
+
+        if 'result' not in result:
+            raise ValueError(f"Unexpected response format. 'result' key missing. Response: {result}")
+
+        if 'response' not in result['result']:
+            raise ValueError(f"Unexpected response format. 'response' key missing. Result: {result['result']}")
+
+        return result['result']['response']
+
+    except requests.exceptions.RequestException as e:
+        error_message = f"Network error when calling Cloudflare API: {str(e)}"
+        print(error_message)
+        return f"Error: {error_message}"
+    except json.JSONDecodeError as e:
+        error_message = f"Error decoding JSON response from Cloudflare API: {str(e)}"
+        print(error_message)
+        return f"Error: {error_message}"
+    except ValueError as e:
+        error_message = str(e)
+        print(error_message)
+        return f"Error: {error_message}"
+    except Exception as e:
+        error_message = f"Unexpected error in generate_cloudflare_response: {str(e)}"
+        print(error_message)
+        return f"Error: {error_message}"
+

 def duckduckgo_search(query):
     with DDGS() as ddgs:
         results = ddgs.text(query, max_results=5)

         description="List of sources to cite. Should be a URL of the source."
     )

+def get_response_from_pdf(query, model, temperature=0.7):
+    embed = get_embeddings()
+    if os.path.exists("faiss_database"):
+        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+    else:
+        return "No documents available. Please upload PDF documents to answer questions."
+
+    retriever = database.as_retriever()
+    relevant_docs = retriever.get_relevant_documents(query)
+    context_str = "\n".join([doc.page_content for doc in relevant_docs])
+
+    prompt = f"""<s>[INST] Using the following context from the PDF documents:
+{context_str}
+Write a detailed and complete response that answers the following user question: '{query}'
+Do not include a list of sources in your response. [/INST]"""
+
+    generated_text = generate_chunked_response(prompt, model, temperature=temperature)
+
+    # Clean the response
+    clean_text = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', generated_text, flags=re.DOTALL)
+    clean_text = clean_text.replace("Using the following context from the PDF documents:", "").strip()
+
+    return clean_text
+
+def get_response_with_search(query, model, temperature=0.7):
     search_results = duckduckgo_search(query)
     context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
                         for result in search_results if 'body' in result)

     prompt = f"""<s>[INST] Using the following context:
 {context}
 Write a detailed and complete research document that fulfills the following user request: '{query}'
 After writing the document, please provide a list of sources used in your response. [/INST]"""

+    generated_text = generate_chunked_response(prompt, model, temperature=temperature)

+    # Clean the response
+    clean_text = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', generated_text, flags=re.DOTALL)
+    clean_text = clean_text.replace("Using the following context:", "").strip()

+    # Split the content and sources
+    parts = clean_text.split("Sources:", 1)
+    main_content = parts[0].strip()
+    sources = parts[1].strip() if len(parts) > 1 else ""

+    return main_content, sources
+
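Note: a sketch of the two-part return value above (the query is taken from the Examples list below; the model choice is illustrative):

    main_content, sources = get_response_with_search(
        "What are the latest developments in AI?",
        "mistralai/Mistral-7B-Instruct-v0.3",
    )
    print(main_content)
    print("Sources:", sources)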
+def chatbot_interface(message, history, use_web_search, model, temperature):
+    if not message.strip():  # Check if the message is empty or just whitespace
+        return history
+
+    if use_web_search:
+        main_content, sources = get_response_with_search(message, model, temperature)
+        formatted_response = f"{main_content}\n\nSources:\n{sources}"
     else:
+        response = get_response_from_pdf(message, model, temperature)
+        formatted_response = response
+
+    # Check if the last message in history is the same as the current message
+    if history and history[-1][0] == message:
+        # Replace the last response instead of adding a new one
+        history[-1] = (message, formatted_response)
+    else:
+        # Add the new message-response pair
+        history.append((message, formatted_response))
+
+    return history
+
+
+def clear_and_update_chat(message, history, use_web_search, model, temperature):
+    updated_history = chatbot_interface(message, history, use_web_search, model, temperature)
+    return "", updated_history  # Return empty string to clear the input
+
+# Gradio interface
+with gr.Blocks() as demo:
+
+    is_generating = gr.State(False)
+
+    def protected_clear_and_update_chat(message, history, use_web_search, model, temperature, is_generating):
+        if is_generating:
+            return message, history, is_generating
+        is_generating = True
+        updated_message, updated_history = clear_and_update_chat(message, history, use_web_search, model, temperature)
+        is_generating = False
+        return updated_message, updated_history, is_generating
+
+    gr.Markdown("# AI-powered Web Search and PDF Chat Assistant")
+
+    with gr.Row():
+        file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
+        parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="pypdf")
+        update_button = gr.Button("Upload Document")
+
+    update_output = gr.Textbox(label="Update Status")
+    update_button.click(update_vectors, inputs=[file_input, parser_dropdown], outputs=update_output)
+
+    chatbot = gr.Chatbot(label="Conversation")
+    msg = gr.Textbox(label="Ask a question")
+    use_web_search = gr.Checkbox(label="Use Web Search", value=False)
+
+    with gr.Row():
+        model_dropdown = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2])
+        temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
+
+    submit = gr.Button("Submit")
+
+    gr.Examples(
+        examples=[
+            ["What are the latest developments in AI?"],
+            ["Tell me about recent updates on GitHub"],
+            ["What are the best hotels in Galapagos, Ecuador?"],
+            ["Summarize recent advancements in Python programming"],
+        ],
+        inputs=msg,
+    )
+
+    submit.click(protected_clear_and_update_chat,
+                 inputs=[msg, chatbot, use_web_search, model_dropdown, temperature_slider, is_generating],
+                 outputs=[msg, chatbot, is_generating])
+    msg.submit(protected_clear_and_update_chat,
+               inputs=[msg, chatbot, use_web_search, model_dropdown, temperature_slider, is_generating],
+               outputs=[msg, chatbot, is_generating])
+
+    gr.Markdown(
+        """
+        ## How to use
+        1. Upload PDF documents using the file input at the top.
+        2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
+        3. Ask questions in the textbox.
+        4. Toggle "Use Web Search" to switch between PDF chat and web search.
+        5. Adjust the Temperature slider to fine-tune the response generation.
+        6. Click "Submit" or press Enter to get a response.
+        """
+    )

 if __name__ == "__main__":
+    demo.launch(share=True)
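Note: this revision expects four secrets at startup (names taken from the os.environ lookups at the top of the file; the values below are placeholders, set as Space secrets in practice):

    import os
    os.environ["HUGGINGFACE_TOKEN"] = "hf_..."      # Hugging Face Inference API
    os.environ["LLAMA_CLOUD_API_KEY"] = "llx-..."   # LlamaParse
    os.environ["CLOUDFLARE_ACCOUNT_ID"] = "..."     # Cloudflare Workers AI
    os.environ["CLOUDFLARE_AUTH_TOKEN"] = "..."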