# ChanceRAG / app.py
import time
import fitz
import numpy as np
import os
import dill
import logging
import asyncio
import networkx as nx
from mistralai import Mistral
from annoy import AnnoyIndex
from sklearn.metrics.pairwise import cosine_similarity
from rank_bm25 import BM25Okapi
from gensim.models import Word2Vec
from typing import List, Optional, Tuple
import gradio as gr
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
api_key = os.getenv("MISTRAL_API_KEY")
client = Mistral(api_key=api_key)
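# Synchronously embeds a list of text chunks with the Mistral embeddings API,
# retrying with exponential backoff when a request fails (e.g. rate limiting).
# The 16384 cap is a rough whitespace-token count, not an exact model token
# count, so over-long chunks are truncated before the call.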
def get_text_embedding_with_rate_limit(text_list, initial_delay=2, max_retries=10):
embeddings = []
for text in text_list:
retries = 0
delay = initial_delay
while retries < max_retries:
try:
token_count = len(text.split())
if token_count > 16384:
print("Warning: Text chunk exceeds the token limit. Truncating the text.")
text = " ".join(text.split()[:16384])
response = client.embeddings.create(model="mistral-embed", inputs=[text])
embeddings.extend([embedding.embedding for embedding in response.data])
time.sleep(delay)
break
            except Exception as e:
                retries += 1
                print(f"Embedding request failed ({e}); retrying in {delay} seconds... (Attempt {retries}/{max_retries})")
                time.sleep(delay)
                delay *= 2
if retries == max_retries:
print("Max retries reached. Skipping this chunk.")
break
return embeddings
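# Builds the document store for a PDF: extracts text page by page, splits it into
# overlapping chunks, embeds each chunk, then persists the embeddings and raw
# chunk texts with dill and builds an on-disk Annoy index (angular distance).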
def store_embeddings_in_vector_db(
pdf_path: str,
vector_db_path: str,
annoy_index_path: str,
chunk_size: int = 2048,
overlap: int = 200,
num_trees: int = 10
):
doc = fitz.open(pdf_path)
all_embeddings = []
all_texts = []
total_pages = doc.page_count
logging.info(f"Processing PDF: {pdf_path} with {total_pages} pages.")
for page_num in range(total_pages):
page = doc.load_page(page_num)
text = page.get_text()
if text.strip():
chunks = split_text_into_chunks(text, chunk_size, overlap)
embeddings = get_text_embedding_with_rate_limit(chunks)
all_embeddings.extend(embeddings)
all_texts.extend(chunks)
logging.info(f"Processed page {page_num + 1}/{total_pages}, extracted {len(chunks)} chunks.")
else:
logging.warning(f"No text found on page {page_num + 1}.")
embeddings_np = np.array(all_embeddings).astype('float32')
with open(vector_db_path, "wb") as f:
dill.dump({'embeddings': embeddings_np, 'texts': all_texts}, f)
logging.info(f"Stored embeddings and texts to {vector_db_path}.")
if os.path.exists(annoy_index_path):
os.remove(annoy_index_path)
logging.info(f"Existing Annoy index at {annoy_index_path} removed.")
embedding_dim = embeddings_np.shape[1]
annoy_index = AnnoyIndex(embedding_dim, 'angular')
for i, embedding in enumerate(embeddings_np):
annoy_index.add_item(i, embedding)
annoy_index.build(num_trees)
annoy_index.save(annoy_index_path)
logging.info(f"Annoy index built with {len(all_embeddings)} items and saved to {annoy_index_path}.")
def split_text_into_chunks(text: str, chunk_size: int = 2048, overlap: int = 200) -> List[str]:
tokens = text.split()
chunks = []
start = 0
while start < len(tokens):
end = start + chunk_size
chunk = " ".join(tokens[start:end])
chunks.append(chunk)
start += chunk_size - overlap
return chunks
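# Retrieval-augmented chatbot over the stored PDF chunks. It combines several
# retrievers (Annoy vector search, BM25, averaged Word2Vec vectors) and reranks
# the merged candidates with a fusion step before prompting the Mistral chat model.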
class MistralRAGChatbot:
def __init__(self, vector_db_path: str, annoy_index_path: str):
self.embeddings, self.texts = self.load_vector_db(vector_db_path)
self.annoy_index = self.load_annoy_index(annoy_index_path, self.embeddings.shape[1])
self.bm25 = BM25Okapi([text.split() for text in self.texts])
self.word2vec_model = self.train_word2vec(self.texts)
self.reranking_methods = {
'advanced_fusion': self.advanced_fusion_retrieval
}
logging.info("MistralRAGChatbot initialized successfully.")
def load_vector_db(self, vector_db_path: str) -> Tuple[np.ndarray, List[str]]:
with open(vector_db_path, "rb") as f:
data = dill.load(f)
embeddings = np.array(data['embeddings'], dtype='float32')
texts = data['texts']
logging.info(f"Loaded vector database from {vector_db_path} with {len(texts)} entries.")
return embeddings, texts
def load_annoy_index(self, annoy_index_path: str, embedding_dim: int) -> AnnoyIndex:
annoy_index = AnnoyIndex(embedding_dim, 'angular')
annoy_index.load(annoy_index_path)
logging.info(f"Loaded Annoy index from {annoy_index_path}.")
return annoy_index
def train_word2vec(self, texts: List[str]) -> Word2Vec:
tokenized_texts = [text.split() for text in texts]
model = Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=4)
logging.info("Word2Vec model trained.")
return model
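    # Asynchronously embeds a single query with mistral-embed; on failure it logs
    # the error and falls back to a 1024-dimensional zero vector (the mistral-embed
    # output size) so downstream retrieval can still run.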
async def get_text_embedding(self, text: str, model: str = "mistral-embed") -> np.ndarray:
try:
response = await client.embeddings.create_async(model=model, inputs=[text])
return np.array(response.data[0].embedding)
except Exception as e:
logging.error(f"Error fetching embedding: {e}")
return np.zeros((1024,))
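    # Fusion reranking: combines the Annoy vector score (0.5), the BM25 score (0.3)
    # and a PageRank score (0.2) computed over a document similarity graph whose
    # edges connect chunks with cosine similarity above 0.5, then min-max normalizes
    # the combined scores and keeps the top 5 candidates.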
def advanced_fusion_retrieval(self, user_query: str, docs: List[dict]) -> List[dict]:
query_embedding = self.create_embeddings([user_query])[0]
vector_scores = {doc['index']: doc['score'] for doc in docs if doc['method'] == 'annoy'}
bm25_scores = {doc['index']: doc['score'] for doc in docs if doc['method'] == 'bm25'}
sim_graph = nx.Graph()
sim_matrix = cosine_similarity(self.embeddings)
for i in range(len(self.embeddings)):
for j in range(i + 1, len(self.embeddings)):
if sim_matrix[i, j] > 0.5:
sim_graph.add_edge(i, j, weight=sim_matrix[i, j])
        pagerank_scores = nx.pagerank(sim_graph, weight='weight') if sim_graph.number_of_nodes() > 0 else {}
combined_scores = {}
for doc in docs:
idx = doc['index']
combined_scores[idx] = (
0.5 * vector_scores.get(idx, 0) +
0.3 * bm25_scores.get(idx, 0) +
                0.2 * pagerank_scores.get(idx, 0)
)
        min_score = min(combined_scores.values())
        max_score = max(combined_scores.values())
        score_range = max_score - min_score
        normalized_scores = {
            idx: ((score - min_score) / score_range) if score_range else 1.0
            for idx, score in combined_scores.items()
        }
sorted_indices = sorted(combined_scores, key=combined_scores.get, reverse=True)
return [{'text': self.texts[i], 'method': 'advanced_fusion', 'score': normalized_scores[i], 'index': i} for i in sorted_indices[:5]]
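    # Builds fallback embeddings by averaging Word2Vec vectors for each text and
    # padding/truncating to 1024 dimensions so they stay shape-compatible with the
    # mistral-embed vectors used elsewhere.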
def create_embeddings(self, text_list: List[str]) -> np.ndarray:
expected_dim = 1024
embeddings = []
for text in text_list:
word_vectors = [self.word2vec_model.wv[token] for token in text.split() if token in self.word2vec_model.wv]
avg_embedding = np.mean(word_vectors, axis=0, dtype=np.float32) if word_vectors else np.zeros(self.word2vec_model.vector_size, dtype=np.float32)
if avg_embedding.shape[0] < expected_dim:
avg_embedding = np.pad(avg_embedding, (0, expected_dim - avg_embedding.shape[0]), 'constant')
elif avg_embedding.shape[0] > expected_dim:
avg_embedding = avg_embedding[:expected_dim]
embeddings.append(avg_embedding)
return np.array(embeddings, dtype=np.float32)
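    # End-to-end RAG flow: embed the query, gather candidates from the selected
    # retrievers, rerank them, build a style-specific prompt from the top 5 chunks,
    # and stream the chat completion from the Mistral API.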
async def generate_response_with_rag(
self,
user_query: str,
model: str = "mistral-small-latest",
top_k: int = 10,
response_style: str = "Detailed",
selected_retrieval_methods: Optional[List[str]] = None,
selected_reranking_methods: Optional[List[str]] = None
) -> Tuple[str, List[str], List[dict]]:
        if not selected_retrieval_methods:
            selected_retrieval_methods = ['annoy', 'bm25', 'word2vec']
        if not selected_reranking_methods:
            selected_reranking_methods = ['advanced_fusion']
query_embedding = await self.get_text_embedding(user_query)
retrieved_docs = self.retrieve_documents(user_query, query_embedding, top_k, selected_retrieval_methods)
reranked_docs = self.rerank_documents(user_query, retrieved_docs, selected_reranking_methods)
context = "\n\n".join([doc['text'] for doc in reranked_docs[:5]])
prompt = self.build_prompt(context, user_query, response_style)
try:
async_response = await client.chat.stream_async(model=model, messages=[{"role": "user", "content": prompt}])
response = ""
            async for chunk in async_response:
                delta = chunk.data.choices[0].delta.content
                if delta:
                    response += delta
logging.info("Response generated successfully.")
except Exception as e:
logging.error(f"Error generating response: {e}")
response = "An error occurred while generating the response."
return response, [doc['text'] for doc in reranked_docs[:5]], reranked_docs[:5]
def retrieve_documents(
self,
user_query: str,
query_embedding: np.ndarray,
top_k: int,
selected_methods: List[str]
) -> List[dict]:
all_docs = []
for method in selected_methods:
indices, scores = getattr(self, f"retrieve_with_{method}")(user_query, query_embedding, top_k)
for idx, score in zip(indices, scores):
all_docs.append({
'text': self.texts[idx],
'method': method,
'score': score,
'index': idx
})
return all_docs
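    # Approximate nearest-neighbour search over the Annoy index; angular distances
    # are converted to similarity-like scores in [0, 1] relative to the largest
    # returned distance.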
def retrieve_with_annoy(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
n_results = min(top_k, len(self.texts))
indices, distances = self.annoy_index.get_nns_by_vector(query_embedding, n_results, include_distances=True)
        max_dist = max(distances) if distances else 0.0
        scores = [1.0 - (dist / max_dist) if max_dist else 1.0 for dist in distances]
logging.debug(f"Annoy retrieval returned {len(indices)} documents.")
return indices, scores
def retrieve_with_bm25(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
tokenized_query = user_query.split()
scores = self.bm25.get_scores(tokenized_query)
indices = np.argsort(-scores)[:top_k]
logging.debug(f"BM25 retrieval returned {len(indices)} documents.")
return indices, scores[indices].tolist()
def retrieve_with_word2vec(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        query_tokens = [token for token in user_query.split() if token in self.word2vec_model.wv]
        if not query_tokens:
            return [], []
        query_vec = np.mean([self.word2vec_model.wv[token] for token in query_tokens], axis=0)
        expected_dim = query_vec.shape[0]
doc_vectors = []
for doc in self.texts:
word_vectors = [self.word2vec_model.wv[token] for token in doc.split() if token in self.word2vec_model.wv]
avg_vector = np.mean(word_vectors, axis=0) if word_vectors else np.zeros(expected_dim)
doc_vectors.append(avg_vector)
doc_vectors = np.array(doc_vectors)
similarities = cosine_similarity([query_vec], doc_vectors).flatten()
indices = np.argsort(-similarities)[:top_k]
return indices, similarities[indices].tolist()
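    # Applies the selected reranking methods in sequence; currently only the
    # advanced fusion reranker is implemented.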
def rerank_documents(
self,
user_query: str,
retrieved_docs: List[dict],
selected_methods: List[str]
) -> List[dict]:
reranked_docs = retrieved_docs
for method in selected_methods:
if method == 'advanced_fusion':
reranked_docs = self.advanced_fusion_retrieval(user_query, reranked_docs)
else:
reranked_docs = self.reranking_methods[method](user_query, reranked_docs)
return reranked_docs
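    # Builds the chat prompt: picks a style instruction (detailed/concise/creative/
    # technical) and, when the retrieved context does not look relevant to the
    # query, instructs the model to say that no relevant documents were found.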
def build_prompt(self, context: str, user_query: str, response_style: str) -> str:
styles = {
"detailed": "Provide a comprehensive and detailed answer based on the provided context.",
"concise": "Provide a brief and concise answer based on the provided context.",
"creative": "Provide a creative and engaging answer based on the provided context.",
"technical": "Provide a technical and in-depth answer based on the provided context."
}
style_instruction = styles.get(response_style.lower(), styles["detailed"])
if not context or not self.is_context_relevant(context, user_query):
prompt = f"""You are an intelligent assistant.
User Question:
{user_query}
Instruction:
The document database does not contain relevant information to answer the question. Please inform the user that no relevant documents were found and refrain from generating an imaginative or unrelated response."""
else:
prompt = f"""You are an intelligent assistant.
Context:
{context}
User Question:
{user_query}
Instruction:
{style_instruction}"""
logging.debug("Prompt constructed for response generation.")
return prompt
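    # Cheap lexical relevance check: the context counts as relevant when more than
    # 20% of the query's terms also appear in the context.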
def is_context_relevant(self, context: str, user_query: str) -> bool:
context_lower = context.lower()
user_query_lower = user_query.lower()
query_terms = set(user_query_lower.split())
context_terms = set(context_lower.split())
common_terms = query_terms.intersection(context_terms)
return len(common_terms) > len(query_terms) * 0.2
def create_vector_db_and_annoy_index(pdf_path, vector_db_path, annoy_index_path):
store_embeddings_in_vector_db(pdf_path, vector_db_path, annoy_index_path)
print("Vector database and Annoy index creation completed.")
def chatbot_interface(file, user_query, response_style):
vector_db_path = "vector_db.pkl"
annoy_index_path = "vector_index.ann"
chunk_size = 2048
overlap = 200
    store_embeddings_in_vector_db(file.name, vector_db_path, annoy_index_path, chunk_size, overlap)
chatbot = MistralRAGChatbot(vector_db_path, annoy_index_path)
selected_retrieval_methods_list = ['annoy', 'bm25']
selected_reranking_methods_list = ["advanced_fusion"]
response, retrieved_docs, source_info = asyncio.run(chatbot.generate_response_with_rag(
user_query=user_query,
response_style=response_style,
selected_retrieval_methods=selected_retrieval_methods_list,
selected_reranking_methods=selected_reranking_methods_list
))
formatted_response = f"Response:\n{response}\n\n"
formatted_response += "Retrieved and Reranked Documents:\n"
for idx, doc_info in enumerate(source_info, start=1):
formatted_response += f"\nDocument {idx}:\n"
formatted_response += f"Content Preview: {doc_info['text'][:200]}...\n"
formatted_response += f"Retrieval Method: {doc_info['method']}\n"
if 'score' in doc_info:
formatted_response += f"Precision Score: {doc_info['score']:.4f}\n"
for key, value in doc_info.items():
if key.endswith('_score') and key != 'score':
formatted_response += f"{key.replace('_', ' ').title()}: {value:.4f}\n"
return formatted_response
iface = gr.Interface(
fn=chatbot_interface,
theme='Rabbitt-AI/ChanceRAG',
inputs=[
gr.File(label="Upload a File"),
gr.Textbox(lines=5, label="User Query"),
        gr.Dropdown(["Detailed", "Concise", "Creative", "Technical"], value="Detailed", label="Response Style"),
],
outputs=gr.Textbox(label="ChanceRAG Response"),
title="Chance RAG",
)
iface.launch(share=True)
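# Illustrative sketch of using the pipeline outside the Gradio UI (assumes
# MISTRAL_API_KEY is set and "sample.pdf" is a placeholder path):
#
#   store_embeddings_in_vector_db("sample.pdf", "vector_db.pkl", "vector_index.ann")
#   bot = MistralRAGChatbot("vector_db.pkl", "vector_index.ann")
#   answer, docs, info = asyncio.run(bot.generate_response_with_rag(
#       user_query="What is this document about?",
#       response_style="Concise",
#       selected_retrieval_methods=["annoy", "bm25"],
#       selected_reranking_methods=["advanced_fusion"],
#   ))
#   print(answer)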