import numpy as np
import pandas as pd
import faiss
import zipfile
import shutil
import logging
from pathlib import Path
from sentence_transformers import SentenceTransformer, util
import streamlit as st
import time
import os
from urllib.parse import quote
import requests

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("MetadataManager")
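
# The app keeps two parallel shard sets: FAISS vector shards under
# "compressed_shards/" (loaded by SemanticSearch below) and metadata parquet
# shards extracted from "metadata_shards.zip" (managed by MetadataManager).
# Rows in each metadata shard are assumed to line up with vector positions,
# so a global FAISS index can be mapped to a metadata row by offset.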
class MetadataManager:
    def __init__(self):
        self.cache_dir = Path("unzipped_cache")
        self.shard_dir = self.cache_dir / "metadata_shards"
        self.shard_map = {}
        self.loaded_shards = {}
        self.total_docs = 0
        self.api_cache = {}

        logger.info("Initializing MetadataManager")
        self._ensure_directories()
        self._unzip_if_needed()
        self._build_shard_map()
        logger.info(f"Total documents indexed: {self.total_docs}")
        logger.info(f"Total shards found: {len(self.shard_map)}")

    def _ensure_directories(self):
        """Create necessary directories if they don't exist."""
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.shard_dir.mkdir(parents=True, exist_ok=True)

    def _unzip_if_needed(self):
        """Extract metadata_shards.zip, handling nested directory structures."""
        zip_path = Path("metadata_shards.zip")

        # Only unzip if no parquet files exist yet (searching all subdirectories)
        if not any(self.shard_dir.rglob("*.parquet")):
            logger.info("No parquet files found, checking for zip archive")

            if not zip_path.exists():
                raise FileNotFoundError(f"Metadata ZIP file not found at {zip_path}")

            logger.info(f"Extracting {zip_path} to {self.shard_dir}")
            try:
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    # Check for a common root directory inside the archive
                    zip_root = self._get_zip_root(zip_ref)

                    # Extract while preserving structure
                    zip_ref.extractall(self.shard_dir)

                    # If the archive had a single root folder, flatten it
                    if zip_root:
                        nested_dir = self.shard_dir / zip_root
                        if nested_dir.exists():
                            # Move files up from the nested directory
                            self._flatten_directory(nested_dir, self.shard_dir)
                            nested_dir.rmdir()

                # Verify extraction
                parquet_files = list(self.shard_dir.rglob("*.parquet"))
                if not parquet_files:
                    raise RuntimeError("Extraction completed but no parquet files found")

                logger.info(f"Found {len(parquet_files)} parquet files after extraction")
            except Exception as e:
                logger.error(f"Failed to extract zip file: {str(e)}")
                self._clean_failed_extraction()
                raise

    def _get_zip_root(self, zip_ref):
        """Identify a common root directory in the zip file (based on the first entry)."""
        try:
            first_file = zip_ref.namelist()[0]
            if '/' in first_file:
                return first_file.split('/')[0]
            return ""
        except Exception as e:
            logger.warning(f"Error detecting zip root: {str(e)}")
            return ""

    def _flatten_directory(self, src_dir, dest_dir):
        """Move files from a nested directory up into the destination."""
        for item in src_dir.iterdir():
            if item.is_dir():
                self._flatten_directory(item, dest_dir)
                item.rmdir()
            else:
                target = dest_dir / item.name
                if target.exists():
                    target.unlink()
                item.rename(target)

    def _clean_failed_extraction(self):
        """Remove any extracted files after a failed attempt."""
        logger.info("Cleaning up failed extraction")
        for item in self.shard_dir.iterdir():
            if item.is_dir():
                shutil.rmtree(item)
            else:
                item.unlink()
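
    # Shard filenames are expected to follow <prefix>_<start>_<end>.parquet
    # (for example "metadata_0_49999.parquet"; the prefix and range are
    # illustrative), where start and end are inclusive global document indices.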
    def _build_shard_map(self):
        """Create a validated index-range-to-shard mapping."""
        logger.info("Building shard map from parquet files")
        parquet_files = list(self.shard_dir.glob("*.parquet"))
        if not parquet_files:
            raise FileNotFoundError("No parquet files found after extraction")

        # Sort files by their numeric start index
        parquet_files = sorted(parquet_files, key=lambda x: int(x.stem.split("_")[1]))

        # Track the expected next start index to enforce continuity
        expected_start = 0
        for f in parquet_files:
            try:
                parts = f.stem.split("_")
                if len(parts) != 3:
                    raise ValueError("Invalid filename format")

                start = int(parts[1])
                end = int(parts[2])

                # Validate continuity
                if start != expected_start:
                    raise ValueError(f"Non-contiguous shard start: expected {expected_start}, got {start}")

                # Validate range
                if end <= start:
                    raise ValueError(f"Invalid shard range: {start}-{end}")

                self.shard_map[(start, end)] = f.name
                self.total_docs = end + 1
                expected_start = end + 1
                logger.debug(f"Mapped shard {f.name}: indices {start}-{end}")
            except Exception as e:
                logger.error(f"Error processing shard {f.name}: {str(e)}")
                raise RuntimeError("Invalid shard structure") from e

        logger.info(f"Validated {len(self.shard_map)} continuous shards")
        logger.info(f"Total document count: {self.total_docs}")

        # Double-check shard boundaries for gaps or overlaps
        sorted_ranges = sorted(self.shard_map.keys())
        for i in range(1, len(sorted_ranges)):
            prev_end = sorted_ranges[i - 1][1]
            curr_start = sorted_ranges[i][0]
            if curr_start != prev_end + 1:
                logger.warning(f"Gap or overlap detected between shards: {prev_end} to {curr_start}")
    def get_metadata(self, global_indices):
        """Retrieve metadata rows for a set of global document indices."""
        # Handle an empty numpy array explicitly
        if isinstance(global_indices, np.ndarray) and global_indices.size == 0:
            logger.warning("Empty indices array passed to get_metadata")
            return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

        # Convert numpy arrays to a plain list for processing
        indices_list = global_indices.tolist() if isinstance(global_indices, np.ndarray) else global_indices
        logger.info(f"Retrieving metadata for {len(indices_list)} indices")

        # Filter out-of-range indices
        valid_indices = [idx for idx in indices_list if 0 <= idx < self.total_docs]
        invalid_count = len(indices_list) - len(valid_indices)
        if invalid_count > 0:
            logger.warning(f"Filtered out {invalid_count} invalid indices")

        if not valid_indices:
            logger.warning("No valid indices remain after filtering")
            return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

        # Group indices by shard, converting each to a shard-local offset
        shard_groups = {}
        unassigned_indices = []
        for idx in valid_indices:
            found = False
            for (start, end), shard in self.shard_map.items():
                if start <= idx <= end:
                    if shard not in shard_groups:
                        shard_groups[shard] = []
                    shard_groups[shard].append(idx - start)
                    found = True
                    break
            if not found:
                unassigned_indices.append(idx)
                logger.warning(f"Index {idx} not found in any shard range")

        if unassigned_indices:
            logger.warning(f"Could not assign {len(unassigned_indices)} indices to any shard")

        # Load and process shards
        results = []
        for shard, local_indices in shard_groups.items():
            try:
                logger.info(f"Processing shard {shard} with {len(local_indices)} indices")
                start_time = time.time()

                if shard not in self.loaded_shards:
                    logger.info(f"Loading shard file: {shard}")
                    shard_path = self.shard_dir / shard

                    # Verify the file exists
                    if not shard_path.exists():
                        logger.error(f"Shard file not found: {shard_path}")
                        continue

                    # Log file size
                    file_size_mb = os.path.getsize(shard_path) / (1024 * 1024)
                    logger.info(f"Shard file size: {file_size_mb:.2f} MB")

                    # Attempt to read the parquet file
                    try:
                        self.loaded_shards[shard] = pd.read_parquet(
                            shard_path,
                            columns=["title", "summary", "source"]
                        )
                        logger.info(f"Successfully loaded shard {shard} with {len(self.loaded_shards[shard])} rows")
                    except Exception as e:
                        logger.error(f"Failed to read parquet file {shard}: {str(e)}")
                        # Try to read the file schema for debugging
                        try:
                            schema = pd.read_parquet(shard_path, engine='pyarrow').dtypes
                            logger.info(f"Parquet schema: {schema}")
                        except Exception:
                            pass
                        continue

                if local_indices:
                    # Validate indices are within dataframe bounds
                    df_len = len(self.loaded_shards[shard])
                    valid_local_indices = [idx for idx in local_indices if 0 <= idx < df_len]

                    if len(valid_local_indices) != len(local_indices):
                        logger.warning(f"Filtered {len(local_indices) - len(valid_local_indices)} out-of-bounds indices")

                    if valid_local_indices:
                        logger.debug(f"Retrieving rows at indices: {valid_local_indices}")
                        chunk = self.loaded_shards[shard].iloc[valid_local_indices]
                        results.append(chunk)
                        logger.info(f"Retrieved {len(chunk)} records from shard {shard}")

                logger.info(f"Shard processing completed in {time.time() - start_time:.2f} seconds")
            except Exception as e:
                logger.error(f"Error processing shard {shard}: {str(e)}", exc_info=True)
                continue

        # Combine results
        if results:
            combined = pd.concat(results).reset_index(drop=True)
            logger.info(f"Combined metadata: {len(combined)} records from {len(results)} shards")
            return combined
        else:
            logger.warning("No metadata records retrieved")
            return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

    def _resolve_paper_url(self, title):
        """Find paper URLs using multiple strategies."""
        # Check the cache first
        if title in self.api_cache:
            return self.api_cache[title]

        links = {}

        # Try arXiv first
        arxiv_url = self._get_arxiv_url(title)
        if arxiv_url:
            links["arxiv"] = arxiv_url

        # Then try a direct link via Semantic Scholar's API
        # (stored under "semantic" so _format_source_links picks it up)
        semantic_url = self._get_semantic_scholar_url(title)
        if semantic_url:
            links["semantic"] = semantic_url

        # Always provide a Google Scholar search as a fallback
        scholar_url = f"https://scholar.google.com/scholar?q={quote(title)}"
        links["google"] = scholar_url

        self.api_cache[title] = links
        return links

    def _get_arxiv_url(self, title):
        """Search the arXiv API for a paper by title."""
        try:
            response = requests.get(
                "http://export.arxiv.org/api/query",
                params={
                    "search_query": f'ti:"{title}"',
                    "max_results": 1,
                    "sortBy": "relevance"
                },
                timeout=5
            )
            response.raise_for_status()

            # Parse the Atom XML response
            from xml.etree import ElementTree as ET
            root = ET.fromstring(response.content)
            entry = root.find('{http://www.w3.org/2005/Atom}entry')
            if entry is not None:
                arxiv_id = entry.find('{http://www.w3.org/2005/Atom}id').text
                return arxiv_id.replace('http:', 'https:')  # Force HTTPS
        except Exception as e:
            logger.error(f"arXiv API failed for '{title}': {str(e)}")
        return None

    def _get_semantic_scholar_url(self, title):
        """Search the Semantic Scholar API for a paper by title and return its URL."""
        try:
            response = requests.get(
                "https://api.semanticscholar.org/graph/v1/paper/search",
                params={
                    "query": title,
                    "limit": 1,
                    "fields": "paperId,url,title"
                },
                timeout=5
            )
            response.raise_for_status()  # Raises for 429 and other HTTP errors

            data = response.json()
            if "data" in data and len(data["data"]) > 0:
                paper = data["data"][0]
                if paper.get("url"):
                    return paper["url"]
                elif paper.get("paperId"):
                    return f"https://www.semanticscholar.org/paper/{paper['paperId']}"
        except requests.exceptions.HTTPError as http_err:
            if response.status_code == 429:
                logger.error(f"Rate limit exceeded for Semantic Scholar API for '{title}'. Falling back.")
                time.sleep(1)  # simple backoff delay; consider exponential backoff
            else:
                logger.error(f"Semantic Scholar API failed for '{title}': {http_err}")
        except Exception as e:
            logger.error(f"Semantic Scholar API failed for '{title}': {e}")
        return None

class SemanticSearch:
    def __init__(self):
        self.shard_dir = Path("compressed_shards")
        self.model = None
        self.index_shards = []
        self.metadata_mgr = MetadataManager()
        self.shard_sizes = []

        # Configure search logger
        self.logger = logging.getLogger("SemanticSearch")
        self.logger.info("Initializing SemanticSearch")

    @st.cache_resource
    def load_model(_self):
        # Cache the model across Streamlit reruns; the underscore on _self
        # keeps the instance out of Streamlit's cache key.
        return SentenceTransformer('all-MiniLM-L6-v2')

    def initialize_system(self):
        self.logger.info("Loading sentence transformer model")
        start_time = time.time()
        self.model = self.load_model()
        self.logger.info(f"Model loaded in {time.time() - start_time:.2f} seconds")

        self.logger.info("Loading FAISS indices")
        self._load_faiss_shards()

    def _load_faiss_shards(self):
        """Load all FAISS index shards."""
        self.logger.info(f"Searching for index files in {self.shard_dir}")

        if not self.shard_dir.exists():
            self.logger.error(f"Shard directory not found: {self.shard_dir}")
            return

        index_files = list(self.shard_dir.glob("*.index"))
        self.logger.info(f"Found {len(index_files)} index files")

        self.shard_sizes = []
        self.index_shards = []

        for shard_path in sorted(index_files):
            try:
                self.logger.info(f"Loading index: {shard_path}")
                start_time = time.time()

                # Log file size
                file_size_mb = os.path.getsize(shard_path) / (1024 * 1024)
                self.logger.info(f"Index file size: {file_size_mb:.2f} MB")

                index = faiss.read_index(str(shard_path))
                self.index_shards.append(index)
                self.shard_sizes.append(index.ntotal)
                self.logger.info(f"Loaded index with {index.ntotal} vectors in {time.time() - start_time:.2f} seconds")
            except Exception as e:
                self.logger.error(f"Failed to load index {shard_path}: {str(e)}")

        total_vectors = sum(self.shard_sizes)
        self.logger.info(f"Total loaded vectors: {total_vectors} across {len(self.index_shards)} shards")
    def _global_index(self, shard_idx, local_idx):
        """Convert a shard-local index to a global index."""
        return sum(self.shard_sizes[:shard_idx]) + local_idx

    def search(self, query, top_k=5):
        """Search across all shards and return the top_k results."""
        self.logger.info(f"Searching for query: '{query}' (top_k={top_k})")
        start_time = time.time()

        if not query:
            self.logger.warning("Empty query provided")
            return pd.DataFrame()

        if not self.index_shards:
            self.logger.error("No index shards loaded")
            return pd.DataFrame()

        try:
            self.logger.info("Encoding query")
            query_embedding = self.model.encode([query], convert_to_numpy=True)
            self.logger.debug(f"Query encoded to shape {query_embedding.shape}")
        except Exception as e:
            self.logger.error(f"Query encoding failed: {str(e)}")
            return pd.DataFrame()

        all_distances = []
        all_global_indices = []

        # Search each shard, validating the returned local indices
        self.logger.info(f"Searching across {len(self.index_shards)} shards")
        for shard_idx, index in enumerate(self.index_shards):
            if index.ntotal == 0:
                self.logger.warning(f"Skipping empty shard {shard_idx}")
                continue

            try:
                shard_start = time.time()
                distances, indices = index.search(query_embedding, top_k)

                valid_mask = (indices[0] >= 0) & (indices[0] < index.ntotal)
                valid_indices = indices[0][valid_mask].tolist()
                valid_distances = distances[0][valid_mask].tolist()

                if len(valid_indices) != top_k:
                    self.logger.debug(f"Shard {shard_idx}: Found {len(valid_indices)} valid results out of {top_k}")

                global_indices = [self._global_index(shard_idx, idx) for idx in valid_indices]
                all_distances.extend(valid_distances)
                all_global_indices.extend(global_indices)

                self.logger.debug(f"Shard {shard_idx} search completed in {time.time() - shard_start:.3f}s")
            except Exception as e:
                self.logger.error(f"Search failed in shard {shard_idx}: {str(e)}")
                continue

        self.logger.info(f"Search found {len(all_global_indices)} results across all shards")

        # Process results
        results = self._process_results(
            np.array(all_distances),
            np.array(all_global_indices),
            top_k
        )

        self.logger.info(f"Search completed in {time.time() - start_time:.2f} seconds with {len(results)} final results")
        return results

    def _process_results(self, distances, global_indices, top_k):
        """Process raw search results into a formatted DataFrame."""
        process_start = time.time()

        # Proper numpy array emptiness checks
        if global_indices.size == 0 or distances.size == 0:
            self.logger.warning("No search results to process")
            return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

        try:
            # Get metadata for the matched indices
            self.logger.info(f"Retrieving metadata for {len(global_indices)} indices")
            metadata_start = time.time()
            results = self.metadata_mgr.get_metadata(global_indices)
            self.logger.info(f"Metadata retrieved in {time.time() - metadata_start:.2f}s, got {len(results)} records")

            # Empty results check
            if len(results) == 0:
                self.logger.warning("No metadata found for indices")
                return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

            # Ensure the distances array matches the results length
            if len(results) != len(distances):
                self.logger.warning(f"Mismatch between distances ({len(distances)}) and results ({len(results)})")
                if len(results) < len(distances):
                    self.logger.info("Truncating distances array to match results length")
                    distances = distances[:len(results)]
                else:
                    # Should not happen, but handle it anyway
                    self.logger.error("More results than distances - this shouldn't happen")
                    distances = np.pad(distances, (0, len(results) - len(distances)), 'constant', constant_values=1.0)

            # Convert distances to similarity scores. This mapping assumes the
            # FAISS shards store normalized embeddings under an L2 metric, so
            # smaller distances become scores closer to 1.
            self.logger.debug("Calculating similarity scores")
            results['similarity'] = 1 - (distances / 2)

            # Log similarity statistics
            if not results.empty:
                self.logger.debug(f"Similarity stats: min={results['similarity'].min():.3f}, "
                                  f"max={results['similarity'].max():.3f}, "
                                  f"mean={results['similarity'].mean():.3f}")

            # Resolve source links for each title
            results['source'] = results['title'].apply(
                lambda title: self._format_source_links(
                    self.metadata_mgr._resolve_paper_url(title)
                )
            )

            # Deduplicate and sort results
            pre_dedup = len(results)
            results = (
                results.drop_duplicates(subset=["title", "source"])
                .sort_values("similarity", ascending=False)
                .head(top_k)
            )
            post_dedup = len(results)
            if pre_dedup > post_dedup:
                self.logger.info(f"Removed {pre_dedup - post_dedup} duplicate results")

            self.logger.info(f"Results processed in {time.time() - process_start:.2f}s, returning {len(results)} items")
            return results.reset_index(drop=True)
        except Exception as e:
            self.logger.error(f"Result processing failed: {str(e)}", exc_info=True)
            return pd.DataFrame(columns=["title", "summary", "source", "similarity"])

    def _format_source_links(self, links):
        """Generate an HTML snippet for the available source links."""
        html_parts = []
        if "arxiv" in links:
            html_parts.append(
                f"<a class='source-link' href='{links['arxiv']}' target='_blank' rel='noopener noreferrer'>arXiv</a>"
            )
        if "semantic" in links:
            html_parts.append(
                f"<a class='source-link' href='{links['semantic']}' target='_blank' rel='noopener noreferrer'>Semantic Scholar</a>"
            )
        if "google" in links:
            html_parts.append(
                f"<a class='source-link' href='{links['google']}' target='_blank' rel='noopener noreferrer'>Google Scholar</a>"
            )
        return " | ".join(html_parts)