"""
Genomic Semantic Search API with FastAPI
=========================================
Search genomic sequences using your pre-trained transformer embeddings.
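
Endpoints:
    GET  /            - search frontend (index.html)
    GET  /api/health  - liveness and load status
    GET  /api/stats   - index statistics
    POST /api/search  - semantic search over indexed sequences
    GET  /api/sample  - preview of indexed metadata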
"""
import pickle
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import faiss
import torch
import torch.nn as nn
from x_transformers import TransformerWrapper, Encoder
import tiktoken
# ============================================================================
# Configuration
# ============================================================================
DATA_DIR = Path("data")
INDEX_PATH = DATA_DIR / "faiss.index"
METADATA_PATH = DATA_DIR / "metadata.pkl"
EMBEDDINGS_PATH = DATA_DIR / "embeddings.npy"
# Model paths - update these to your actual paths
MODEL_WEIGHTS_PATH = DATA_DIR / "bpe_plus_special_tokens_model.pt"
TOKENIZER_PATH = DATA_DIR / "bpe_plus_special_tokens_tokenizer.json"
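# Expected artifacts: a FAISS inner-product index, a pickled pandas DataFrame
# of per-vector metadata (with the raw sequence under "__sequence__"), the raw
# embedding matrix, the model weights, and a pickled tiktoken tokenizer.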
# ============================================================================
# Model Definition
# ============================================================================
class GenomicTransformer(nn.Module):
    """Encoder-only transformer that embeds BPE-tokenized genomic sequences."""

    def __init__(self, vocab_size=40000, hidden_dim=512, layers=12, heads=8, max_length=6000):
super().__init__()
self.model = TransformerWrapper(
num_tokens=vocab_size,
max_seq_len=max_length,
attn_layers=Encoder(
dim=hidden_dim,
depth=layers,
heads=heads,
rotary_pos_emb=True,
attn_orthog_projected_values=True,
attn_orthog_projected_values_per_head=True,
attn_flash=True
)
)
def forward(self, input_ids, return_embeddings=False):
return self.model(input_ids, return_embeddings=return_embeddings)
# ============================================================================
# App Setup
# ============================================================================
app = FastAPI(
title="Genomic Semantic Search",
description="Search genomic sequences using transformer embeddings",
version="1.0.0"
)
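# Wide-open CORS so the frontend can call the API from any origin; tighten
# allow_origins before exposing this beyond a demo.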
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Global state
device: Optional[torch.device] = None
model: Optional[GenomicTransformer] = None
encoder: Optional[tiktoken.Encoding] = None
index: Optional[faiss.IndexFlatIP] = None
metadata: Optional[pd.DataFrame] = None
# ============================================================================
# Request/Response Schemas
# ============================================================================
class SearchRequest(BaseModel):
query: str # The genomic sequence to search for
top_k: int = 10
class SearchResult(BaseModel):
rank: int
score: float
sequence: str
metadata: dict
class SearchResponse(BaseModel):
query: str
results: list[SearchResult]
total_indexed: int
class IndexStats(BaseModel):
total_documents: int
embedding_dimension: int
model_name: str
device: str
# ============================================================================
# Startup
# ============================================================================
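# NOTE: @app.on_event is deprecated in recent FastAPI versions in favour of
# lifespan handlers; it is kept here because it still works and stays simple.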
@app.on_event("startup")
async def startup():
"""Load the model, tokenizer, and FAISS index on startup."""
global device, model, encoder, index, metadata
# Setup device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Load tokenizer
print("Loading tokenizer...")
    if TOKENIZER_PATH.exists():
        # The tokenizer file is a pickle despite its .json extension.
        with open(TOKENIZER_PATH, "rb") as f:
            tokenizer_data = pickle.load(f)
        # Rebuild the tiktoken BPE encoding from the saved regex pattern and
        # merge ranks (stored under the key 'mergable_ranks' [sic]).
        encoder = tiktoken.Encoding(
            name="genomic_bpe",
            pat_str=tokenizer_data['pattern'],
            mergeable_ranks=tokenizer_data['mergable_ranks'],
            special_tokens={}
        )
        print("Tokenizer loaded successfully")
else:
print(f"WARNING: Tokenizer not found at {TOKENIZER_PATH}")
# Load model
print("Loading model...")
if MODEL_WEIGHTS_PATH.exists():
model = GenomicTransformer(
vocab_size=40_000, hidden_dim=512, layers=12, heads=8
)
weights = torch.load(MODEL_WEIGHTS_PATH, map_location=device)
model.load_state_dict(weights)
model = model.to(device)
model.eval()
print("Model loaded successfully")
else:
print(f"WARNING: Model weights not found at {MODEL_WEIGHTS_PATH}")
# Load FAISS index
if INDEX_PATH.exists() and METADATA_PATH.exists():
print("Loading FAISS index...")
index = faiss.read_index(str(INDEX_PATH))
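        # The metadata DataFrame must align row-for-row with the index:
        # metadata.iloc[i] describes the vector with FAISS id i.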
with open(METADATA_PATH, "rb") as f:
metadata = pickle.load(f)
print(f"Index loaded with {index.ntotal} documents")
    else:
        print(f"WARNING: Index or metadata not found at {INDEX_PATH} / {METADATA_PATH}")
# ============================================================================
# API Endpoints
# ============================================================================
@app.get("/", response_class=HTMLResponse)
async def root():
"""Serve the search frontend."""
return FileResponse("index.html")
@app.get("/api/health")
async def health():
"""Health check endpoint."""
return {
"status": "healthy",
"model_loaded": model is not None,
"index_loaded": index is not None,
"tokenizer_loaded": encoder is not None,
"device": str(device)
}
@app.get("/api/stats", response_model=IndexStats)
async def get_stats():
"""Get statistics about the current index."""
if index is None:
raise HTTPException(status_code=404, detail="No index loaded")
return IndexStats(
total_documents=index.ntotal,
embedding_dimension=index.d,
model_name="GenomicTransformer (512d, 12 layers)",
device=str(device)
)
@app.post("/api/search", response_model=SearchResponse)
async def search(request: SearchRequest):
"""
Perform semantic search over genomic sequences.
- **query**: The genomic sequence to search for (e.g., "ATCGATCG...")
- **top_k**: Number of results to return (default: 10)
"""
if index is None or metadata is None:
raise HTTPException(status_code=404, detail="No index loaded")
if model is None or encoder is None:
raise HTTPException(status_code=503, detail="Model or tokenizer not loaded")
if index.ntotal == 0:
raise HTTPException(status_code=404, detail="Index is empty")
    # Encode the query sequence
    try:
        # Tokenize, truncating to the model's context window (max_length=6000).
        encodings = encoder.encode_ordinary(request.query)[:6000]
        query_tensor = torch.tensor([encodings]).long().to(device)
        with torch.no_grad():
            query_embedding = model(query_tensor, return_embeddings=True)
        # Mean-pool the token embeddings into a single query vector.
        query_embedding = query_embedding.mean(dim=1).cpu().numpy().astype(np.float32)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to encode query: {e}")
# Search
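    # Assumption: IndexFlatIP returns raw inner products. If the indexed
    # vectors were L2-normalized at build time (cosine similarity), the query
    # should be normalized too, e.g. faiss.normalize_L2(query_embedding).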
    k = max(1, min(request.top_k, index.ntotal))
    scores, indices = index.search(query_embedding, k)
# Build results
results = []
for rank, (score, idx) in enumerate(zip(scores[0], indices[0]), 1):
if idx == -1:
continue
row = metadata.iloc[idx]
meta_dict = row.to_dict()
sequence = meta_dict.pop("__sequence__", "")
results.append(SearchResult(
rank=rank,
score=float(score),
sequence=sequence,
metadata=meta_dict
))
    return SearchResponse(
        query=(request.query[:100] + "...") if len(request.query) > 100 else request.query,
        results=results,
        total_indexed=index.ntotal
    )
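# Example request (server running via the __main__ block below):
#   curl -X POST http://localhost:8080/api/search \
#        -H "Content-Type: application/json" \
#        -d '{"query": "ATCGATCGATCG", "top_k": 5}'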
@app.get("/api/sample")
async def get_sample(n: int = 5):
"""Get a sample of indexed documents."""
if metadata is None:
raise HTTPException(status_code=404, detail="No index loaded")
sample = metadata.head(n)
return {
"total": len(metadata),
"sample": sample.to_dict(orient="records")
}
# Static assets (disabled; the frontend is served directly from index.html at "/")
# app.mount("/static", StaticFiles(directory="static"), name="static")
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8080)
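# ----------------------------------------------------------------------------
# Offline index construction (sketch)
# ----------------------------------------------------------------------------
# Building the index is not part of this app. The commented sketch below is one
# way the expected artifacts could be produced; `sequences` is a hypothetical
# pandas DataFrame holding the raw sequences in a "__sequence__" column.
#
#   vecs = []
#   for seq in sequences["__sequence__"]:
#       ids = torch.tensor([encoder.encode_ordinary(seq)[:6000]]).long().to(device)
#       with torch.no_grad():
#           vecs.append(model(ids, return_embeddings=True).mean(dim=1).cpu().numpy())
#   vecs = np.concatenate(vecs).astype(np.float32)
#   idx = faiss.IndexFlatIP(vecs.shape[1])
#   idx.add(vecs)
#   faiss.write_index(idx, str(INDEX_PATH))
#   with open(METADATA_PATH, "wb") as f:
#       pickle.dump(sequences, f)
#   np.save(EMBEDDINGS_PATH, vecs)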