# -*- coding: utf-8 -*- from __future__ import annotations import os import io import json import time import tarfile import logging import hashlib from typing import Dict, Any, List, Tuple, Optional from concurrent.futures import ThreadPoolExecutor import numpy as np import faiss from fastapi import FastAPI, HTTPException from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse, StreamingResponse from pydantic import BaseModel import gradio as gr # ============================================================================= # LOGGING # ============================================================================= LOG = logging.getLogger("remote-indexer-async") if not LOG.handlers: h = logging.StreamHandler() h.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) LOG.addHandler(h) LOG.setLevel(logging.INFO) DBG = logging.getLogger("remote-indexer-async.debug") if not DBG.handlers: hd = logging.StreamHandler() hd.setFormatter(logging.Formatter("[DEBUG] %(asctime)s - %(message)s")) DBG.addHandler(hd) DBG.setLevel(logging.DEBUG) # ============================================================================= # CONFIG (via ENV) # ============================================================================= PORT = int(os.getenv("PORT", "7860")) DATA_ROOT = os.getenv("DATA_ROOT", "/tmp/data") # stockage interne du Space (volatile en Free) os.makedirs(DATA_ROOT, exist_ok=True) # Provider d'embeddings: # - "dummy" : vecteurs aléatoires déterministes (très rapide) # - "st" : Sentence-Transformers (CPU-friendly) # - "hf" : Transformers pur (AutoModel/AutoTokenizer) EMB_PROVIDER = os.getenv("EMB_PROVIDER", "dummy").strip().lower() EMB_MODEL = os.getenv("EMB_MODEL", "sentence-transformers/all-mpnet-base-v2").strip() EMB_BATCH = int(os.getenv("EMB_BATCH", "32")) EMB_DIM = int(os.getenv("EMB_DIM", "128")) # utilisé pour dummy # Taille du pool de workers (asynchrone) MAX_WORKERS = int(os.getenv("MAX_WORKERS", "1")) # ============================================================================= # CACHE DIRECTORIES (évite PermissionError: '/.cache') # ============================================================================= def _setup_cache_dirs() -> Dict[str, str]: os.environ.setdefault("HOME", "/home/user") CACHE_ROOT = os.getenv("CACHE_ROOT", "/tmp/.cache").rstrip("/") paths = { "root": CACHE_ROOT, "hf_home": f"{CACHE_ROOT}/huggingface", "hf_hub": f"{CACHE_ROOT}/huggingface/hub", "hf_tf": f"{CACHE_ROOT}/huggingface/transformers", "torch": f"{CACHE_ROOT}/torch", "st": f"{CACHE_ROOT}/sentence-transformers", "mpl": f"{CACHE_ROOT}/matplotlib", } for p in paths.values(): try: os.makedirs(p, exist_ok=True) except Exception as e: LOG.warning("Impossible de créer %s : %s", p, e) os.environ["HF_HOME"] = paths["hf_home"] os.environ["HF_HUB_CACHE"] = paths["hf_hub"] os.environ["TRANSFORMERS_CACHE"] = paths["hf_tf"] os.environ["TORCH_HOME"] = paths["torch"] os.environ["SENTENCE_TRANSFORMERS_HOME"] = paths["st"] os.environ["MPLCONFIGDIR"] = paths["mpl"] os.environ.setdefault("HF_HUB_DISABLE_SYMLINKS_WARNING", "1") os.environ.setdefault("TOKENIZERS_PARALLELISM", "false") LOG.info("Caches configurés: %s", json.dumps(paths, indent=2)) return paths CACHE_PATHS = _setup_cache_dirs() # Cache global lazy (pour les modèles) _ST_MODEL = None _HF_TOKENIZER = None _HF_MODEL = None # ============================================================================= # JOB STATE # ============================================================================= class JobState(BaseModel): job_id: str project_id: str stage: str = "pending" # pending -> chunking -> embedding -> indexing -> done/failed total_files: int = 0 total_chunks: int = 0 embedded: int = 0 indexed: int = 0 errors: List[str] = [] messages: List[str] = [] started_at: float = time.time() finished_at: Optional[float] = None JOBS: Dict[str, JobState] = {} def _now() -> str: return time.strftime("%H:%M:%S") def _proj_dirs(project_id: str) -> Tuple[str, str, str]: base = os.path.join(DATA_ROOT, project_id) ds_dir = os.path.join(base, "dataset") fx_dir = os.path.join(base, "faiss") os.makedirs(ds_dir, exist_ok=True) os.makedirs(fx_dir, exist_ok=True) return base, ds_dir, fx_dir def _add_msg(st: JobState, msg: str): st.messages.append(f"[{_now()}] {msg}") LOG.info("[%s] %s", st.job_id, msg) DBG.debug("[%s] %s", st.job_id, msg) def _set_stage(st: JobState, stage: str): st.stage = stage _add_msg(st, f"stage={stage}") # ============================================================================= # UTILS # ============================================================================= def _chunk_text(text: str, size: int = 200, overlap: int = 20) -> List[str]: text = (text or "").replace("\r\n", "\n") tokens = list(text) if size <= 0: return [text] if text else [] if overlap < 0: overlap = 0 chunks = [] i = 0 while i < len(tokens): j = min(i + size, len(tokens)) chunk = "".join(tokens[i:j]).strip() if chunk: chunks.append(chunk) if j == len(tokens): break i = j - overlap if (j - overlap) > i else j return chunks def _l2_normalize(x: np.ndarray) -> np.ndarray: n = np.linalg.norm(x, axis=1, keepdims=True) + 1e-12 return x / n # ----------------------- PROVIDER: DUMMY -------------------------------------- def _emb_dummy(texts: List[str], dim: int = EMB_DIM) -> np.ndarray: vecs = np.zeros((len(texts), dim), dtype="float32") for i, t in enumerate(texts): h = hashlib.sha1((t or "").encode("utf-8")).digest() rng = np.random.default_rng(int.from_bytes(h[:8], "little", signed=False)) v = rng.standard_normal(dim).astype("float32") vecs[i] = v / (np.linalg.norm(v) + 1e-9) return vecs # ----------------- PROVIDER: Sentence-Transformers ---------------------------- def _get_st_model(): global _ST_MODEL if _ST_MODEL is None: from sentence_transformers import SentenceTransformer _ST_MODEL = SentenceTransformer(EMB_MODEL, cache_folder=CACHE_PATHS["st"]) LOG.info("[st] modèle chargé: %s (cache=%s)", EMB_MODEL, CACHE_PATHS["st"]) return _ST_MODEL def _emb_st(texts: List[str]) -> np.ndarray: model = _get_st_model() vecs = model.encode( texts, batch_size=max(1, EMB_BATCH), convert_to_numpy=True, normalize_embeddings=True, show_progress_bar=False, ).astype("float32") return vecs # ----------------------- PROVIDER: Transformers (HF) -------------------------- def _get_hf_model(): global _HF_TOKENIZER, _HF_MODEL if _HF_MODEL is None or _HF_TOKENIZER is None: from transformers import AutoTokenizer, AutoModel _HF_TOKENIZER = AutoTokenizer.from_pretrained(EMB_MODEL, cache_dir=CACHE_PATHS["hf_tf"]) _HF_MODEL = AutoModel.from_pretrained(EMB_MODEL, cache_dir=CACHE_PATHS["hf_tf"]) _HF_MODEL.eval() LOG.info("[hf] modèle chargé: %s (cache=%s)", EMB_MODEL, CACHE_PATHS["hf_tf"]) return _HF_TOKENIZER, _HF_MODEL def _mean_pool(last_hidden_state: "np.ndarray", attention_mask: "np.ndarray") -> "np.ndarray": mask = attention_mask[..., None].astype(last_hidden_state.dtype) summed = (last_hidden_state * mask).sum(axis=1) counts = mask.sum(axis=1).clip(min=1e-9) return summed / counts def _emb_hf(texts: List[str]) -> np.ndarray: import torch tok, mod = _get_hf_model() all_vecs = [] bs = max(1, EMB_BATCH) with torch.no_grad(): for i in range(0, len(texts), bs): batch = texts[i:i+bs] enc = tok(batch, padding=True, truncation=True, return_tensors="pt") out = mod(**enc) last = out.last_hidden_state # (b, t, h) pooled = _mean_pool(last.numpy(), enc["attention_mask"].numpy()) all_vecs.append(pooled.astype("float32")) vecs = np.concatenate(all_vecs, axis=0) return _l2_normalize(vecs) # ---------------------------- DATASET / FAISS --------------------------------- def _save_dataset(ds_dir: str, rows: List[Dict[str, Any]]): os.makedirs(ds_dir, exist_ok=True) data_path = os.path.join(ds_dir, "data.jsonl") with open(data_path, "w", encoding="utf-8") as f: for r in rows: f.write(json.dumps(r, ensure_ascii=False) + "\n") meta = {"format": "jsonl", "columns": ["path", "text", "chunk_id"], "count": len(rows)} with open(os.path.join(ds_dir, "meta.json"), "w", encoding="utf-8") as f: json.dump(meta, f, ensure_ascii=False, indent=2) def _load_dataset(ds_dir: str) -> List[Dict[str, Any]]: data_path = os.path.join(ds_dir, "data.jsonl") if not os.path.isfile(data_path): return [] out = [] with open(data_path, "r", encoding="utf-8") as f: for line in f: try: out.append(json.loads(line)) except Exception: continue return out def _save_faiss(fx_dir: str, xb: np.ndarray, meta: Dict[str, Any]): os.makedirs(fx_dir, exist_ok=True) idx_path = os.path.join(fx_dir, "emb.faiss") index = faiss.IndexFlatIP(xb.shape[1]) # cosine ~ inner product si embeddings normalisés index.add(xb) faiss.write_index(index, idx_path) with open(os.path.join(fx_dir, "meta.json"), "w", encoding="utf-8") as f: json.dump(meta, f, ensure_ascii=False, indent=2) def _load_faiss(fx_dir: str) -> faiss.Index: idx_path = os.path.join(fx_dir, "emb.faiss") if not os.path.isfile(idx_path): raise FileNotFoundError(f"FAISS index introuvable: {idx_path}") return faiss.read_index(idx_path) def _tar_dir_to_bytes(dir_path: str) -> bytes: bio = io.BytesIO() with tarfile.open(fileobj=bio, mode="w:gz") as tar: tar.add(dir_path, arcname=os.path.basename(dir_path)) bio.seek(0) return bio.read() # ============================================================================= # WORKER POOL (asynchrone) # ============================================================================= EXECUTOR = ThreadPoolExecutor(max_workers=max(1, MAX_WORKERS)) LOG.info("ThreadPoolExecutor initialisé : max_workers=%s", MAX_WORKERS) def _do_index_job(st: JobState, files: List[Dict[str, str]], chunk_size: int, overlap: int, batch_size: int, store_text: bool) -> None: """ Tâche lourde lancée dans un worker thread. Met à jour l'état 'st' tout au long du pipeline. """ try: base, ds_dir, fx_dir = _proj_dirs(st.project_id) # 1) Chunking _set_stage(st, "chunking") rows: List[Dict[str, Any]] = [] st.total_files = len(files) for it in files: path = (it.get("path") or "unknown").strip() txt = it.get("text") or "" chks = _chunk_text(txt, size=int(chunk_size), overlap=int(overlap)) _add_msg(st, f"{path}: len(text)={len(txt)} chunks={len(chks)}") for ci, ck in enumerate(chks): rows.append({"path": path, "text": ck, "chunk_id": ci}) st.total_chunks = len(rows) _add_msg(st, f"Total chunks = {st.total_chunks}") # 2) Embedding _set_stage(st, "embedding") texts = [r["text"] for r in rows] if EMB_PROVIDER == "dummy": xb = _emb_dummy(texts, dim=EMB_DIM) dim = xb.shape[1] elif EMB_PROVIDER == "st": xb = _emb_st(texts) dim = xb.shape[1] else: xb = _emb_hf(texts) dim = xb.shape[1] st.embedded = xb.shape[0] _add_msg(st, f"Embeddings {st.embedded}/{st.total_chunks}") _add_msg(st, f"Embeddings dim={dim}") # 3) Sauvegarde dataset (texte) _save_dataset(ds_dir, rows) _add_msg(st, f"Dataset (sans index) sauvegardé dans {ds_dir}") # 4) FAISS _set_stage(st, "indexing") faiss_meta = { "dim": int(dim), "count": int(xb.shape[0]), "provider": EMB_PROVIDER, "model": EMB_MODEL if EMB_PROVIDER != "dummy" else None } _save_faiss(fx_dir, xb, meta=faiss_meta) st.indexed = int(xb.shape[0]) _add_msg(st, f"FAISS écrit sur {os.path.join(fx_dir, 'emb.faiss')}") _add_msg(st, f"OK — dataset+index prêts (projet={st.project_id})") _set_stage(st, "done") st.finished_at = time.time() except Exception as e: LOG.exception("Job %s failed", st.job_id) st.errors.append(str(e)) _add_msg(st, f"❌ Exception: {e}") st.stage = "failed" st.finished_at = time.time() def _submit_job(project_id: str, files: List[Dict[str, str]], chunk_size: int, overlap: int, batch_size: int, store_text: bool) -> str: job_id = hashlib.sha1(f"{project_id}{time.time()}".encode()).hexdigest()[:12] st = JobState(job_id=job_id, project_id=project_id, stage="pending", messages=[]) JOBS[job_id] = st _add_msg(st, f"Job {job_id} créé pour project {project_id}") _add_msg(st, f"Index start project={project_id} files={len(files)} chunk_size={chunk_size} overlap={overlap} batch_size={batch_size} store_text={store_text} provider={EMB_PROVIDER} model={EMB_MODEL if EMB_PROVIDER!='dummy' else '-'}") # Soumission au pool (retour immédiat) EXECUTOR.submit(_do_index_job, st, files, chunk_size, overlap, batch_size, store_text) _set_stage(st, "queued") return job_id # ============================================================================= # FASTAPI # ============================================================================= fastapi_app = FastAPI(title="remote-indexer-async", version="3.0.0") fastapi_app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) class FileItem(BaseModel): path: str text: str class IndexRequest(BaseModel): project_id: str files: List[FileItem] chunk_size: int = 200 overlap: int = 20 batch_size: int = 32 store_text: bool = True @fastapi_app.get("/health") def health(): info = { "ok": True, "service": "remote-indexer-async", "provider": EMB_PROVIDER, "model": EMB_MODEL if EMB_PROVIDER != "dummy" else None, "cache_root": os.getenv("CACHE_ROOT", "/tmp/.cache"), "workers": MAX_WORKERS, "data_root": DATA_ROOT, } return info @fastapi_app.get("/") def root_redirect(): return {"ok": True, "service": "remote-indexer-async", "ui": "/ui"} @fastapi_app.post("/index") def index(req: IndexRequest): """ ASYNCHRONE : retourne immédiatement un job_id. Le traitement est effectué en arrière-plan par le pool de threads. """ try: files = [fi.model_dump() for fi in req.files] job_id = _submit_job( project_id=req.project_id, files=files, chunk_size=int(req.chunk_size), overlap=int(req.overlap), batch_size=int(req.batch_size), store_text=bool(req.store_text), ) return {"job_id": job_id} except Exception as e: LOG.exception("index failed (submit)") raise HTTPException(status_code=500, detail=str(e)) @fastapi_app.get("/status/{job_id}") def status(job_id: str): st = JOBS.get(job_id) if not st: raise HTTPException(status_code=404, detail="job inconnu") return JSONResponse(st.model_dump()) class SearchRequest(BaseModel): project_id: str query: str k: int = 5 @fastapi_app.post("/search") def search(req: SearchRequest): base, ds_dir, fx_dir = _proj_dirs(req.project_id) # Si l'index n'existe pas encore, on répond 409 (conflit / pas prêt) idx_path = os.path.join(fx_dir, "emb.faiss") ds_path = os.path.join(ds_dir, "data.jsonl") if not (os.path.isfile(idx_path) and os.path.isfile(ds_path)): raise HTTPException(status_code=409, detail="Index non prêt (reviens plus tard)") rows = _load_dataset(ds_dir) if not rows: raise HTTPException(status_code=404, detail="dataset introuvable") # Embedding de la requête avec le MÊME provider if EMB_PROVIDER == "dummy": q = _emb_dummy([req.query], dim=EMB_DIM)[0:1, :] elif EMB_PROVIDER == "st": q = _emb_st([req.query])[0:1, :] else: q = _emb_hf([req.query])[0:1, :] # FAISS index = _load_faiss(fx_dir) if index.d != q.shape[1]: raise HTTPException(status_code=500, detail=f"dim incompatibles: index.d={index.d} vs query={q.shape[1]}") scores, ids = index.search(q, int(max(1, req.k))) ids = ids[0].tolist() scores = scores[0].tolist() out = [] for idx, sc in zip(ids, scores): if idx < 0 or idx >= len(rows): continue r = rows[idx] out.append({"path": r.get("path"), "text": r.get("text"), "score": float(sc)}) return {"results": out} # ----------- ARTIFACTS EXPORT ----------- @fastapi_app.get("/artifacts/{project_id}/dataset") def download_dataset(project_id: str): base, ds_dir, _ = _proj_dirs(project_id) if not os.path.isdir(ds_dir): raise HTTPException(status_code=404, detail="Dataset introuvable") buf = _tar_dir_to_bytes(ds_dir) headers = {"Content-Disposition": f'attachment; filename="{project_id}_dataset.tgz"'} return StreamingResponse(io.BytesIO(buf), media_type="application/gzip", headers=headers) @fastapi_app.get("/artifacts/{project_id}/faiss") def download_faiss(project_id: str): base, _, fx_dir = _proj_dirs(project_id) if not os.path.isdir(fx_dir): raise HTTPException(status_code=404, detail="FAISS introuvable") buf = _tar_dir_to_bytes(fx_dir) headers = {"Content-Disposition": f'attachment; filename="{project_id}_faiss.tgz"'} return StreamingResponse(io.BytesIO(buf), media_type="application/gzip", headers=headers) # ============================================================================= # GRADIO UI (facultatif de test) # ============================================================================= def _ui_index(project_id: str, sample_text: str): files = [{"path": "sample.txt", "text": sample_text}] from pydantic import ValidationError try: req = IndexRequest(project_id=project_id, files=[FileItem(**f) for f in files]) except ValidationError as e: return f"Erreur: {e}" try: res = index(req) return f"Job lancé: {res['job_id']}" except Exception as e: return f"Erreur index: {e}" def _ui_search(project_id: str, query: str, k: int): try: res = search(SearchRequest(project_id=project_id, query=query, k=int(k))) return json.dumps(res, ensure_ascii=False, indent=2) except Exception as e: return f"Erreur search: {e}" with gr.Blocks(title="Remote Indexer (Async FAISS)", analytics_enabled=False) as ui: gr.Markdown("## Remote Indexer — **Async** (API: `/index`, `/status/{job}`, `/search`, `/artifacts/...`).") gr.Markdown(f"**Provider**: `{EMB_PROVIDER}` — **Model**: `{EMB_MODEL if EMB_PROVIDER!='dummy' else '-'}` — **Cache**: `{os.getenv('CACHE_ROOT', '/tmp/.cache')}` — **Workers**: `{MAX_WORKERS}`") with gr.Tab("Index"): pid = gr.Textbox(label="Project ID", value="DEEPWEB") sample = gr.Textbox(label="Texte d’exemple", value="Alpha bravo charlie delta echo foxtrot.", lines=4) btn = gr.Button("Lancer index (sample)") out = gr.Textbox(label="Résultat") btn.click(_ui_index, inputs=[pid, sample], outputs=[out]) with gr.Tab("Search"): pid2 = gr.Textbox(label="Project ID", value="DEEPWEB") q = gr.Textbox(label="Query", value="alpha") k = gr.Slider(1, 20, value=5, step=1, label="k") btn2 = gr.Button("Rechercher") out2 = gr.Code(label="Résultats") btn2.click(_ui_search, inputs=[pid2, q, k], outputs=[out2]) fastapi_app = gr.mount_gradio_app(fastapi_app, ui, path="/ui") # ============================================================================= # MAIN # ============================================================================= if __name__ == "__main__": import uvicorn LOG.info("Démarrage Uvicorn sur 0.0.0.0:%s (UI_PATH=/ui) — async index", PORT) uvicorn.run(fastapi_app, host="0.0.0.0", port=PORT)