# -*- coding: utf-8 -*-
"""
FastAPI + Gradio: asynchronous indexing service with FAISS.
This file was fixed to:
  * import `JobState` correctly (relative import)
  * guarantee that the `app` directory is on the PYTHONPATH when the script is run
  * keep all previous functionality (indexing, search, UI)
"""
from __future__ import annotations

import os
import io
import json
import time
import hashlib
import logging
import tarfile
import sys
from pathlib import Path
from typing import List, Dict, Any, Tuple, Optional
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import faiss
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import gradio as gr
# --------------------------------------------------------------------------- #
# PYTHONPATH SETUP (so that relative imports work)
# --------------------------------------------------------------------------- #
# When the script is launched from the `app/` directory, the `app` package is
# not discovered automatically, so we add the parent directory to sys.path.
CURRENT_DIR = Path(__file__).resolve().parent
PROJECT_ROOT = CURRENT_DIR.parent
if str(PROJECT_ROOT) not in sys.path:
    sys.path.insert(0, str(PROJECT_ROOT))
# --------------------------------------------------------------------------- #
# LOGGING
# --------------------------------------------------------------------------- #
LOG = logging.getLogger("remote-indexer-async")
if not LOG.handlers:
    h = logging.StreamHandler()
    h.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    LOG.addHandler(h)
LOG.setLevel(logging.INFO)

DBG = logging.getLogger("remote-indexer-async.debug")
if not DBG.handlers:
    hd = logging.StreamHandler()
    hd.setFormatter(logging.Formatter("[DEBUG] %(asctime)s - %(message)s"))
    DBG.addHandler(hd)
DBG.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# CONFIGURATION (environment variables)
# --------------------------------------------------------------------------- #
PORT = int(os.getenv("PORT", "7860"))
DATA_ROOT = os.getenv("DATA_ROOT", "/tmp/data")
os.makedirs(DATA_ROOT, exist_ok=True)
EMB_PROVIDER = os.getenv("EMB_PROVIDER", "dummy").strip().lower()
EMB_MODEL = os.getenv("EMB_MODEL", "sentence-transformers/all-mpnet-base-v2").strip()
EMB_BATCH = int(os.getenv("EMB_BATCH", "32"))
EMB_DIM = int(os.getenv("EMB_DIM", "64"))  # reduced dimension (optimization)
MAX_WORKERS = int(os.getenv("MAX_WORKERS", "1"))
# --------------------------------------------------------------------------- #
# CACHE DIRECTORIES (avoids PermissionError)
# --------------------------------------------------------------------------- #
def _setup_cache_dirs() -> Dict[str, str]:
    os.environ.setdefault("HOME", "/home/user")
    CACHE_ROOT = os.getenv("CACHE_ROOT", "/tmp/.cache").rstrip("/")
    paths = {
        "root": CACHE_ROOT,
        "hf_home": f"{CACHE_ROOT}/huggingface",
        "hf_hub": f"{CACHE_ROOT}/huggingface/hub",
        "hf_tf": f"{CACHE_ROOT}/huggingface/transformers",
        "torch": f"{CACHE_ROOT}/torch",
        "st": f"{CACHE_ROOT}/sentence-transformers",
        "mpl": f"{CACHE_ROOT}/matplotlib",
    }
    for p in paths.values():
        try:
            os.makedirs(p, exist_ok=True)
        except Exception as e:
            LOG.warning("Could not create %s: %s", p, e)
    os.environ["HF_HOME"] = paths["hf_home"]
    os.environ["HF_HUB_CACHE"] = paths["hf_hub"]
    os.environ["TRANSFORMERS_CACHE"] = paths["hf_tf"]
    os.environ["TORCH_HOME"] = paths["torch"]
    os.environ["SENTENCE_TRANSFORMERS_HOME"] = paths["st"]
    os.environ["MPLCONFIGDIR"] = paths["mpl"]
    os.environ.setdefault("HF_HUB_DISABLE_SYMLINKS_WARNING", "1")
    os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
    LOG.info("Cache directories configured: %s", json.dumps(paths, indent=2))
    return paths

CACHE_PATHS = _setup_cache_dirs()
# --------------------------------------------------------------------------- #
# STATE CLASS IMPORT (fixed: relative import)
# --------------------------------------------------------------------------- #
# The file `index_state.py` lives in `app/core/`.
# From inside the `app` directory it can be imported through the `core` package.
from core.index_state import JobState  # <-- FIXED IMPORT
# --------------------------------------------------------------------------- #
# GLOBALS
# --------------------------------------------------------------------------- #
JOBS: Dict[str, JobState] = {}

def _now() -> str:
    return time.strftime("%H:%M:%S")

def _proj_dirs(project_id: str) -> Tuple[str, str, str]:
    base = os.path.join(DATA_ROOT, project_id)
    ds_dir = os.path.join(base, "dataset")
    fx_dir = os.path.join(base, "faiss")
    os.makedirs(ds_dir, exist_ok=True)
    os.makedirs(fx_dir, exist_ok=True)
    return base, ds_dir, fx_dir
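# Resulting on-disk layout per project (derived from _proj_dirs above):
#   {DATA_ROOT}/{project_id}/dataset/  -> data.jsonl + meta.json
#   {DATA_ROOT}/{project_id}/faiss/    -> emb.faiss + meta.json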
def _add_msg(st: JobState, msg: str) -> None:
    st.messages.append(f"[{_now()}] {msg}")
    LOG.info("[%s] %s", st.job_id, msg)
    DBG.debug("[%s] %s", st.job_id, msg)

def _set_stage(st: JobState, stage: str) -> None:
    st.stage = stage
    _add_msg(st, f"stage={stage}")

# --------------------------------------------------------------------------- #
# UTILITIES (chunking, normalization, etc.)
# --------------------------------------------------------------------------- #
def _chunk_text(text: str, size: int = 200, overlap: int = 20) -> List[str]:
    text = (text or "").replace("\r\n", "\n")
    tokens = list(text)
    if size <= 0:
        return [text] if text else []
    if overlap < 0:
        overlap = 0
    chunks = []
    i = 0
    while i < len(tokens):
        j = min(i + size, len(tokens))
        chunk = "".join(tokens[i:j]).strip()
        if chunk:
            chunks.append(chunk)
        if j == len(tokens):
            break
        i = j - overlap if (j - overlap) > i else j
    return chunks
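# Worked example of the character-level chunking above:
#   _chunk_text("abcdef", size=3, overlap=1) -> ["abc", "cde", "ef"]
# Each chunk restarts `overlap` characters before the previous chunk's end,
# so consecutive chunks share a small boundary context.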
def _l2_normalize(x: np.ndarray) -> np.ndarray:
    n = np.linalg.norm(x, axis=1, keepdims=True) + 1e-12
    return x / n
# --------------------------------------------------------------------------- #
# EMBEDDING PROVIDERS
# --------------------------------------------------------------------------- #
_ST_MODEL = None
_HF_TOKENIZER = None
_HF_MODEL = None

def _emb_dummy(texts: List[str], dim: int = EMB_DIM) -> np.ndarray:
    vecs = np.zeros((len(texts), dim), dtype="float32")
    for i, t in enumerate(texts):
        h = hashlib.sha1((t or "").encode("utf-8")).digest()
        rng = np.random.default_rng(int.from_bytes(h[:8], "little", signed=False))
        v = rng.standard_normal(dim).astype("float32")
        vecs[i] = v / (np.linalg.norm(v) + 1e-9)
    return vecs
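# The dummy provider is deterministic: the SHA-1 digest of the text seeds the
# RNG, so identical texts always map to the same unit-norm vector. Sketch:
#   a, b = _emb_dummy(["hello"]), _emb_dummy(["hello"])
#   assert np.allclose(a, b) and abs(np.linalg.norm(a[0]) - 1.0) < 1e-3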
def _get_st_model():
    global _ST_MODEL
    if _ST_MODEL is None:
        from sentence_transformers import SentenceTransformer
        _ST_MODEL = SentenceTransformer(EMB_MODEL, cache_folder=CACHE_PATHS["st"])
        LOG.info("[st] model loaded: %s (cache=%s)", EMB_MODEL, CACHE_PATHS["st"])
    return _ST_MODEL

def _emb_st(texts: List[str]) -> np.ndarray:
    model = _get_st_model()
    vecs = model.encode(
        texts,
        batch_size=max(1, EMB_BATCH),
        convert_to_numpy=True,
        normalize_embeddings=True,
        show_progress_bar=False,
    ).astype("float32")
    return vecs

def _get_hf_model():
    global _HF_TOKENIZER, _HF_MODEL
    if _HF_MODEL is None or _HF_TOKENIZER is None:
        from transformers import AutoTokenizer, AutoModel
        _HF_TOKENIZER = AutoTokenizer.from_pretrained(EMB_MODEL, cache_dir=CACHE_PATHS["hf_tf"])
        _HF_MODEL = AutoModel.from_pretrained(EMB_MODEL, cache_dir=CACHE_PATHS["hf_tf"])
        _HF_MODEL.eval()
        LOG.info("[hf] model loaded: %s (cache=%s)", EMB_MODEL, CACHE_PATHS["hf_tf"])
    return _HF_TOKENIZER, _HF_MODEL

def _mean_pool(last_hidden_state: np.ndarray, attention_mask: np.ndarray) -> np.ndarray:
    mask = attention_mask[..., None].astype(last_hidden_state.dtype)
    summed = (last_hidden_state * mask).sum(axis=1)
    counts = mask.sum(axis=1).clip(min=1e-9)
    return summed / counts
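# Mean pooling averages token vectors while ignoring padding. Sketch with one
# sequence of 3 tokens (hidden size 2) whose last token is padding:
#   h = np.array([[[1., 1.], [3., 3.], [9., 9.]]])  # (batch=1, tokens=3, hidden=2)
#   m = np.array([[1, 1, 0]])                       # attention mask
#   _mean_pool(h, m) -> [[2., 2.]]                  # only the 2 real tokens count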
def _emb_hf(texts: List[str]) -> np.ndarray:
    import torch
    tok, mod = _get_hf_model()
    all_vecs: List[np.ndarray] = []
    bs = max(1, EMB_BATCH)
    with torch.no_grad():
        for i in range(0, len(texts), bs):
            batch = texts[i:i + bs]
            enc = tok(batch, padding=True, truncation=True, return_tensors="pt")
            out = mod(**enc)
            last = out.last_hidden_state  # (b, t, h)
            pooled = _mean_pool(last.numpy(), enc["attention_mask"].numpy())
            all_vecs.append(pooled.astype("float32"))
    return np.concatenate(all_vecs, axis=0)
# --------------------------------------------------------------------------- #
# DATASET / FAISS I/O
# --------------------------------------------------------------------------- #
def _save_dataset(ds_dir: str, rows: List[Dict[str, Any]], store_text: bool = True) -> None:
    os.makedirs(ds_dir, exist_ok=True)
    data_path = os.path.join(ds_dir, "data.jsonl")
    with open(data_path, "w", encoding="utf-8") as f:
        for r in rows:
            if not store_text:
                r = {k: v for k, v in r.items() if k != "text"}
            f.write(json.dumps(r, ensure_ascii=False) + "\n")
    meta = {"format": "jsonl", "columns": ["path", "text", "chunk_id"], "count": len(rows)}
    with open(os.path.join(ds_dir, "meta.json"), "w", encoding="utf-8") as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)

def _load_dataset(ds_dir: str) -> List[Dict[str, Any]]:
    data_path = os.path.join(ds_dir, "data.jsonl")
    if not os.path.isfile(data_path):
        return []
    out: List[Dict[str, Any]] = []
    with open(data_path, "r", encoding="utf-8") as f:
        for line in f:
            try:
                out.append(json.loads(line))
            except Exception:
                continue
    return out
def _save_faiss(fx_dir: str, xb: np.ndarray, meta: Dict[str, Any]) -> None:
    os.makedirs(fx_dir, exist_ok=True)
    idx_path = os.path.join(fx_dir, "emb.faiss")
    nlist, m, nbits = 100, 8, 8  # requires dim % m == 0 (default EMB_DIM=64 qualifies)
    if xb.shape[0] < max(nlist, 2 ** nbits):
        # Too few vectors to train IVF clustering and the PQ codebooks
        # (faiss raises on under-sized training sets): fall back to an
        # exact flat index so small corpora still work.
        index = faiss.IndexFlatIP(xb.shape[1])
        index.add(xb)
        meta.update({"index_type": "Flat"})
    else:
        # ------------------- QUANTIZED INDEX (IVF-PQ) ------------------- #
        quantizer = faiss.IndexFlatIP(xb.shape[1])  # inner product (cosine if normalized)
        index = faiss.IndexIVFPQ(quantizer, xb.shape[1], nlist, m, nbits)
        # train on a sub-sample (at most 10k vectors)
        rng = np.random.default_rng(0)
        train = xb[rng.choice(xb.shape[0], min(10_000, xb.shape[0]), replace=False)]
        index.train(train)
        index.add(xb)
        meta.update({"index_type": "IVF_PQ", "nlist": nlist, "m": m, "nbits": nbits})
    faiss.write_index(index, idx_path)
    with open(os.path.join(fx_dir, "meta.json"), "w", encoding="utf-8") as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)
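# Sizing note: IVF-PQ stores each vector as m * nbits = 8 * 8 = 64 bits
# (8 bytes) instead of dim * 4 float32 bytes, at the cost of approximate
# scores. Recall is governed by `nprobe` (clusters visited per query,
# default 1). A hedged tuning sketch after loading:
#   index = _load_faiss(fx_dir)
#   if hasattr(index, "nprobe"):
#       index.nprobe = 10  # more clusters visited -> better recall, slower search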
def _load_faiss(fx_dir: str) -> faiss.Index:
    idx_path = os.path.join(fx_dir, "emb.faiss")
    if not os.path.isfile(idx_path):
        raise FileNotFoundError(f"FAISS index not found: {idx_path}")
    # mmap -> the index stays on disk; RAM is only used for queries
    return faiss.read_index(idx_path, faiss.IO_FLAG_MMAP)
def _tar_dir_to_bytes(dir_path: str) -> bytes:
    bio = io.BytesIO()
    with tarfile.open(fileobj=bio, mode="w:gz", compresslevel=9) as tar:
        tar.add(dir_path, arcname=os.path.basename(dir_path))
    bio.seek(0)
    return bio.read()
# --------------------------------------------------------------------------- #
# THREAD POOL (asynchronous execution)
# --------------------------------------------------------------------------- #
EXECUTOR = ThreadPoolExecutor(max_workers=max(1, MAX_WORKERS))
LOG.info("ThreadPoolExecutor initialized: max_workers=%s", MAX_WORKERS)

def _do_index_job(
    st: JobState,
    files: List[Dict[str, str]],
    chunk_size: int,
    overlap: int,
    batch_size: int,
    store_text: bool,
) -> None:
    """
    Full pipeline:
      1. Chunking
      2. Embedding (dummy / st / hf)
      3. Dimensionality reduction (PCA) if needed
      4. Dataset save (text optional)
      5. Quantized FAISS index + mmap
    NOTE: `batch_size` is currently unused; the providers read EMB_BATCH instead.
    """
    try:
        base, ds_dir, fx_dir = _proj_dirs(st.project_id)
        # ------------------- 1. Chunking -------------------
        _set_stage(st, "chunking")
        rows: List[Dict[str, Any]] = []
        st.total_files = len(files)
        for f in files:
            path = (f.get("path") or "unknown").strip()
            txt = f.get("text") or ""
            chunks = _chunk_text(txt, size=chunk_size, overlap=overlap)
            for i, ck in enumerate(chunks):
                rows.append({"path": path, "text": ck, "chunk_id": i})
        st.total_chunks = len(rows)
        _add_msg(st, f"Total chunks = {st.total_chunks}")
        # ------------------- 2. Embedding -------------------
        _set_stage(st, "embedding")
        texts = [r["text"] for r in rows]
        if EMB_PROVIDER == "dummy":
            xb = _emb_dummy(texts, dim=EMB_DIM)
        elif EMB_PROVIDER == "st":
            xb = _emb_st(texts)
        else:
            xb = _emb_hf(texts)
        # ------------------- 3. PCA reduction (if needed) -------------------
        if xb.shape[1] != EMB_DIM:
            from sklearn.decomposition import PCA
            orig_dim = xb.shape[1]  # capture before the transform, for the log
            pca = PCA(n_components=EMB_DIM, random_state=0)
            xb = pca.fit_transform(xb).astype("float32")
            LOG.info("PCA reduction applied: %d -> %d dimensions", orig_dim, EMB_DIM)
        st.embedded = xb.shape[0]
        _add_msg(st, f"Embeddings generated: {st.embedded}")
        # ------------------- 4. Dataset save -------------------
        _save_dataset(ds_dir, rows, store_text=store_text)
        _add_msg(st, f"Dataset saved to {ds_dir}")
        # ------------------- 5. FAISS index -------------------
        _set_stage(st, "indexing")
        meta = {
            "dim": int(xb.shape[1]),
            "count": int(xb.shape[0]),
            "provider": EMB_PROVIDER,
            "model": EMB_MODEL if EMB_PROVIDER != "dummy" else None,
        }
        _save_faiss(fx_dir, xb, meta)
        st.indexed = int(xb.shape[0])
        _add_msg(st, f"FAISS written to {os.path.join(fx_dir, 'emb.faiss')}")
        _set_stage(st, "done")
        st.finished_at = time.time()
    except Exception as e:
        LOG.exception("Job %s failed", st.job_id)
        st.errors.append(str(e))
        _add_msg(st, f"❌ Exception: {e}")
        st.stage = "failed"
        st.finished_at = time.time()
def _submit_job(
    project_id: str,
    files: List[Dict[str, str]],
    chunk_size: int,
    overlap: int,
    batch_size: int,
    store_text: bool,
) -> str:
    job_id = hashlib.sha1(f"{project_id}{time.time()}".encode()).hexdigest()[:12]
    st = JobState(job_id=job_id, project_id=project_id, stage="pending", messages=[])
    JOBS[job_id] = st
    LOG.info("Job %s created – %d files", job_id, len(files))
    # Mark the job as queued *before* submitting: the worker may start
    # immediately, and setting the stage afterwards would overwrite it.
    st.stage = "queued"
    EXECUTOR.submit(
        _do_index_job,
        st,
        files,
        chunk_size,
        overlap,
        batch_size,
        store_text,
    )
    return job_id
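# Job lifecycle, as driven by the code above:
#   pending -> queued -> chunking -> embedding -> indexing -> done | failed
# In-process polling sketch (over HTTP, use GET /status/{job_id} instead):
#   st = JOBS[job_id]
#   while st.stage not in ("done", "failed"):
#       time.sleep(1)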
# --------------------------------------------------------------------------- #
# FASTAPI
# --------------------------------------------------------------------------- #
fastapi_app = FastAPI(title="remote-indexer-async", version="3.0.0")
fastapi_app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class FileItem(BaseModel):
    path: str
    text: str

class IndexRequest(BaseModel):
    project_id: str
    files: List[FileItem]
    chunk_size: int = 200
    overlap: int = 20
    batch_size: int = 32
    store_text: bool = True  # can be disabled via the payload or env

# NOTE: the route decorators were missing from this listing; the paths below
# are assumptions inferred from the function names.
@fastapi_app.get("/health")
def health():
    return {
        "ok": True,
        "service": "remote-indexer-async",
        "provider": EMB_PROVIDER,
        "model": EMB_MODEL if EMB_PROVIDER != "dummy" else None,
        "cache_root": os.getenv("CACHE_ROOT", "/tmp/.cache"),
        "workers": MAX_WORKERS,
        "data_root": DATA_ROOT,
        "emb_dim": EMB_DIM,
    }

@fastapi_app.post("/index")
def index(req: IndexRequest):
    """
    Asynchronous launch: returns a `job_id` immediately.
    """
    try:
        files = [fi.model_dump() for fi in req.files]
        job_id = _submit_job(
            project_id=req.project_id,
            files=files,
            chunk_size=int(req.chunk_size),
            overlap=int(req.overlap),
            batch_size=int(req.batch_size),
            store_text=bool(req.store_text),
        )
        return {"job_id": job_id}
    except Exception as e:
        LOG.exception("Index submission failed")
        raise HTTPException(status_code=500, detail=str(e))

@fastapi_app.get("/status/{job_id}")
def status(job_id: str):
    st = JOBS.get(job_id)
    if not st:
        raise HTTPException(status_code=404, detail="unknown job")
    return JSONResponse(st.model_dump())
class SearchRequest(BaseModel):
    project_id: str
    query: str
    k: int = 5

@fastapi_app.post("/search")
def search(req: SearchRequest):
    base, ds_dir, fx_dir = _proj_dirs(req.project_id)
    # Check that the index exists
    if not (os.path.isfile(os.path.join(fx_dir, "emb.faiss")) and
            os.path.isfile(os.path.join(ds_dir, "data.jsonl"))):
        raise HTTPException(status_code=409, detail="Index not ready (try again later)")
    rows = _load_dataset(ds_dir)
    if not rows:
        raise HTTPException(status_code=404, detail="dataset not found")
    # Embed the query (same provider as the index)
    if EMB_PROVIDER == "dummy":
        q = _emb_dummy([req.query], dim=EMB_DIM)[0:1, :]
    elif EMB_PROVIDER == "st":
        q = _emb_st([req.query])[0:1, :]
    else:
        q = _emb_hf([req.query])[0:1, :]
    # FAISS search (mmap). NOTE: if PCA reduction was applied at indexing time,
    # the raw query embedding will not match the index dimension and the check
    # below will fire, because the fitted PCA model is not persisted.
    index = _load_faiss(fx_dir)
    if index.d != q.shape[1]:
        raise HTTPException(
            status_code=500,
            detail=f"dimension mismatch: index.d={index.d} vs query={q.shape[1]}",
        )
    scores, ids = index.search(q, int(max(1, req.k)))
    ids = ids[0].tolist()
    scores = scores[0].tolist()
    out = []
    for idx, sc in zip(ids, scores):
        if idx < 0 or idx >= len(rows):
            continue
        r = rows[idx]
        out.append({"path": r.get("path"), "text": r.get("text"), "score": float(sc)})
    return {"results": out}
# --------------------------------------------------------------------------- #
# EXPORT ARTIFACTS (gzip)
# --------------------------------------------------------------------------- #
@fastapi_app.get("/download/dataset/{project_id}")
def download_dataset(project_id: str):
    _, ds_dir, _ = _proj_dirs(project_id)
    if not os.path.isdir(ds_dir):
        raise HTTPException(status_code=404, detail="Dataset not found")
    buf = _tar_dir_to_bytes(ds_dir)
    hdr = {"Content-Disposition": f'attachment; filename="{project_id}_dataset.tgz"'}
    return StreamingResponse(io.BytesIO(buf), media_type="application/gzip", headers=hdr)

@fastapi_app.get("/download/faiss/{project_id}")
def download_faiss(project_id: str):
    _, _, fx_dir = _proj_dirs(project_id)
    if not os.path.isdir(fx_dir):
        raise HTTPException(status_code=404, detail="FAISS index not found")
    buf = _tar_dir_to_bytes(fx_dir)
    hdr = {"Content-Disposition": f'attachment; filename="{project_id}_faiss.tgz"'}
    return StreamingResponse(io.BytesIO(buf), media_type="application/gzip", headers=hdr)
# --------------------------------------------------------------------------- #
# GRADIO UI (optional – quick testing)
# --------------------------------------------------------------------------- #
def _ui_index(project_id: str, sample_text: str):
    files = [{"path": "sample.txt", "text": sample_text}]
    try:
        req = IndexRequest(project_id=project_id, files=[FileItem(**f) for f in files])
    except Exception as e:
        return f"❌ Validation: {e}"
    try:
        res = index(req)
        return f"✅ Job started: {res['job_id']}"
    except Exception as e:
        return f"❌ Error: {e}"

def _ui_search(project_id: str, query: str, k: int):
    try:
        res = search(SearchRequest(project_id=project_id, query=query, k=int(k)))
        return json.dumps(res, ensure_ascii=False, indent=2)
    except Exception as e:
        return f"❌ Error: {e}"

with gr.Blocks(title="Remote Indexer (Async – Optimized)", analytics_enabled=False) as ui:
    gr.Markdown("## Remote Indexer — Async (quantized FAISS, mmap, optional text)")
    with gr.Row():
        pid = gr.Textbox(label="Project ID", value="DEMO")
        txt = gr.Textbox(label="Sample text", lines=4, value="Alpha bravo charlie delta echo foxtrot.")
    btn_idx = gr.Button("Run index (sample)")
    out_idx = gr.Textbox(label="Result")
    btn_idx.click(_ui_index, inputs=[pid, txt], outputs=[out_idx])
    with gr.Row():
        q = gr.Textbox(label="Query", value="alpha")
        k = gr.Slider(1, 20, value=5, step=1, label="Top-K")
    btn_q = gr.Button("Search")
    out_q = gr.Code(label="Results")
    btn_q.click(_ui_search, inputs=[pid, q, k], outputs=[out_q])

# Mount the Gradio UI on the same FastAPI server
fastapi_app = gr.mount_gradio_app(fastapi_app, ui, path="/ui")
# --------------------------------------------------------------------------- #
# MAIN
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
    import uvicorn
    LOG.info("Starting Uvicorn – port %s – UI available at /ui", PORT)
    uvicorn.run(fastapi_app, host="0.0.0.0", port=PORT)