import gradio as gr
from sentence_transformers import CrossEncoder
import torch
import requests
import ast
import os
# -------------------------------
# MODELS
# -------------------------------
CROSS_ENCODER_RERANK = "cross-encoder/ms-marco-MiniLM-L-12-v2"
JINA_MODEL = "jina-reranker-m0"
JINA_API_KEY = os.getenv("JINA_API_KEY")  # set in HF Space settings
JINA_ENDPOINT = "https://api.jina.ai/v1/rerank"
NV_MODEL = "nvidia/NV-RerankQA-Mistral-4B-v3"  # full repo id on the Hugging Face Hub
HF_API_KEY = os.getenv("HF_API_KEY")  # set in HF Space settings
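
# A minimal requirements.txt for this Space would presumably cover the imports
# above (an assumption, not part of the original file):
#   gradio
#   sentence-transformers
#   torch
#   requests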
# -------------------------------
# Load models
# -------------------------------
ce_rerank = CrossEncoder(CROSS_ENCODER_RERANK)
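# The cross-encoder weights are downloaded from the Hugging Face Hub the first
# time the Space boots; the two rerankers below are called over HTTP at request
# time instead of being loaded locally.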
# -------------------------------
# Pipeline Function
# -------------------------------
def evaluate_models(query, docs_str):
    """Score each document against the query with all three rerankers.

    `docs_str` is the raw textbox value and must be a Python list literal.
    Returns a dict mapping model name -> scores aligned to the input docs.
    """
    try:
        docs = ast.literal_eval(docs_str)
        assert isinstance(docs, list), "Input must be a Python list of strings"
    except Exception as e:
        return {"Error": f"⚠️ Error parsing documents list: {e}"}
    results = {}

    # 1. CrossEncoder reranker (MS MARCO)
    # predict() returns raw logits for this model, so apply a sigmoid to map
    # the scores into [0, 1] like the API-based rerankers below.
    ce_rerank_scores = ce_rerank.predict([(query, d) for d in docs])
    ce_rerank_scores = [round(torch.sigmoid(torch.tensor(s)).item(), 4) for s in ce_rerank_scores]
    results["CrossEncoder (MS MARCO)"] = ce_rerank_scores
    # 2. Jina Reranker
    if JINA_API_KEY:
        headers = {"Authorization": f"Bearer {JINA_API_KEY}", "Content-Type": "application/json"}
        payload = {"model": JINA_MODEL, "query": query, "documents": docs}
        try:
            r = requests.post(JINA_ENDPOINT, headers=headers, json=payload, timeout=30)
            r.raise_for_status()
            # Each result carries the index of the original document, so scatter
            # the relevance scores back into input order.
            jina_scores = [0] * len(docs)
            for res in r.json()["results"]:
                jina_scores[res["index"]] = round(res["relevance_score"], 4)
            results["Jina Reranker"] = jina_scores
        except Exception as e:
            results["Jina Reranker"] = [f"Error: {e}"]
    else:
        results["Jina Reranker"] = ["Error: Missing JINA_API_KEY"]
    # 3. NV RerankQA Mistral-4B-v3 (HF Inference API)
    if HF_API_KEY:
        try:
            hf_endpoint = f"https://api-inference.huggingface.co/models/{NV_MODEL}"
            headers = {"Authorization": f"Bearer {HF_API_KEY}"}
            payload = {"inputs": {"query": query, "documents": docs}}
            r = requests.post(hf_endpoint, headers=headers, json=payload, timeout=60)
            r.raise_for_status()
            # Assumes the endpoint answers with a list of {"score": ...} entries
            # aligned to the input documents.
            nv_scores = [round(res["score"], 4) for res in r.json()]
            results["NV-RerankQA-Mistral-4B-v3"] = nv_scores
        except Exception as e:
            results["NV-RerankQA-Mistral-4B-v3"] = [f"Error: {e}"]
    else:
        results["NV-RerankQA-Mistral-4B-v3"] = ["Error: Missing HF_API_KEY"]
    return results
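
# Illustrative direct call (hypothetical inputs; the scores shown are made up):
#
#   evaluate_models(
#       "who wrote Hamlet?",
#       '["Hamlet is a tragedy by William Shakespeare.", "Paris is in France."]',
#   )
#   # -> {"CrossEncoder (MS MARCO)": [0.97, 0.0012],
#   #     "Jina Reranker": [0.91, 0.05],
#   #     "NV-RerankQA-Mistral-4B-v3": [0.95, 0.01]}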
# -------------------------------
# Gradio UI
# -------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## Ranking Battle (Aligned Scores)\nOutputs only **scores aligned to input docs** from 3 models.")
    query = gr.Textbox(label="Query", lines=2, placeholder="Enter your search query...")
    docs = gr.Textbox(
        label="Documents (Python list)",
        lines=6,
        placeholder='Example: ["Doc one text", "Doc two text", "Doc three text"]'
    )
    out = gr.JSON(label="Model Scores")
    btn = gr.Button("Evaluate")
    btn.click(evaluate_models, inputs=[query, docs], outputs=out)

demo.launch()
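
# To try this outside of a Space (assuming the file is saved as app.py and both
# API keys are exported in the environment), run `python app.py` and open the
# local URL that Gradio prints.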