import os, re, json, traceback, pathlib
from functools import lru_cache
import gradio as gr
import torch
import regex as re2  # pip install regex
from settings import SNAPSHOT_PATH, PERSIST_CONTENT
from audit_log import log_event, hash_summary
from privacy import redact_text
# ---------- Environment / cache (Spaces-safe, writable) ----------
HOME = pathlib.Path.home()  # /home/user on Spaces
HF_HOME = str(HOME / ".cache" / "huggingface")
HF_HUB_CACHE = str(HOME / ".cache" / "huggingface" / "hub")
HF_TRANSFORMERS = str(HOME / ".cache" / "huggingface" / "transformers")
ST_HOME = str(HOME / ".cache" / "sentence-transformers")
GRADIO_TMP = str(HOME / "app" / "gradio")  # you can switch to "/tmp/gradio" if preferred
GRADIO_CACHE = GRADIO_TMP
os.environ.setdefault("HF_HOME", HF_HOME)
os.environ.setdefault("HF_HUB_CACHE", HF_HUB_CACHE)
os.environ.setdefault("TRANSFORMERS_CACHE", HF_TRANSFORMERS)  # deprecated warning is harmless
os.environ.setdefault("SENTENCE_TRANSFORMERS_HOME", ST_HOME)
os.environ.setdefault("GRADIO_TEMP_DIR", GRADIO_TMP)
os.environ.setdefault("GRADIO_CACHE_DIR", GRADIO_CACHE)
# Disable experimental xet; prefer stable transfer
os.environ.setdefault("HF_HUB_ENABLE_XET", "0")
os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
for p in [HF_HOME, HF_HUB_CACHE, HF_TRANSFORMERS, ST_HOME, GRADIO_TMP, GRADIO_CACHE]:
    try:
        os.makedirs(p, exist_ok=True)
    except Exception:
        pass
# Optional Cohere
try:
    import cohere
    _HAS_COHERE = True
except Exception:
    _HAS_COHERE = False
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
from safety import safety_filter, refusal_reply
from retriever import init_retriever, retrieve_context
from decision_math import compute_operational_numbers
from prompt_templates import build_system_preamble
from upload_ingest import extract_text_from_files
from session_rag import SessionRAG
from mdsi_analysis import capacity_projection, cost_estimate, outcomes_summary
# ---------- Config ----------
MODEL_ID = os.getenv("MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")  # local fallback
HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)
MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "512"))
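# Routing: if COHERE_API_KEY is set and the cohere SDK imports, replies go through the
# hosted Cohere model first; otherwise (or on any Cohere failure) the app falls back to
# the local MODEL_ID, which requires HUGGINGFACE_HUB_TOKEN / HF_TOKEN to be set.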
# ---------- System Master (two-phase, LLM-only behavior) ----------
SYSTEM_MASTER = """
SYSTEM ROLE (fixed, always on)
You are ClarityOps, a medical analytics system that interacts only via this chat.
Absolute rules:
- Use ONLY information provided in this conversation (scenario text + uploaded files).
- Never invent data. If something required is missing after clarifications, output the literal token: INSUFFICIENT_DATA.
- Always run in TWO PHASES:
  Phase 1: Ask up to 5 concise clarification questions, grouped by category (Prioritization, Capacity, Cost, Clinical, Recommendations). Then STOP and WAIT.
  Phase 2: After answers are provided, produce the final structured analysis exactly in the required format.
Core behavior:
- Read and synthesize any user-uploaded files (e.g., CSV/XLSX/PDF) relevant to the scenario.
- Prefer analytics/longitudinal recommendations (risk targeting, follow-up, clustering) over generic ops advice.
- Show all calculations explicitly for capacity and costs (e.g., “6 teams × 8 clients/day × 60 days = 2,880”).
- Use correct clinical units and plausible ranges.
- Include a brief “Provenance” section mapping each key output to scenario text, files, and/or clarified answers.
Medical guardrails (always apply):
- Units: BP in mmHg, A1c in %, BMI in kg/m², Total Cholesterol in mmol/L (or as provided), Percentages in %.
- Plausible ranges: A1c 3–20 %, SBP 60–250 mmHg, DBP 30–150 mmHg, BMI 10–70 kg/m², Total Chol 2–12 mmol/L.
- Privacy: avoid PHI; aggregate only; apply small-cell suppression where cohort < 10 (describe at a higher level).
- When data includes mixed or ambiguous indicators, ask to confirm preferred indicators (e.g., obesity/metabolic syndrome vs self-reported diabetes).
Formatting hard rules:
- Phase 1 output MUST include the header line: “Clarification Questions”
- Phase 2 output MUST include the header line: “Structured Analysis”
- Phase 2 MUST follow this exact section order:
  1. Prioritization
  2. Capacity
  3. Cost
  4. Clinical Benefits
  5. ClarityOps Top 3 Recommendations
  (Include a short Provenance block at the end.)
""".strip()
# ---------- Helpers ----------
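# pick_dtype_and_map() returns a (torch dtype, device_map) pair for from_pretrained:
# fp16 with device_map="auto" on CUDA, fp16 pinned to Apple MPS via {"": "mps"},
# and fp32 on CPU otherwise.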
def pick_dtype_and_map():
    if torch.cuda.is_available():
        return torch.float16, "auto"
    if torch.backends.mps.is_available():
        return torch.float16, {"": "mps"}
    return torch.float32, "cpu"
def is_identity_query(message, history):
    patterns = [
        r"\bwho\s+are\s+you\b", r"\bwhat\s+are\s+you\b", r"\bwhat\s+is\s+your\s+name\b",
        r"\bwho\s+is\s+this\b", r"\bidentify\s+yourself\b", r"\btell\s+me\s+about\s+yourself\b",
        r"\bdescribe\s+yourself\b", r"\band\s+you\s*\?\b", r"\byour\s+name\b",
        r"\bwho\s+am\s+i\s+chatting\s+with\b",
    ]
    def match(t): return any(re.search(p, (t or "").strip().lower()) for p in patterns)
    if match(message): return True
    if history:
        last_user = history[-1][0] if isinstance(history[-1], (list, tuple)) else None
        if match(last_user): return True
    return False
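# History is expected in Gradio's tuple format: a list of (user, assistant) pairs.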
def _iter_user_assistant(history):
    for item in (history or []):
        if isinstance(item, (list, tuple)):
            u = item[0] if len(item) > 0 else ""
            a = item[1] if len(item) > 1 else ""
            yield u, a
def _sanitize_text(s: str) -> str:
    if not isinstance(s, str):
        return s
    # Strip Unicode control characters except newline and tab. The character-class
    # subtraction ([\p{C}--[\n\t]]) requires the regex module's V1 behaviour.
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)
def _history_to_prompt(message, history):
    parts = [f"System: {SYSTEM_MASTER}"]
    for u, a in _iter_user_assistant(history):
        if u: parts.append(f"User: {u}")
        if a: parts.append(f"Assistant: {a}")
    parts.append(f"User: {message}")
    parts.append("Assistant:")
    return "\n".join(parts)
# ---------- Cohere first ----------
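# Hosted path. Uses the Cohere SDK's Client.chat API with a single flattened prompt;
# the attribute probing below (.text / .reply / .generations) is defensive, since the
# shape of the response object has varied across Cohere SDK versions.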
def cohere_chat(message, history):
    if not USE_HOSTED_COHERE:
        return None
    try:
        client = cohere.Client(api_key=COHERE_API_KEY)
        prompt = _history_to_prompt(message, history)
        resp = client.chat(
            model="command-r7b-12-2024",
            message=prompt,
            temperature=0.3,
            max_tokens=MAX_NEW_TOKENS,
        )
        if hasattr(resp, "text") and resp.text: return resp.text.strip()
        if hasattr(resp, "reply") and resp.reply: return resp.reply.strip()
        if hasattr(resp, "generations") and resp.generations: return resp.generations[0].text.strip()
        return None
    except Exception:
        return None
# ---------- Local model (HF) ----------
def load_local_model():
    if not HF_TOKEN:
        raise RuntimeError("HUGGINGFACE_HUB_TOKEN is not set.")
    login(token=HF_TOKEN, add_to_git_credential=False)
    dtype, device_map = pick_dtype_and_map()
    tok = AutoTokenizer.from_pretrained(
        MODEL_ID, token=HF_TOKEN, use_fast=True, model_max_length=8192,
        padding_side="left", trust_remote_code=True,
        cache_dir=os.environ.get("TRANSFORMERS_CACHE")
    )
    try:
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN, device_map=device_map,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
            cache_dir=os.environ.get("TRANSFORMERS_CACHE")
        )
    except Exception:
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
            cache_dir=os.environ.get("TRANSFORMERS_CACHE")
        )
        # The fallback load has no device_map, so place the model explicitly here.
        # Keeping .to() inside this branch avoids overriding the device_map placement
        # (e.g. moving an MPS-mapped model back to CPU) used in the primary path.
        mdl.to("cuda" if torch.cuda.is_available() else "cpu")
    if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
        mdl.config.eos_token_id = tok.eos_token_id
    return mdl, tok
def build_inputs(tokenizer, message, history):
    msgs = [{"role": "system", "content": SYSTEM_MASTER}]
    for u, a in _iter_user_assistant(history):
        if u: msgs.append({"role": "user", "content": u})
        if a: msgs.append({"role": "assistant", "content": a})
    msgs.append({"role": "user", "content": message})
    return tokenizer.apply_chat_template(
        msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )
def local_generate(model, tokenizer, input_ids, max_new_tokens=MAX_NEW_TOKENS):
    input_ids = input_ids.to(model.device)
    with torch.no_grad():
        out = model.generate(
            input_ids=input_ids, max_new_tokens=max_new_tokens,
            do_sample=True, temperature=0.3, top_p=0.9,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    gen_only = out[0, input_ids.shape[-1]:]
    return tokenizer.decode(gen_only, skip_special_tokens=True).strip()
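# Illustrative use of the local fallback pipeline (assumes HF_TOKEN is set and the
# model fits on the available device):
#   model, tok = load_local_model()
#   ids = build_inputs(tok, "Summarize current bed capacity.", history=[])
#   text = local_generate(model, tok, ids, max_new_tokens=128)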
# ---------- Snapshot, retriever, RAG ----------
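# _load_snapshot() reads the operational snapshot JSON from SNAPSHOT_PATH; if the file
# is missing or unreadable it falls back to the inline demo values below (bed counts,
# ED census, staffing shortfalls, etc.) so the app can still compute operational numbers.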
def _load_snapshot(path=SNAPSHOT_PATH):
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return {
            "timestamp": None, "beds_total": 400, "staffed_ratio": 1.0, "occupied_pct": 0.97,
            "ed_census": 62, "ed_admits_waiting": 19, "avg_ed_wait_hours": 8,
            "discharge_ready_today": 11, "discharge_barriers": {"allied_health": 7, "placement": 4},
            "rn_shortfall": {"med_ward_A": 1, "med_ward_B": 1},
            "forecast_admits_next_24h": {"respiratory": 14, "other": 9},
            "isolation_needs_waiting": {"contact": 3, "airborne": 1}, "telemetry_needed_waiting": 5
        }
init_retriever()
_session_rag = SessionRAG()
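# _mdsi_block() pre-computes capacity, cost, and outcomes figures for the mobile
# diabetes screening (MDSI) scenario via mdsi_analysis. The literal arguments below
# (conservative/base/optimistic capacity runs and the cost_for_1200 estimate) are
# scenario-specific assumptions; the resulting JSON is injected into the prompt only
# when the user's message mentions diabetes / MDSI / mobile screening.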
def _mdsi_block():
    base_capacity = capacity_projection(18, 48, 6)
    cons_capacity = capacity_projection(12, 48, 6)
    opt_capacity = capacity_projection(24, 48, 6)
    cost_1200 = cost_estimate(1200, 74.0, 75000.0)
    outcomes = outcomes_summary()
    return json.dumps({
        "capacity_projection": {"conservative": cons_capacity, "base": base_capacity, "optimistic": opt_capacity},
        "cost_for_1200": cost_1200,
        "outcomes_summary": outcomes
    }, indent=2)
# ---------- Core chat logic (two-phase) ----------
def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answers=False):
    """
    awaiting_answers:
      - False: Phase 1 -> generate clarification questions and WAIT
      - True:  Phase 2 -> consume clarifications and produce structured analysis
    """
    try:
        log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})
        # Safety (input)
        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            ans = refusal_reply(reason_in)
            return history + [(user_msg, ans)], awaiting_answers
        # Identity short-circuit
        if is_identity_query(safe_in, history):
            ans = "I am ClarityOps, your strategic decision-making AI partner."
            return history + [(user_msg, ans)], awaiting_answers
        # Ingest uploads (text + artifacts like CSV headers)
        if uploaded_files_paths:
            ing = extract_text_from_files(uploaded_files_paths)
            chunks = ing.get("chunks", []) if isinstance(ing, dict) else (ing or [])
            artifacts = ing.get("artifacts", []) if isinstance(ing, dict) else []
            if chunks:
                _session_rag.add_docs(chunks)
            if artifacts:
                _session_rag.register_artifacts(artifacts)
            log_event("uploads_added", None, {"chunks": len(chunks), "artifacts": len(artifacts)})
        # Columns helper
        if re.search(r"\b(columns?|headers?)\b", (safe_in or "").lower()):
            cols = _session_rag.get_latest_csv_columns()
            if cols:
                return history + [(user_msg, "Here are the column names from your most recent CSV upload:\n\n- " + "\n- ".join(cols))], awaiting_answers
        # Session retrieval to enrich the system preamble
        session_snips = "\n---\n".join(_session_rag.retrieve(
            "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics",
            k=6
        ))
        snapshot = _load_snapshot()
        policy_context = retrieve_context(
            "mobile diabetes screening Indigenous community outreach cultural safety data governance outcomes"
        )
        computed = compute_operational_numbers(snapshot)
        user_lower = (safe_in or "").lower()
        mdsi_extra = _mdsi_block() if ("diabetes" in user_lower or "mdsi" in user_lower or "mobile screening" in user_lower) else ""
        scenario_block = safe_in or ""
        system_preamble = build_system_preamble(
            snapshot=snapshot,
            policy_context=policy_context,
            computed_numbers=computed,
            scenario_text=scenario_block + (f"\n\nExecutive Pre-Computed Blocks:\n{mdsi_extra}" if mdsi_extra else ""),
            session_snips=session_snips
        )
        # Phase directive
        if not awaiting_answers:
            phase_directive = (
                "\n\n[INSTRUCTION TO MODEL]\n"
                "Produce **Phase 1** only: output a header 'Clarification Questions' and ask up to 5 concise, grouped questions "
                "(Prioritization, Capacity, Cost, Clinical, Recommendations). Then STOP and WAIT.\n"
            )
        else:
            phase_directive = (
                "\n\n[INSTRUCTION TO MODEL]\n"
                "Produce **Phase 2** only: output a header 'Structured Analysis' and follow the exact section order "
                "(Prioritization, Capacity, Cost, Clinical Benefits, ClarityOps Top 3 Recommendations). "
                "Use uploaded files + the user's latest answers as authoritative. Show calculations, units, and a brief Provenance.\n"
            )
        augmented_user = SYSTEM_MASTER + "\n\n" + system_preamble + "\n\nUser message:\n" + safe_in + phase_directive
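        # Note: SYSTEM_MASTER is prepended here and is also injected as the system turn
        # by _history_to_prompt() / build_inputs(), so both the hosted and local paths
        # see the full rule set twice in the final prompt.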
        # Call LLM
        out = cohere_chat(augmented_user, history)
        if not out:
            model, tokenizer = load_local_model()
            inputs = build_inputs(tokenizer, augmented_user, history)
            out = local_generate(model, tokenizer, inputs, max_new_tokens=MAX_NEW_TOKENS)
        # Clean + sanitize
        if isinstance(out, str):
            for tag in ("Assistant:", "System:", "User:"):
                if out.startswith(tag):
                    out = out[len(tag):].strip()
        out = _sanitize_text(out)
        # Safety (output)
        safe_out, blocked_out, reason_out = safety_filter(out, mode="output")
        if blocked_out:
            safe_out = refusal_reply(reason_out)
        # Flip phase state based on headers
        new_awaiting = awaiting_answers
        low = safe_out.lower()
        if not awaiting_answers and "clarification questions" in low:
            new_awaiting = True
        elif awaiting_answers and "structured analysis" in low:
            new_awaiting = False
        log_event("assistant_reply", None, {
            **hash_summary("prompt", augmented_user if not PERSIST_CONTENT else ""),
            **hash_summary("reply", safe_out if not PERSIST_CONTENT else ""),
            "awaiting_next_phase": new_awaiting
        })
        return history + [(user_msg, safe_out)], new_awaiting
    except Exception as e:
        err = f"Error: {e}"
        try:
            traceback.print_exc()
        except Exception:
            pass
        return history + [(user_msg, err)], awaiting_answers
# ---------- Theme & CSS ----------
theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.themes.sizes.radius_lg)
custom_css = """
:root { --brand-bg: #e6f7f8; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; }
.gradio-container { background: var(--brand-bg); }
h1 { color: var(--brand-text); font-weight: 700; font-size: 28px !important; }
.chatbot header, .chatbot .label, .chatbot .label-wrap, .chatbot .top, .chatbot .header, .chatbot > .wrap > header { display: none !important; }
.message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
textarea, input, .gr-input { border-radius: 12px !important; }
/* Centered handshake overlay */
#handshake-overlay {
  position: absolute;
  z-index: 50;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  background: rgba(13, 148, 136, 0.92);
  color: #fff;
  padding: 18px 22px;
  border-radius: 14px;
  font-size: 16px;
  max-width: 720px;
  text-align: center;
  box-shadow: 0 10px 24px rgba(0,0,0,0.2);
}
#handshake-overlay.hidden { display: none; }
#chat-container { position: relative; }
"""
# ---------- UI ----------
with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
    gr.Markdown("# ClarityOps Augmented Decision AI")
    with gr.Column(elem_id="chat-container"):
        chat = gr.Chatbot(label="", show_label=False, height=700)
        handshake = gr.HTML(
            value='<div id="handshake-overlay">ClarityOps loaded. Paste your scenario and attach files. I’ll ask up to 5 clarifications, then produce the structured analysis.</div>'
        )
    with gr.Row():
        uploads = gr.Files(
            label="Upload docs/images (PDF, DOCX, CSV, PNG, JPG)",
            file_types=["file"], file_count="multiple", height=68
        )
    with gr.Row():
        msg = gr.Textbox(
            label="",
            show_label=False,
            placeholder="Paste your scenario here (attach files below). ClarityOps will ask clarifications first.",
            scale=10
        )
        send = gr.Button("Send", scale=1)
        clear = gr.Button("Clear chat", scale=1)
    state_history = gr.State(value=[])
    state_uploaded = gr.State(value=[])
    state_awaiting = gr.State(value=False)  # False -> Phase 1 next; True -> awaiting answers for Phase 2
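    # Uploaded file paths accumulate in state_uploaded across the session, so every
    # later turn re-ingests earlier attachments alongside any new ones.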
    def _store_uploads(files, current):
        paths = []
        for f in (files or []):
            paths.append(getattr(f, "name", None) or f)
        return (current or []) + paths
    uploads.change(fn=_store_uploads, inputs=[uploads, state_uploaded], outputs=state_uploaded)
    def _on_send(user_msg, history, up_paths, awaiting):
        hide_overlay = gr.update(value='<div id="handshake-overlay" class="hidden"></div>')
        try:
            if not user_msg or not user_msg.strip():
                return history, "", history, awaiting, hide_overlay
            new_history, new_awaiting = clarityops_reply(
                user_msg.strip(), history or [], None, up_paths or [], awaiting_answers=awaiting
            )
            return new_history, "", new_history, new_awaiting, hide_overlay
        except Exception as e:
            err = f"Error: {e}"
            try: traceback.print_exc()
            except Exception: pass
            new_hist = (history or []) + [(user_msg or "", err)]
            return new_hist, "", new_hist, awaiting, hide_overlay
    send.click(_on_send, inputs=[msg, state_history, state_uploaded, state_awaiting],
               outputs=[chat, msg, state_history, state_awaiting, handshake],
               concurrency_limit=2, queue=True)
    msg.submit(_on_send, inputs=[msg, state_history, state_uploaded, state_awaiting],
               outputs=[chat, msg, state_history, state_awaiting, handshake],
               concurrency_limit=2, queue=True)
    def _on_clear():
        return [], "", [], False, '<div id="handshake-overlay">ClarityOps loaded. Paste your scenario and attach files. I’ll ask up to 5 clarifications, then produce the structured analysis.</div>'
    clear.click(_on_clear, None, [chat, msg, state_history, state_awaiting, handshake])
if __name__ == "__main__":
    port = int(os.environ.get("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)
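# Local run (assuming this file is saved as app.py): `python app.py`, then open
# http://localhost:7860. The PORT environment variable overrides the default 7860
# if the hosting platform sets one.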