Initial deployment with frontend architecture

Files changed:
- .dockerignore +51 -0
- Dockerfile +25 -0
- api/__pycache__/database.cpython-314.pyc +0 -0
- api/__pycache__/detector.cpython-314.pyc +0 -0
- api/__pycache__/main.cpython-314.pyc +0 -0
- api/__pycache__/models.cpython-314.pyc +0 -0
- api/database.py +35 -0
- api/detector.py +177 -0
- api/main.py +65 -0
- api/models.py +19 -0
- frontend/app.py +231 -0
- requirements.txt +21 -0
- run.sh +12 -0
.dockerignore
ADDED
@@ -0,0 +1,51 @@
# ----------------------------------
# 1. Sensitive Information & Secrets
# ----------------------------------
.env
.env.*
*.pem
*.key
secret*.json

# ----------------------------------
# 2. Virtual Environments
# ----------------------------------
# CRITICAL: Do not copy your Windows environment into a Linux container!
env/
venv/
.venv/
env.bak/

# ----------------------------------
# 3. Local Databases
# ----------------------------------
# Prevents uploading your local test database. Let the container make a fresh one.
*.db
*.sqlite3
halluciguard.db

# ----------------------------------
# 4. Python Caches & Build Artifacts
# ----------------------------------
__pycache__/
*.py[cod]
*$py.class
*.so
.pytest_cache/
build/
dist/
*.egg-info/

# ----------------------------------
# 5. IDEs, Git, & Notebooks
# ----------------------------------
.vscode/
.idea/
.git/
.gitignore
*.swp
.DS_Store

# Jupyter (Ignore checkpoints and your research notebooks to keep the image small)
.ipynb_checkpoints/
*.ipynb

Dockerfile
ADDED
@@ -0,0 +1,25 @@
# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Hugging Face requires the app to run as user '1000'
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# Set the working directory
WORKDIR /app

# Copy the current directory contents into the container
COPY --chown=user . /app

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Make the startup script executable
RUN chmod +x run.sh

# Expose the port Hugging Face looks for
EXPOSE 7860

# Command to run the dual-server script
CMD ["./run.sh"]

api/__pycache__/database.cpython-314.pyc
ADDED
Binary file (1.07 kB)

api/__pycache__/detector.cpython-314.pyc
ADDED
Binary file (10.1 kB)

api/__pycache__/main.cpython-314.pyc
ADDED
Binary file (3.7 kB)

api/__pycache__/models.cpython-314.pyc
ADDED
Binary file (1.15 kB)

api/database.py
ADDED
@@ -0,0 +1,35 @@
import os
from dotenv import load_dotenv
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import declarative_base, sessionmaker

# Load the .env file
load_dotenv()

# Grab the URL, with a fallback just in case
# --- COMMENT OUT YOUR POSTGRES URL ---
# DATABASE_URL = os.getenv("DATABASE_URL", "postgresql+asyncpg://postgres:postgres@localhost:5432/halluciguard")

# --- ADD THE SQLITE URL ---
DATABASE_URL = "sqlite+aiosqlite:///./halluciguard.db"

# Update your engine to include connect_args for SQLite
engine = create_async_engine(
    DATABASE_URL,
    echo=False,
    connect_args={"check_same_thread": False}  # Required for SQLite with FastAPI
)

# Create a session factory
AsyncSessionLocal = sessionmaker(
    bind=engine,
    class_=AsyncSession,
    expire_on_commit=False
)

Base = declarative_base()

# Dependency to yield database sessions for our FastAPI routes
async def get_db():
    async with AsyncSessionLocal() as session:
        yield session

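For reference, a minimal standalone sketch (not part of this commit) showing how the engine and session factory above can be exercised outside FastAPI. It assumes `aiosqlite` is installed and the project root is on `PYTHONPATH`:

import asyncio
from sqlalchemy import text
from api.database import engine, Base, AsyncSessionLocal

async def main():
    # Mirror the lifespan hook in api/main.py: create tables on startup
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    # Open a session the same way get_db() yields one to routes
    async with AsyncSessionLocal() as session:
        result = await session.execute(text("SELECT 1"))
        print(result.scalar())  # -> 1

asyncio.run(main())
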
api/detector.py
ADDED
@@ -0,0 +1,177 @@
import torch
import re
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# ── Configuration Constants ──
TEMPERATURE = 1.5            # Logit smoothing factor (higher = softer distribution)
CONFIDENCE_THRESHOLD = 0.60  # Minimum raw probability to trust a classification
CHUNK_SIZE = 400             # Words per chunk
CHUNK_OVERLAP = 50           # Overlapping words between chunks


def sliding_window_chunker(text: str, chunk_size: int = CHUNK_SIZE, overlap: int = CHUNK_OVERLAP) -> list[str]:
    """Splits a large text into overlapping chunks of a specific word count."""
    words = text.split()
    chunks = []

    if not words:
        return chunks

    step = chunk_size - overlap
    if step <= 0:
        step = 1

    for i in range(0, len(words), step):
        chunk_words = words[i:i + chunk_size]
        chunks.append(" ".join(chunk_words))

        if i + chunk_size >= len(words):
            break

    return chunks

def split_into_claims(text: str) -> list[str]:
    """Splits the LLM output into individual sentences/claims to prevent conversational filler from ruining factual scores."""
    raw_sentences = re.split(r'(?<=[.!?])\s+', text.strip())

    valid_claims = []
    for s in raw_sentences:
        clean = s.strip()
        # Only keep substantial claims to avoid evaluating numbering fragments (like "1.")
        if len(clean.split()) >= 3:
            valid_claims.append(clean)

    if not valid_claims and text.strip():
        valid_claims = [text.strip()]

    return valid_claims

def normalize_scores(contradiction: float, entailment: float, neutral: float) -> tuple[float, float, float]:
    """Ensures the three scores sum to exactly 100.0%."""
    total = contradiction + entailment + neutral
    if total == 0:
        return (0.0, 0.0, 100.0)

    c = round((contradiction / total) * 100.0, 2)
    e = round((entailment / total) * 100.0, 2)
    n = round(100.0 - c - e, 2)  # Assign remainder to neutral to guarantee sum = 100
    return (c, e, n)


class HallucinationDetector:
    def __init__(self):
        """Initializes the model and tokenizer only once when the class is created."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model_name = "cross-encoder/nli-deberta-v3-base"

        print(f"Initializing Detector on {self.device.type.upper()}...")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name).to(self.device)
        print("Detector Ready!")

    def _infer_chunk(self, chunk: str, claim: str) -> dict:
        """Runs NLI inference on a single chunk against a single claim."""
        inputs = self.tokenizer(
            chunk, claim,
            return_tensors="pt", truncation=True, max_length=512
        ).to(self.device)

        with torch.no_grad():
            outputs = self.model(**inputs)
            # Temperature Scaling
            scaled_logits = outputs.logits / TEMPERATURE
            probs = torch.nn.functional.softmax(scaled_logits, dim=-1)

        c_raw = probs[0][0].item()
        e_raw = probs[0][1].item()
        n_raw = probs[0][2].item()

        # Confidence Thresholding
        max_score = max(c_raw, e_raw, n_raw)
        if max_score < CONFIDENCE_THRESHOLD:
            c_raw, e_raw, n_raw = 0.0, 0.0, 1.0  # Default to Neutral

        return {
            "contradiction": c_raw,
            "entailment": e_raw,
            "neutral": n_raw,
            "spans": []  # Placeholder for Captum
        }

    def analyze(self, context: str, llm_response: str) -> dict:
        """
        Hyper-Accurate Claim-by-Claim Analysis:
        Splits LLM output into sentences, evaluates each sentence against context chunks,
        and aggregates the results logically.
        """
        chunks = sliding_window_chunker(context)
        if not chunks:
            chunks = [""]

        claims = split_into_claims(llm_response)
        sentence_scores = []
        best_attribution_spans = []

        for claim in claims:
            # Score this claim against all context chunks
            chunk_results = [self._infer_chunk(chunk, claim) for chunk in chunks]

            s_max_e = max(r["entailment"] for r in chunk_results)
            s_max_c = max(r["contradiction"] for r in chunk_results)
            s_max_n = max(r["neutral"] for r in chunk_results)

            # Priority Resolution ("Truth Wins") for THIS specific claim
            if s_max_e >= CONFIDENCE_THRESHOLD and s_max_e >= s_max_c:
                final_s_e = s_max_e
                final_s_c = s_max_c * 0.25
                final_s_n = max(0.0, 1.0 - final_s_e - final_s_c)
                winning_spans = max(chunk_results, key=lambda x: x["entailment"])["spans"]
            elif s_max_c >= CONFIDENCE_THRESHOLD and s_max_c > s_max_e:
                final_s_c = s_max_c
                final_s_e = s_max_e * 0.25
                final_s_n = max(0.0, 1.0 - final_s_c - final_s_e)
                winning_spans = max(chunk_results, key=lambda x: x["contradiction"])["spans"]
            else:
                final_s_c = s_max_c
                final_s_e = s_max_e
                final_s_n = s_max_n
                winning_spans = []

            sentence_scores.append({
                "c": final_s_c,
                "e": final_s_e,
                "n": final_s_n,
                "spans": winning_spans
            })

        # ── Document-level Aggregation ──
        # 1. Contradiction runs on a "One Strike" rule: If ANY claim contradicts, the output is flawed.
        doc_c = max(s["c"] for s in sentence_scores)

        # 2. Entailment and Neutral run on an Average: Reflects the ratio of "Facts" vs "Neutral conversational filler".
        doc_e = sum(s["e"] for s in sentence_scores) / len(sentence_scores)
        doc_n = sum(s["n"] for s in sentence_scores) / len(sentence_scores)

        # Clamp negatives and purely normalize
        doc_c = max(doc_c, 0.0)
        doc_e = max(doc_e, 0.0)
        doc_n = max(doc_n, 0.0)

        c_pct, e_pct, n_pct = normalize_scores(doc_c, doc_e, doc_n)

        # Grab spans from the claim that scored the highest severity
        if doc_c > doc_e:
            best_spans = max(sentence_scores, key=lambda x: x["c"])["spans"]
        else:
            best_spans = max(sentence_scores, key=lambda x: x["e"])["spans"]

        # True Hallucination criteria
        is_hallucination = (c_pct > e_pct) and (doc_c >= CONFIDENCE_THRESHOLD)

        return {
            "contradiction_score": c_pct,
            "entailment_score": e_pct,
            "neutral_score": n_pct,
            "is_hallucination": is_hallucination,
            "attribution_spans": best_spans
        }

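For reference, a small sketch (not part of this commit) exercising the pure helpers above, with chunk sizes shrunk for illustration so no model download is needed:

from api.detector import sliding_window_chunker, split_into_claims, normalize_scores

text = " ".join(f"w{i}" for i in range(10))
# Windows advance by chunk_size - overlap = 2 words
print(sliding_window_chunker(text, chunk_size=4, overlap=2))
# ['w0 w1 w2 w3', 'w2 w3 w4 w5', 'w4 w5 w6 w7', 'w6 w7 w8 w9']

# Fragments under 3 words ("Great!", "1.") are dropped
print(split_into_claims("Revenue grew 15%. Great! 1."))
# ['Revenue grew 15%.']

# Raw scores are rescaled to sum to exactly 100.0
print(normalize_scores(0.2, 0.5, 0.3))
# (20.0, 50.0, 30.0)
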
api/main.py
ADDED
@@ -0,0 +1,65 @@
from contextlib import asynccontextmanager
from fastapi import FastAPI, Depends
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession

from api.detector import HallucinationDetector
from api.database import engine, Base, get_db
from api.models import HallucinationLog

detector = HallucinationDetector()

# Lifespan context to create tables automatically on startup
@asynccontextmanager
async def lifespan(app: FastAPI):
    async with engine.begin() as conn:
        # In a real production app you'd use Alembic migrations,
        # but this is perfect for our current phase.
        await conn.run_sync(Base.metadata.create_all)
    yield

app = FastAPI(
    title="HalluciGuard API",
    description="Async API for detecting LLM hallucinations using NLI.",
    version="1.0.0",
    lifespan=lifespan
)

class HallucinationRequest(BaseModel):
    context: str
    llm_output: str

@app.get("/")
async def root():
    return {"status": "online", "message": "HalluciGuard API is running."}

# Notice we added `db: AsyncSession = Depends(get_db)` here
@app.post("/api/v1/score")
async def score_hallucination(request: HallucinationRequest, db: AsyncSession = Depends(get_db)):

    # 1. Run the ML Model
    results = detector.analyze(request.context, request.llm_output)

    # 2. Package the data for the database (SQLite for now, per api/database.py)
    new_log = HallucinationLog(
        context=request.context,
        llm_output=request.llm_output,
        contradiction_score=results["contradiction_score"],
        entailment_score=results["entailment_score"],
        neutral_score=results["neutral_score"],
        is_hallucination=results["is_hallucination"]
    )

    # 3. Async commit to the database
    db.add(new_log)
    await db.commit()
    await db.refresh(new_log)  # Grabs the auto-generated ID and Timestamp

    # 4. Return to the user
    return {
        "log_id": new_log.id,
        "context": request.context,
        "llm_output": request.llm_output,
        "results": results,
        "timestamp": new_log.created_at
    }

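For reference, a client-side sketch (not part of this commit) of calling the endpoint once the backend is up on port 8000; the field names match HallucinationRequest and the response dict above:

import requests

resp = requests.post(
    "http://127.0.0.1:8000/api/v1/score",
    json={
        "context": "The Q3 report states revenue grew by 15%, reaching $50 million.",
        "llm_output": "The company had a great Q3, bringing in $60 million in revenue.",
    },
)
resp.raise_for_status()
body = resp.json()
print(body["log_id"], body["results"]["is_hallucination"])
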
api/models.py
ADDED
@@ -0,0 +1,19 @@
from sqlalchemy import Column, Integer, String, Float, Boolean, DateTime
from sqlalchemy.sql import func
from api.database import Base

class HallucinationLog(Base):
    __tablename__ = "hallucination_logs"

    id = Column(Integer, primary_key=True, index=True)
    context = Column(String, nullable=False)
    llm_output = Column(String, nullable=False)

    # ML Scores
    contradiction_score = Column(Float)
    entailment_score = Column(Float)
    neutral_score = Column(Float)
    is_hallucination = Column(Boolean)

    # Auto-generate a timestamp when the record is created
    created_at = Column(DateTime(timezone=True), server_default=func.now())

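For reference, a sketch (not part of this commit) of reading logged results back with the same async session factory; `recent_logs` is an illustrative name, not an existing helper:

import asyncio
from sqlalchemy import select
from api.database import AsyncSessionLocal
from api.models import HallucinationLog

async def recent_logs(limit: int = 5):
    async with AsyncSessionLocal() as session:
        # Fetch the most recent scoring runs, newest first
        stmt = (
            select(HallucinationLog)
            .order_by(HallucinationLog.created_at.desc())
            .limit(limit)
        )
        for row in (await session.execute(stmt)).scalars():
            print(row.id, row.is_hallucination, row.contradiction_score)

asyncio.run(recent_logs())
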
frontend/app.py
ADDED
@@ -0,0 +1,231 @@
import streamlit as st
import requests
import PyPDF2

# 1. Configure the page settings
st.set_page_config(page_title="HalluciGuard", page_icon="🛡️", layout="centered")

# --- CUSTOM Premium CSS ---
st.markdown("""
<style>
    /* Global Font Import */
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600;800&display=swap');

    html, body, [class*="css"] {
        font-family: 'Inter', sans-serif;
    }

    /* Glassmorphism for text areas */
    .stTextArea textarea {
        background-color: rgba(22, 27, 34, 0.6) !important;
        border-radius: 12px;
        border: 1px solid rgba(255, 255, 255, 0.08) !important;
        color: #ffffff !important;
        padding: 12px !important;
        transition: all 0.3s ease;
    }

    .stTextArea textarea:focus {
        border-color: #6C63FF !important;
        box-shadow: 0 0 0 2px rgba(108, 99, 255, 0.3) !important;
    }

    /* Beautiful Button Gradient */
    .stButton > button {
        background: linear-gradient(135deg, #6C63FF 0%, #3B33D4 100%) !important;
        color: white !important;
        border-radius: 12px !important;
        border: none !important;
        padding: 0.6rem 1.5rem !important;
        font-weight: 600 !important;
        transition: all 0.3s ease !important;
        box-shadow: 0 4px 15px rgba(108, 99, 255, 0.25) !important;
    }

    .stButton > button:hover {
        transform: translateY(-2px);
        box-shadow: 0 6px 20px rgba(108, 99, 255, 0.4) !important;
    }

    /* Glassmorphism Metric Cards */
    div[data-testid="stMetric"] {
        background-color: rgba(22, 27, 34, 0.7);
        border: 1px solid rgba(255, 255, 255, 0.05);
        padding: 1.5rem;
        border-radius: 16px;
        box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.2);
        backdrop-filter: blur(12px);
        -webkit-backdrop-filter: blur(12px);
        transition: transform 0.2s ease;
    }

    div[data-testid="stMetric"]:hover {
        transform: translateY(-4px);
    }

    /* Fancy Header Gradient */
    h1 {
        background: -webkit-linear-gradient(45deg, #A78BFA, #6C63FF);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        font-weight: 800 !important;
        margin-bottom: 0rem !important;
        padding-bottom: 1rem;
    }

    /* File Uploader tweaking */
    div[data-testid="stFileUploader"] {
        background-color: rgba(22, 27, 34, 0.4);
        border-radius: 12px;
        padding: 1.5rem;
        border: 1px dashed rgba(255, 255, 255, 0.15);
        transition: all 0.3s ease;
    }
    div[data-testid="stFileUploader"]:hover {
        border-color: #6C63FF;
        background-color: rgba(108, 99, 255, 0.05);
    }

    /* Subheaders */
    h3 {
        color: #e6edf3 !important;
        font-weight: 600 !important;
    }
</style>
""", unsafe_allow_html=True)

st.title("🛡️ HalluciGuard")
st.write("Research-grade LLM Hallucination Detection using NLI and Cross-Encoders.")

# 2. Define the Backend URL
API_URL = "http://127.0.0.1:8000/api/v1/score"

# 3. Create the Input Forms
st.subheader("Test an LLM Output")

# Initialize session state for context text and uploaded file tracking
if "context_text" not in st.session_state:
    st.session_state.context_text = ""
if "uploaded_filename" not in st.session_state:
    st.session_state.uploaded_filename = None

uploaded_file = st.file_uploader("Upload a document to use as Ground Truth (Optional)", type=["pdf"])

if uploaded_file is not None:
    # Only process the file if it's a new upload or newly selected
    if st.session_state.uploaded_filename != uploaded_file.name:
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            extracted_text = ""
            for page in reader.pages:
                text = page.extract_text()
                if text:
                    extracted_text += text + "\n"

            if not extracted_text.strip():
                st.warning("Could not extract text. Please ensure the PDF is text-searchable and not a scanned image.")
            else:
                st.session_state.context_text = extracted_text.strip()
                st.session_state.uploaded_filename = uploaded_file.name
        except Exception as e:
            st.error(f"Error reading PDF: {e}")

context = st.text_area(
    "Source Context (The factual ground truth):",
    key="context_text",
    height=150,
    placeholder="e.g., The Q3 financial report states that the company's revenue grew by 15%, reaching $50 million."
)

llm_output = st.text_area(
    "LLM Output (The generated response to evaluate):",
    height=100,
    placeholder="e.g., The company had a great Q3, bringing in $60 million in revenue."
)

# 4. The Action Button
if st.button("Detect Hallucination", type="primary", use_container_width=True):
    if not context or not llm_output:
        st.warning("⚠️ Please provide both a context and an LLM output to test.")
    else:
        # Show a premium animated status indicator while the API processes
        with st.status("🔍 Extracting text and structuring context...", expanded=False) as status:
            st.write("⏳ Creating overlapping sequence chunks (evading 512-token limit)...")
            st.write("🧠 Evaluating chunk semantics against DeBERTa-v3 cross-encoder...")

            try:
                # Send data to our FastAPI backend
                response = requests.post(
                    API_URL,
                    json={"context": context, "llm_output": llm_output}
                )
                response.raise_for_status()  # Throw an error if the API crashes

                status.update(label="✅ Analysis Complete & Scores Aggregated!", state="complete", expanded=True)

                data = response.json()
                results = data.get("results", {})

                # 5. Display the Results
                st.markdown("---")

                # Big visual alert
                if results.get("is_hallucination"):
                    st.error("🚨 **HALLUCINATION DETECTED (Contradiction > 60%)**")
                else:
                    st.success("✅ **FACTUALLY CONSISTENT**")

                # Display exact percentages cleanly
                st.subheader("NLI Confidence Scores")

                contradiction_score = results.get('contradiction_score', 0)
                entailment_score = results.get('entailment_score', 0)
                neutral_score = results.get('neutral_score', 0)

                # Dynamic Plain-English Summary
                if contradiction_score > 15:
                    st.warning("🚨 Warning: The AI is explicitly contradicting the source material.")
                elif neutral_score > 50:
                    st.warning("⚠️ Note: The AI is going off-script. It is bringing in outside knowledge not found in your source context.")
                elif entailment_score > 50 and contradiction_score < 5:
                    st.success("✅ Excellent: The AI is sticking strictly to the facts provided.")

                # Generate dynamic contextual captions based on percentages
                if contradiction_score >= 60:
                    c_text = "- Meaning: The AI is heavily contradicting the source facts. Critical hallucination risk."
                elif contradiction_score > 10:
                    c_text = "- Meaning: Some distinct parts of the output directly oppose the source material."
                else:
                    c_text = "- Meaning: The AI is largely staying true to the facts with zero serious contradictions."

                if entailment_score >= 80:
                    e_text = "- Meaning: Excellent! The AI is strictly summarizing the facts given to it."
                elif entailment_score > 40:
                    e_text = "- Meaning: The AI includes a fair mix of factual summaries alongside some conversational filler."
                else:
                    e_text = "- Meaning: Very little of the output is strictly supported by the source text."

                if neutral_score >= 60:
                    n_text = "- Meaning: The AI is heavily going off-script and bringing in outside knowledge/questions."
                elif neutral_score > 10:
                    n_text = "- Meaning: The AI is adding a moderate amount of conversational filler or harmless additions."
                else:
                    n_text = "- Meaning: The AI is strictly sticking to the provided context with almost no unverified additions."

                col1, col2, col3 = st.columns(3)
                with col1:
                    st.metric("Contradiction", f"{contradiction_score}%")
                    st.caption(c_text)
                with col2:
                    st.metric("Entailment", f"{entailment_score}%")
                    st.caption(e_text)
                with col3:
                    st.metric("Neutral", f"{neutral_score}%")
                    st.caption(n_text)

            except requests.exceptions.ConnectionError:
                st.error("🔌 Failed to connect to the backend. Is your FastAPI server running on port 8000?")
            except Exception as e:
                st.error(f"An error occurred: {e}")

requirements.txt
ADDED
@@ -0,0 +1,25 @@
# --- Machine Learning & NLP Core ---
torch>=2.0.0
transformers>=4.30.0
sentencepiece>=0.1.99  # required by the DeBERTa-v3 tokenizer
datasets>=2.13.0
scikit-learn>=1.2.2
numpy>=1.24.0
pandas>=2.0.0

# --- API Backend & Database ---
fastapi>=0.100.0
uvicorn[standard]>=0.22.0
pydantic>=2.0.0
sqlalchemy>=2.0.0
aiosqlite>=0.19.0  # async SQLite driver used by api/database.py
asyncpg>=0.28.0

# --- Frontend Dashboard ---
streamlit>=1.25.0
requests>=2.31.0  # frontend -> backend HTTP calls
PyPDF2>=3.0.0  # PDF ground-truth extraction in frontend/app.py

# --- Utilities & Testing ---
python-dotenv>=1.0.0
pytest>=7.4.0

run.sh
ADDED
@@ -0,0 +1,12 @@
#!/bin/bash

# Start the FastAPI backend in the background
echo "Starting FastAPI Backend..."
uvicorn api.main:app --host 127.0.0.1 --port 8000 &

# Wait a few seconds to let the ML model load into memory
sleep 10

# Start the Streamlit frontend on Hugging Face's required port
echo "Starting Streamlit Frontend..."
streamlit run frontend/app.py --server.port 7860 --server.address 0.0.0.0
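
For reference, a smoke-test sketch (not part of this commit) to confirm both servers started by run.sh are reachable from inside the container; run it after the sleep window has passed:

import requests

# FastAPI root endpoint defined in api/main.py
print(requests.get("http://127.0.0.1:8000/").json())
# {'status': 'online', 'message': 'HalluciGuard API is running.'}

# Streamlit on the port Hugging Face expects
print(requests.get("http://127.0.0.1:7860/").status_code)  # 200 once the frontend is up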