# Historical Claim Verifier using RAG (Free Tools Only)
# Works on Hugging Face Spaces with Gradio + Wikipedia
import gradio as gr
import wikipedia
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch  # PyTorch backend used by the transformers pipeline below
# Step 1: Define Wikipedia search and summary function
def search_wikipedia(query):
    """Return short summaries of the top Wikipedia matches for the query."""
    try:
        page_titles = wikipedia.search(query, results=2)
        summaries = [wikipedia.summary(title, sentences=3) for title in page_titles]
        return "\n\n".join(summaries)
    except Exception as e:
        return f"Wikipedia search error: {str(e)}"
# Step 2: Load a free Hugging Face LLM manually to avoid TensorFlow dependency
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
llm_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")
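# Optional speed-up (a sketch, not part of the original app): flan-t5-large is
# slow on CPU, so on a GPU-backed Space the pipeline can be pinned to CUDA:
# llm_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer,
#                         framework="pt", device=0 if torch.cuda.is_available() else -1)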
# Step 3: Define the claim verification function
def verify_claim(claim):
    context = search_wikipedia(claim)
    # An empty string means Wikipedia returned no matches; the error prefix is
    # exactly the one produced by search_wikipedia above (matching the prefix
    # avoids false positives when a legitimate summary contains the word "error").
    if context.strip() == "" or context.startswith("Wikipedia search error"):
        return "Could not retrieve relevant information. Please try a different claim."
    prompt = f"Claim: {claim}\n\nContext: {context}\n\nIs this claim true or false? Explain."
    response = llm_pipeline(prompt, max_length=512, do_sample=False)[0]["generated_text"]
    return response
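# Caveat (assumption about model limits): the T5 tokenizer's default maximum
# input length is 512 tokens, so very long Wikipedia contexts are silently
# truncated. A crude guard would clip the context before building the prompt:
#   context = context[:1500]  # roughly 375 tokens at ~4 chars/token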
# Step 4: Gradio UI setup
demo = gr.Interface(
    fn=verify_claim,
    inputs=gr.Textbox(label="Enter a historical claim", placeholder="e.g., Alexander the Great died in 1971."),
    outputs=gr.Textbox(label="Claim Verification Output"),
    title="Historical Claim Verifier (RAG-Based)",
    description="Uses Wikipedia + a free LLM to verify if a historical claim is true or false, and explains why.",
)
# Step 5: Launch (will auto-run on Hugging Face Spaces)
demo.launch()
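# On Hugging Face Spaces this file is imported and `demo` is served
# automatically; launch() also lets the same file run locally via `python app.py`.
# For quick local testing, `demo.launch(share=True)` (a standard Gradio option)
# prints a temporary public URL.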