Commit
·
b4e73b5
1
Parent(s):
cd9a82f
Require API tokens to be entered by the user in the UI before enabling the Classify button.
Browse files
app.py
CHANGED
|
@@ -15,17 +15,14 @@ from tavily import TavilyClient
|
|
| 15 |
from huggingface_hub import InferenceClient
|
| 16 |
|
| 17 |
text_classifier = None
|
| 18 |
-
TAVILY_KEY =
|
| 19 |
-
GOOGLE_KEY =
|
| 20 |
-
HF_TOKEN =
|
| 21 |
|
| 22 |
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 23 |
explain_model = "meta-llama/Llama-3.1-8B-Instruct"
|
| 24 |
text_model = "rajyalakshmijampani/fever_finetuned_deberta"
|
| 25 |
-
|
| 26 |
-
inf_client = InferenceClient(token=HF_TOKEN)
|
| 27 |
wiki = wikipediaapi.Wikipedia(language='en', user_agent='fact-checker/1.0')
|
| 28 |
-
tavily = TavilyClient(api_key=TAVILY_KEY)
|
| 29 |
|
| 30 |
def get_text_classifier():
|
| 31 |
global text_classifier
|
|
@@ -66,7 +63,7 @@ def _safe_call(func, claim):
|
|
| 66 |
return []
|
| 67 |
|
| 68 |
def _from_google(claim):
|
| 69 |
-
|
| 70 |
url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
|
| 71 |
r = requests.get(url, params={"query": claim, "key": GOOGLE_KEY, "pageSize": 2}).json()
|
| 72 |
claims = r.get("claims", [])
|
|
@@ -80,7 +77,8 @@ def _from_google(claim):
|
|
| 80 |
return evid[:3]
|
| 81 |
|
| 82 |
def _from_tavily(claim):
|
| 83 |
-
|
|
|
|
| 84 |
try:
|
| 85 |
results = tavily.search(claim).get("results", [])
|
| 86 |
sents = []
|
|
@@ -118,7 +116,13 @@ def get_evidence_sentences(claim, k=3):
|
|
| 118 |
return (evid or ["Error: No relevant evidence found."])[:k]
|
| 119 |
|
| 120 |
# --- Classification Function ---
|
| 121 |
-
def classify_text(claim):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
claim=claim.lower().strip()
|
| 123 |
classifier = get_text_classifier()
|
| 124 |
evidences = get_evidence_sentences(claim)
|
|
@@ -159,6 +163,8 @@ def classify_text(claim):
|
|
| 159 |
{"role": "system", "content": "You are a reliable fact-checking assistant."},
|
| 160 |
{"role": "user", "content": prompt},
|
| 161 |
]
|
|
|
|
|
|
|
| 162 |
completion = inf_client.chat_completion( model=explain_model, messages=messages, max_tokens=256, temperature=0.3)
|
| 163 |
raw_response = completion.choices[0].message.content.strip()
|
| 164 |
|
|
@@ -204,10 +210,32 @@ with gr.Blocks() as demo:
|
|
| 204 |
gr.Markdown("# Multimodal Misinformation Detector")
|
| 205 |
|
| 206 |
with gr.Tab("Text Detector"):
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
|
| 212 |
|
| 213 |
with gr.Tab("Image Detector"):
|
|
|
|
| 15 |
from huggingface_hub import InferenceClient
|
| 16 |
|
| 17 |
text_classifier = None
|
| 18 |
+
TAVILY_KEY = None
|
| 19 |
+
GOOGLE_KEY = None
|
| 20 |
+
HF_TOKEN = None
|
| 21 |
|
| 22 |
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 23 |
explain_model = "meta-llama/Llama-3.1-8B-Instruct"
|
| 24 |
text_model = "rajyalakshmijampani/fever_finetuned_deberta"
|
|
|
|
|
|
|
| 25 |
wiki = wikipediaapi.Wikipedia(language='en', user_agent='fact-checker/1.0')
|
|
|
|
| 26 |
|
| 27 |
def get_text_classifier():
|
| 28 |
global text_classifier
|
|
|
|
| 63 |
return []
|
| 64 |
|
| 65 |
def _from_google(claim):
|
| 66 |
+
global GOOGLE_KEY
|
| 67 |
url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
|
| 68 |
r = requests.get(url, params={"query": claim, "key": GOOGLE_KEY, "pageSize": 2}).json()
|
| 69 |
claims = r.get("claims", [])
|
|
|
|
| 77 |
return evid[:3]
|
| 78 |
|
| 79 |
def _from_tavily(claim):
|
| 80 |
+
global TAVILY_KEY
|
| 81 |
+
tavily = TavilyClient(api_key=TAVILY_KEY)
|
| 82 |
try:
|
| 83 |
results = tavily.search(claim).get("results", [])
|
| 84 |
sents = []
|
|
|
|
| 116 |
return (evid or ["Error: No relevant evidence found."])[:k]
|
| 117 |
|
| 118 |
# --- Classification Function ---
|
| 119 |
+
def classify_text(claim, hf_token, tavily_key, google_key):
|
| 120 |
+
|
| 121 |
+
global HF_TOKEN, TAVILY_KEY, GOOGLE_KEY
|
| 122 |
+
HF_TOKEN = hf_token.strip()
|
| 123 |
+
TAVILY_KEY = tavily_key.strip()
|
| 124 |
+
GOOGLE_KEY = google_key.strip()
|
| 125 |
+
|
| 126 |
claim=claim.lower().strip()
|
| 127 |
classifier = get_text_classifier()
|
| 128 |
evidences = get_evidence_sentences(claim)
|
|
|
|
| 163 |
{"role": "system", "content": "You are a reliable fact-checking assistant."},
|
| 164 |
{"role": "user", "content": prompt},
|
| 165 |
]
|
| 166 |
+
|
| 167 |
+
inf_client = InferenceClient(token=HF_TOKEN)
|
| 168 |
completion = inf_client.chat_completion( model=explain_model, messages=messages, max_tokens=256, temperature=0.3)
|
| 169 |
raw_response = completion.choices[0].message.content.strip()
|
| 170 |
|
|
|
|
| 210 |
gr.Markdown("# Multimodal Misinformation Detector")
|
| 211 |
|
| 212 |
with gr.Tab("Text Detector"):
|
| 213 |
+
with gr.Row():
|
| 214 |
+
with gr.Column(scale=3): # Left half — main inputs
|
| 215 |
+
claim = gr.Textbox(label="Enter Claim")
|
| 216 |
+
text_button = gr.Button("Classify Claim", interactive=False) # Disable until tokens provided
|
| 217 |
+
text_output = gr.Markdown( label="Model Output", value="Results will appear here...")
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
with gr.Column(scale=1): # Right half — user token inputs
|
| 221 |
+
gr.Markdown("## Enter your API keys")
|
| 222 |
+
hf_token = gr.Textbox(label="Hugging Face Token", type="password", value = "Required")
|
| 223 |
+
tavily_key = gr.Textbox(label="Tavily API Key", type="password", value = "Required")
|
| 224 |
+
google_key = gr.Textbox(label="Google Fact Check API Key", type="password", value = "Required")
|
| 225 |
+
|
| 226 |
+
# Enable button when all fields filled
|
| 227 |
+
def enable_button(hf, tavily, google):
|
| 228 |
+
ready = bool(hf and tavily and google)
|
| 229 |
+
return gr.update(interactive=ready)
|
| 230 |
+
|
| 231 |
+
hf_token.change(enable_button, inputs=[hf_token, tavily_key, google_key], outputs=text_button)
|
| 232 |
+
tavily_key.change(enable_button, inputs=[hf_token, tavily_key, google_key], outputs=text_button)
|
| 233 |
+
google_key.change(enable_button, inputs=[hf_token, tavily_key, google_key], outputs=text_button)
|
| 234 |
+
|
| 235 |
+
# Click handler (include all token inputs)
|
| 236 |
+
text_button.click(classify_text,
|
| 237 |
+
inputs=[claim, hf_token, tavily_key, google_key],
|
| 238 |
+
outputs=text_output)
|
| 239 |
|
| 240 |
|
| 241 |
with gr.Tab("Image Detector"):
|