import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Load the two fine-tuned BERT classifiers and their tokenizers
# from the local model directories "Bert1" and "Bert2".
model_1_path = "Bert1"
model_2_path = "Bert2"

tokenizer_1 = AutoTokenizer.from_pretrained(model_1_path)
model_1 = AutoModelForSequenceClassification.from_pretrained(model_1_path)

tokenizer_2 = AutoTokenizer.from_pretrained(model_2_path)
model_2 = AutoModelForSequenceClassification.from_pretrained(model_2_path)
def predict(text, model_choice):
    # Select the tokenizer/model pair based on the radio-button choice.
    if model_choice == "Model 1":
        tokenizer = tokenizer_1
        model = model_1
    else:
        tokenizer = tokenizer_2
        model = model_2

    # Tokenize the input and run a forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to class probabilities; index 0 = "Real", index 1 = "Fake".
    probs = torch.softmax(outputs.logits, dim=1)[0]
    return {
        "Real": float(probs[0]),
        "Fake": float(probs[1]),
    }
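# Example usage (illustrative only; the "Real"/"Fake" label order is an
# assumption carried over from the return dict above):
#   predict("Some news article text...", "Model 1")
#   returns a dict mapping "Real" and "Fake" to probabilities that sum to 1,
#   which Gradio's Label component renders as ranked class scores.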
# Build the Gradio UI: a text box for the article, a radio button to pick
# the model, and a Label output showing both class probabilities.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=4, label="Enter News Text"),
        gr.Radio(["Model 1", "Model 2"], label="Choose Model"),
    ],
    outputs=gr.Label(num_top_classes=2),
    title="Fake News Detector - Multi Model",
)

iface.launch()