from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel
import torch
import gradio as gr
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MAX_LEN = 512
LABELS = {0: "safe", 1: "phishing/spam"}
# --- LOAD TOKENIZER ---
tokenizer = AutoTokenizer.from_pretrained(
    "akhyar919/DeBERTa-V3-Base-Phishing-Email-Classification"  # <- Hub repo that provides tokenizer.json + config
)
# --- LOAD BASE MODEL + LoRA ---
base_model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/deberta-v3-base", num_labels=2
)
model = PeftModel.from_pretrained(
    base_model,
    "akhyar919/DeBERTa-V3-Base-Phishing-Email-Classification"  # <- your adapter files
)
model.to(DEVICE)
model.eval()
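# Optional (not part of the original app): PEFT can fold the LoRA weights into the
# base model with merge_and_unload(), which removes the adapter indirection at
# inference time. Sketch only; uncomment if you prefer a merged model:
# model = model.merge_and_unload()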
# --- PREDICTION FUNCTION ---
def classify_email(email_text):
    # Tokenize the email and move the input tensors to the same device as the model
    inputs = tokenizer(
        email_text,
        truncation=True,
        padding="max_length",
        max_length=MAX_LEN,
        return_tensors="pt"
    ).to(DEVICE)
    # Forward pass without gradients; pick the highest-scoring class
    with torch.no_grad():
        logits = model(**inputs).logits
    pred_id = torch.argmax(logits, dim=-1).item()
    return LABELS[pred_id]
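
# Optional variant (not in the original app): the same forward pass can also expose
# per-class confidence scores via a softmax over the logits. This is a sketch;
# classify_email_with_scores is a hypothetical helper and is not wired into the UI.
def classify_email_with_scores(email_text):
    inputs = tokenizer(
        email_text,
        truncation=True,
        padding="max_length",
        max_length=MAX_LEN,
        return_tensors="pt"
    ).to(DEVICE)
    with torch.no_grad():
        probs = torch.softmax(model(**inputs).logits, dim=-1).squeeze(0)
    # e.g. {"safe": 0.03, "phishing/spam": 0.97}; this dict shape is what gr.Label accepts
    return {LABELS[i]: probs[i].item() for i in range(len(LABELS))}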
# --- GRADIO INTERFACE ---
iface = gr.Interface(
    fn=classify_email,
    inputs=gr.Textbox(lines=10, placeholder="Paste email here..."),
    outputs="text",
    title="🚨 Phishing Email Detector",
    description="Paste an email and detect if it's safe or phishing. Built with DeBERTa + LoRA ⚑"
)
if __name__ == "__main__":
    iface.launch()
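    # Note (assumption, not in the original file): launch() also accepts options such as
    # share=True for a temporary public link, e.g. iface.launch(share=True).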