|
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
|
import torch
|
|
import torch.nn.functional as F
|
|
from peft import PeftModel
|
|
|
|
|
|
# Hugging Face repo id holding both the fine-tuned sequence-classification
# weights and a PEFT (LoRA) adapter for phishing-URL detection.
model_name = "munzirmuneer/phishing_url_gemma_pytorch"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Wrap the base model with the PEFT adapter stored in the same repo.
model = PeftModel.from_pretrained(model, model_name)

# Switch to inference mode: torch.no_grad() (used at call time) only disables
# autograd — eval() is still required to deactivate dropout and similar
# train-time layers so predictions are deterministic.
model.eval()
|
def predict(input_text):
    """Classify a URL string with the module-level model.

    Tokenizes *input_text*, runs a forward pass without gradient tracking,
    and softmaxes the logits into class probabilities.

    Returns:
        tuple: ``(predicted_class_index, probabilities)`` — the argmax class
        as an int and the probability list for the first batch item.
    """
    encoded = tokenizer(
        input_text, return_tensors="pt", truncation=True, padding=True
    )

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        output = model(**encoded)

    scores = F.softmax(output.logits, dim=-1)
    best = scores.argmax(dim=-1)

    return best.item(), scores[0].tolist()
|
|
|