Inference

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import time
import torch
import re

# Run on GPU if one is available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AutoModelForSequenceClassification.from_pretrained("Mr-Vicky-01/TP-FP").to(device)
tokenizer = AutoTokenizer.from_pretrained("Mr-Vicky-01/TP-FP")


start = time.time()

# Example finding: a hardcoded credential reported by a scanner
vuln = 'String password = "password123";'
vuln_desc = "Hardcoded credentials were found. This could allow an attacker to access sensitive resources. Replace hardcoded passwords with environment variables or a secure vault."
scanner = "docker"

# Assemble the model input, then strip commas, question marks, periods,
# and quotes before tokenization
question = f"Vulnerability: {vuln} , Vulnerability_Description {vuln_desc} , Scanner: {scanner}"
question = re.sub(r"[,?.'\"]", "", question)
inputs = tokenizer(question, return_tensors="pt").to(device)

# Single forward pass with gradient tracking disabled
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring class index back to its label
predicted_class_id = logits.argmax(dim=-1).item()
predicted_class = model.config.id2label[predicted_class_id]

print(predicted_class)
print(f"Inference time: {time.time() - start:.3f}s")
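
Scanner output usually arrives as many findings at once, and the same model can score them in a single forward pass. Below is a minimal batched sketch, assuming the model, tokenizer, and device objects from the snippet above and a tokenizer that defines a padding token; the findings list is a hypothetical example of inputs pre-cleaned the same way as the single example.

# Batched variant: a minimal sketch reusing model, tokenizer, and device
# from above. `findings` is a hypothetical list of pre-cleaned inputs.
findings = [
    "Vulnerability: String password = password123 Scanner: docker",
    "Vulnerability: eval(user_input) Scanner: bandit",
]

batch = tokenizer(
    findings,
    return_tensors="pt",
    padding=True,       # pad every sequence to the longest in the batch
    truncation=True,    # clip anything longer than the model's max length
).to(device)

with torch.no_grad():
    batch_logits = model(**batch).logits

# argmax over the class dimension yields one prediction per finding
labels = [model.config.id2label[i] for i in batch_logits.argmax(dim=-1).tolist()]
print(labels)

Padding makes the input tensor rectangular so all findings run in one call; for very large scan reports, chunking the list into fixed-size batches keeps memory use bounded.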