|
import gradio as gr |
|
import torch |
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer |
|
|
|
|
|
# Run on GPU when available; all tensors and the model are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the fine-tuned classifier from the local checkpoint directory
# (no hub download); the tokenizer is the base "gpt2" vocabulary the
# checkpoint was trained with.
model = AutoModelForSequenceClassification.from_pretrained("./", local_files_only=True).to(device)

tokenizer = AutoTokenizer.from_pretrained("gpt2")

# FIX: GPT-2 ships without a padding token, so any call like
# tokenizer(..., padding=True) raises
# "Asking to pad but the tokenizer does not have a padding token".
# Reuse EOS as the pad token (the standard workaround for GPT-2) and keep
# the model config in sync so padded positions are handled correctly.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.pad_token_id
|
|
|
def classify_text(text):
    """Classify a naming-notification string.

    Tokenizes *text*, runs the module-level sequence-classification model,
    and maps the argmax logit index to a human-readable label
    (class 1 -> proper, otherwise wrong).
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512,
    ).to(device)

    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(**encoded).logits

    label_id = logits.argmax().item()
    if label_id == 1:
        return "Proper Naming Notfcn"
    return "Wrong Naming Notificn"
|
|
|
# Expose the classifier through a minimal text-in / text-out Gradio UI
# and start the local web server.
iface = gr.Interface(
    fn=classify_text,
    inputs="text",
    outputs="text",
    title="Classification Naming",
    description="Classify naming notifications as proper or wrong.",
)
iface.launch()
|
|