import torch
from transformers import ViTForImageClassification, ViTImageProcessor
import torch.nn.functional as F
from PIL import Image
import gradio as gr
# Fine-tuned ViT classifier and the image processor of its base checkpoint
model = ViTForImageClassification.from_pretrained('Tirath5504/IPD-Image-ViT-Finetune')
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')

# Label order must match the fine-tuned model's output head
class_names = ['cut_throat_gesture', 'finger_gun_to_the_head', 'middle_finger', 'slanted_eyes_gesture', 'swastika']
def predict(image):
    # Preprocess the PIL image into model-ready pixel values
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs).logits
    # Convert logits to probabilities and take the top class with its score
    probabilities = F.softmax(outputs, dim=1)
    predicted_class_idx = probabilities.argmax(-1).item()
    predicted_class = class_names[predicted_class_idx]
    confidence_score = probabilities[0][predicted_class_idx].item()
    return predicted_class, confidence_score
# Gradio UI: image in, predicted class and confidence score out
iface = gr.Interface(fn=predict,
                     inputs=gr.Image(type="pil"),
                     outputs=[gr.Label(num_top_classes=1, label="Class"), gr.Label(label="Score")],
                     title="Hateful Content Detection",
                     description="Upload an image to classify hateful gestures or symbols")

if __name__ == "__main__":
    iface.launch()
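
# --- Optional usage sketch (not part of the app) ---------------------------
# A minimal way to exercise predict() directly, assuming a local test image at
# "sample.jpg" (hypothetical path), e.g. from a Python shell after the model
# and processor above have loaded:
#
#   from PIL import Image
#   label, score = predict(Image.open("sample.jpg").convert("RGB"))
#   print(f"{label}: {score:.3f}")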