
Model fine-tuned to classify face images as happy or sad.

How to test?

Load the model:

```python
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained("Ketanwip/happy_sad_model")
```
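Optionally, you can check the label mapping stored in the checkpoint's config. This is a small added sketch, not part of the original card; if the fine-tuning run did not save custom label names, `id2label` may only show generic `LABEL_0`/`LABEL_1` entries.

```python
# Inspect the id-to-label mapping saved with the model config.
# If no custom names were stored, this prints generic LABEL_0 / LABEL_1.
print(model.config.id2label)
```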

Code to predict:

```python
from transformers import ViTImageProcessor
from PIL import Image
import torch
from IPython.display import display


def predict_happiness_or_sadness(image_path, model, processor):
    # Load the image and preprocess it for the ViT model
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to probabilities and take the most likely class
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    top_prob, top_lbl = torch.topk(probs, 1)

    # Label 0 corresponds to "Happy", label 1 to "Sad"
    prediction = "Happy" if top_lbl.item() == 0 else "Sad"

    return prediction, top_prob.item()


processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

image_path = "path/to/face.jpg"  # replace with the image you want to classify
prediction, probability = predict_happiness_or_sadness(image_path, model, processor)

print(f"The face is predicted to be: {prediction} with a confidence of {probability:.2%}")
display(Image.open(image_path).convert("RGB"))
```
