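# Gradio demo: classify the age group and the emotion of a face in an uploaded image
# using two pretrained ViT models from the Hugging Face Hub.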
import gradio as gr
from transformers import ViTFeatureExtractor, ViTForImageClassification
from PIL import Image
import requests
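
# Download example images (public stock photos) to populate the Gradio examples gallery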
img1_url = 'https://static3.depositphotos.com/1003410/160/i/450/depositphotos_1607848-stock-photo-portrait-of-old-man.jpg'
img1 = Image.open(requests.get(img1_url, stream=True).raw)
img2_url = "https://img.freepik.com/free-photo/excited-screaming-young-woman-standing-isolated-yellow_176420-39645.jpg"
img2 = Image.open(requests.get(img2_url, stream=True).raw)
img3_url = "https://img.freepik.com/free-photo/cute-adorable-boy-studio_58702-7629.jpg"
img3 = Image.open(requests.get(img3_url, stream=True).raw)
def age_emot_classifier(input_image):
    # Init model, transforms
    model_age = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
    transforms_age = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
    model_emot = ViTForImageClassification.from_pretrained("yangswei/visual-emotion-classification")
    transforms_emot = ViTFeatureExtractor.from_pretrained("yangswei/visual-emotion-classification")
    # Transform our image and pass it through the model
    inputs_age = transforms_age(input_image, return_tensors='pt')
    output_age = model_age(**inputs_age)
    inputs_emot = transforms_emot(input_image, return_tensors='pt')
    output_emot = model_emot(**inputs_emot)
    # Predicted class probabilities
    proba_age = output_age.logits.softmax(1)
    proba_emot = output_emot.logits.softmax(1)
    # Predicted classes with confidences
    labels_age = model_age.config.id2label
    confidences_age = {labels_age[i]: proba_age[0][i].item() for i in range(len(labels_age))}
    labels_emot = model_emot.config.id2label
    confidences_emot = {labels_emot[i]: proba_emot[0][i].item() for i in range(len(labels_emot))}
    return confidences_age, confidences_emot
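
# Label outputs showing the predicted classes with their confidence scores
# (9 age brackets, 8 emotion categories)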
output_age = gr.Label(num_top_classes=9, label="Age Prediction")
output_emotion = gr.Label(num_top_classes=8, label="Emotion Prediction")
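
# Build the demo as a Blocks app with the Glass theme; the Interface is rendered inside it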
with gr.Blocks(theme=gr.themes.Glass()) as demo:
    gr.Interface(fn=age_emot_classifier, inputs="image", outputs=[output_age, output_emotion],
                 examples=[img1, img2, img3])

demo.launch()