# Adapted from jvcanavarro's Hugging Face Space — "Update app layout" (commit d4c8cee)
import gradio as gr
from src.core import load_model, predict_traits
# Display labels for the Big-Five traits, in the order the model emits its
# scores (zipped with `predict_traits` output in `get_traits`) — do not reorder
# without confirming the model's output order.
# Fix: "Neurotisicm" was misspelled; DESCRIPTION below already uses the
# correct spelling "Neuroticism".
TRAIT_NAMES = [
    "Extraversion",
    "Agreeableness",
    "Conscientiousness",
    "Neuroticism",
    "Openness",
]
# Markdown blurbs for each trait, joined into the interface's footer article.
# (Footer order is editorial and intentionally differs from TRAIT_NAMES order.)
DESCRIPTION = [
    f"**{trait}**: {adjectives}, etc."
    for trait, adjectives in [
        ("Extraversion", "outgoing, energetic, talkative, active, assertive"),
        ("Neuroticism", "worrying, self-pitying, unstable, tense, anxious"),
        ("Agreeableness", "sympathetic, forgiving, generous, kind, appreciative"),
        ("Conscientiousness", "responsible, organized, reliable, efficient, planful"),
        ("Openness", "artistic, curious, imaginative, insightful, original, wide interests"),
    ]
]
def get_traits(video):
    """Predict Big-Five trait scores for a video.

    Loads the model, runs inference, and maps each trait name to its
    score as a plain float (the shape `gr.Label` expects).
    """
    scores = predict_traits(video, load_model())
    return dict(zip(TRAIT_NAMES, map(float, scores)))
# Keyword arguments shared by both Interface tabs (upload and webcam).
params = {
    "description": "Predicts the Big-Five psychology traits of a person based an short introduction video. Adapted from [Deep Impression: Audiovisual Deep Residual Networks for Multimodal Apparent Personality Trait Recognition](https://arxiv.org/abs/1609.05119).",
    "article": " ".join(DESCRIPTION),
    "thumbnail": "https://cdn-icons-png.flaticon.com/512/3392/3392044.png",
}
# Tab 1: predict traits from an uploaded video file.
primary_interface = gr.Interface(
    get_traits,
    inputs=gr.Video(label="Video", include_audio=True),
    outputs=gr.Label(num_top_classes=5, label="Results"),
    # Example videos are loaded from the local "egs" directory; cached results
    # are precomputed at startup, so the directory must exist and be readable.
    examples="egs",
    cache_examples=True,
    **params,
)
# Tab 2: predict traits from a live webcam recording.
second_interface = gr.Interface(
    get_traits,
    # NOTE(review): `source="webcam"` is Gradio 3.x API (replaced by
    # `sources=["webcam"]` in Gradio 4) — confirm the pinned gradio version.
    inputs=gr.Video(label="Webcam", include_audio=True, source="webcam"),
    outputs=gr.Label(num_top_classes=5, label="Results"),
    **params,
)
# Combine both interfaces into a tabbed app; tab_names pairs positionally
# with the interface list (upload first, webcam second).
app = gr.TabbedInterface(
    [primary_interface, second_interface],
    title="Personality Traits Prediction 📑",
    tab_names=["Video Upload", "Webcam"],
)
# Blocking call: starts the local web server with default host/port settings.
app.launch()