# Streamlit demo: emotion detection from an uploaded image.
import streamlit as st
from PIL import Image
import torch
from torch import nn as nn
from torchvision.transforms import transforms
class CNNModel(nn.Module):
    """Convolutional classifier with three conv stages and a sigmoid head.

    The layer layout (and therefore the ``state_dict`` key numbering,
    ``conv_layers.0`` ... ``fc_layers.8``) must stay exactly as trained so
    the saved checkpoint keeps loading.
    """

    def __init__(self):
        super().__init__()
        # Each stage: 3x3 conv (no padding) -> ReLU -> batch-norm -> 2x2 max-pool.
        # Built from a loop; Sequential indices are identical to the original.
        stages = []
        for in_ch, out_ch in ((3, 32), (32, 64), (64, 128)):
            stages += [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=0),
                nn.ReLU(),
                nn.BatchNorm2d(out_ch),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]
        self.conv_layers = nn.Sequential(*stages)
        # For a 256x256 input the spatial size goes
        # 256 -> 254 -> 127 -> 125 -> 62 -> 60 -> 30, hence 128 * 30 * 30 features.
        self.fc_layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 30 * 30, 128),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Run the conv feature extractor, then the classifier head.

        Returns a (N, 1) tensor of sigmoid probabilities.
        """
        features = self.conv_layers(x)
        return self.fc_layers(features)
def load_checkpoint(checkpoint, model):
    """Restore *model*'s weights from a checkpoint dict.

    Expects the dict to hold the trained weights under the "state_dict" key.
    """
    print("=> Loading checkpoint")
    state_dict = checkpoint["state_dict"]
    model.load_state_dict(state_dict)
# Build the network and restore the trained weights. map_location="cpu" lets
# a checkpoint saved on GPU load on CPU-only hosts; nothing in this script
# ever moves the model to GPU, so inference is CPU-side regardless.
model = CNNModel()
load_checkpoint(torch.load("emotion.pth.tar", map_location="cpu"), model)
model.eval()  # disable dropout and use batch-norm running stats at inference
# Labels shown to the user ("surprize" typo fixed).
# NOTE(review): the network ends in a single sigmoid unit, so it can only
# separate two classes — seven labels cannot all be produced by this head;
# confirm the intended label set against how the checkpoint was trained.
class_names = ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]
# Page chrome plus the single input widget the rest of the app reacts to.
st.title("Emotion Detector")
st.write("Upload an image and let the model predict your emotion!")
accepted_types = ["jpg", "png", "jpeg"]
uploaded_image = st.file_uploader("Choose an image...", type=accepted_types)
if uploaded_image is not None:
    # PIL can yield RGBA (png) or grayscale images; the network and the
    # 3-channel Normalize below require RGB, so convert explicitly to avoid
    # a channel-mismatch crash.
    image = Image.open(uploaded_image).convert("RGB")
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Preprocess: resize to the 256x256 input the fc head was sized for,
    # then the standard ImageNet mean/std normalization.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    tensor_image = transform(image)

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        predictions = model(tensor_image.unsqueeze(0))

    # NOTE(review): the model emits a single sigmoid value, so argmax over a
    # one-element tensor is always 0 and every upload gets class_names[0].
    # The label lookup needs a model/label-set fix upstream — TODO confirm
    # intent before changing the displayed behavior.
    predicted_class_index = torch.argmax(predictions).item()
    predicted_class = class_names[predicted_class_index]
    st.write(f"Prediction: {predicted_class}")