File size: 1,462 Bytes
7f4df39
a63b185
 
 
7f4df39
a63b185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f4df39
a63b185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import streamlit as st
import torch
from PIL import Image
from torchvision import transforms

# Load the model once and reuse it across Streamlit reruns.
# NOTE: st.cache(allow_output_mutation=True) was deprecated and removed in
# Streamlit 1.18+; st.cache_resource is the modern replacement for caching
# unserializable resources such as ML models.
@st.cache_resource
def load_model():
    """Deserialize the pretrained ViT model onto CPU and set eval mode.

    Returns:
        torch.nn.Module: the loaded model, ready for inference.
    """
    # map_location forces CPU so the app also runs on hosts without a GPU.
    model = torch.load('pretrained_vit_model_full.pth', map_location=torch.device('cpu'))
    model.eval()  # disable dropout / batch-norm updates for inference
    return model

model = load_model()

# Preprocessing applied to every uploaded image before inference.
def transform_image(image):
    """Convert a PIL image into a batched model-input tensor.

    Args:
        image: PIL.Image (RGB expected).

    Returns:
        torch.Tensor of shape (1, 3, 224, 224).
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),  # the model expects 224x224 inputs
        transforms.ToTensor(),
        # extend here (e.g. Normalize) if the model requires it
    ])
    tensor = preprocess(image)
    return tensor.unsqueeze(0)  # prepend the batch dimension

st.title("Animal Facial Expression Recognition")

# Demo slider widget.
value = st.slider('Select a value')
st.write(value, 'squared is', value * value)

# Image upload + classification flow.
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    image = Image.open(uploaded_file).convert('RGB')
    st.image(image, caption='Uploaded Image.', use_column_width=True)
    st.write("")
    st.write("Classifying...")

    # Preprocess into a (1, 3, 224, 224) tensor.
    input_tensor = transform_image(image)

    # Run inference without gradient tracking.
    with torch.no_grad():
        prediction = model(input_tensor)

    # argmax over the output logits gives the predicted class index.
    st.write('Predicted class:', prediction.argmax().item())