import streamlit as st
import torch
import torchvision.transforms as T
import matplotlib.pyplot as plt
from PIL import Image

# FeatUp utilities -- the featup package (and dependencies such as clip)
# must be installed and importable in the environment
from featup.util import norm, unnorm
from featup.plotting import plot_feats

# Streamlit UI
st.title("Feature Upsampling Demo")

# File uploader
uploaded_file = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])
if uploaded_file is not None:
    image = Image.open(uploaded_file).convert("RGB")

    # Image preprocessing
    input_size = 224
    transform = T.Compose([
        T.Resize(input_size),
        T.CenterCrop((input_size, input_size)),
        T.ToTensor(),
        norm
    ])

    # Pick a device up front so the input tensor and the model end up on the same device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    image_tensor = transform(image).unsqueeze(0).to(device)
    
    # Model selection
    model_option = st.selectbox(
        'Choose a model for feature upsampling',
        ('dino16', 'dinov2', 'clip', 'resnet50')
    )

    if st.button('Upsample Features'):
        # Load the selected FeatUp upsampler from torch.hub and move it to the chosen device
        upsampler = torch.hub.load("mhamilton723/FeatUp", model_option).to(device)
        upsampler.eval()

        # Run inference without tracking gradients
        with torch.no_grad():
            hr_feats = upsampler(image_tensor)        # upsampled high-resolution features
            lr_feats = upsampler.model(image_tensor)  # raw low-resolution backbone features

        # plot_feats renders the input image and PCA-projected low/high-resolution
        # features with matplotlib; assuming it draws onto the current figure,
        # hand that figure to Streamlit for display
        plot_feats(unnorm(image_tensor)[0].cpu(), lr_feats[0].cpu(), hr_feats[0].cpu())
        st.pyplot(plt.gcf())
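
# Note (not in the original snippet): to try the demo, save this script locally
# (e.g. as app.py -- the filename here is only an assumption) and launch it with
# Streamlit's CLI via `streamlit run app.py`; the featup package and its
# dependencies must already be installed.
#
# Optional refinement, sketched under the assumption that the same torch.hub
# entry point is used: caching the model load with st.cache_resource avoids
# reloading the upsampler every time the button is clicked.

@st.cache_resource
def load_upsampler(name: str, device: str):
    # Cached loader: torch.hub.load runs only once per (name, device) pair
    model = torch.hub.load("mhamilton723/FeatUp", name).to(device)
    model.eval()
    return model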