import streamlit as st
import torch
import torchvision.transforms as T
from PIL import Image
import matplotlib.pyplot as plt

# Assuming the necessary packages (featup, clip, etc.) are installed and accessible
from featup.util import norm, unnorm
from featup.plotting import plot_feats

# Streamlit UI
st.title("Feature Upsampling Demo")

# File uploader
uploaded_file = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])

if uploaded_file is not None:
    image = Image.open(uploaded_file).convert("RGB")

    # Image preprocessing: resize, center-crop to 224x224, convert to tensor, normalize
    input_size = 224
    transform = T.Compose([
        T.Resize(input_size),
        T.CenterCrop((input_size, input_size)),
        T.ToTensor(),
        norm
    ])
    # Assuming CUDA is available; otherwise drop the .cuda() calls here and below
    image_tensor = transform(image).unsqueeze(0).cuda()

    # Model selection
    model_option = st.selectbox(
        'Choose a model for feature upsampling',
        ('dino16', 'dinov2', 'clip', 'resnet50')
    )

    if st.button('Upsample Features'):
        # Load the selected FeatUp upsampler from torch.hub
        upsampler = torch.hub.load("mhamilton723/FeatUp", model_option).cuda()

        # Compute high-resolution (upsampled) and low-resolution (backbone) features
        with torch.no_grad():
            hr_feats = upsampler(image_tensor)
            lr_feats = upsampler.model(image_tensor)

        # plot_feats draws a matplotlib figure; pass the current figure to Streamlit
        # so it renders inside the app (this step may need further customization)
        plot_feats(unnorm(image_tensor)[0], lr_feats[0], hr_feats[0])
        st.pyplot(plt.gcf())
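
# Usage note (assumes this script is saved as app.py; the filename is illustrative):
#   streamlit run app.py
# The first upsampling request downloads the selected FeatUp model via torch.hub,
# which may take a moment; subsequent runs reuse the cached weights.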