import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
from PIL import Image
import gradio as gr
from featup.util import norm, unnorm, pca, remove_axes
from pytorch_lightning import seed_everything
import os


def plot_feats(image, lr, hr):
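    """Show the input image next to PCA visualizations of the original and upsampled features."""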
    assert len(image.shape) == len(lr.shape) == len(hr.shape) == 3
    seed_everything(0)
    [lr_feats_pca, hr_feats_pca], _ = pca([lr.unsqueeze(0), hr.unsqueeze(0)])
    fig, ax = plt.subplots(1, 3, figsize=(15, 5))
    ax[0].imshow(image.permute(1, 2, 0).detach().cpu())
    ax[0].set_title("Image")
    ax[1].imshow(lr_feats_pca[0].permute(1, 2, 0).detach().cpu())
    ax[1].set_title("Original Features")
    ax[2].imshow(hr_feats_pca[0].permute(1, 2, 0).detach().cpu())
    ax[2].set_title("Upsampled Features")
    remove_axes(ax)
    plt.tight_layout()
    plt.close(fig)  # Close plt to avoid additional empty plots
    return fig


if __name__ == "__main__":
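    # Cache torch hub checkpoints in a writable location (useful when running in a container)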
    os.environ['TORCH_HOME'] = '/tmp/.cache'

    options = ['dino16', 'vit', 'dinov2', 'clip', 'resnet50']
    image_input = gr.Image(label="Choose an image to featurize", type="pil", image_mode='RGB')
    model_option = gr.Radio(options, value="dino16", label='Choose a backbone to upsample')

    # Pre-load every backbone once at startup so switching models in the UI is fast
    models = {o: torch.hub.load("mhamilton723/FeatUp", o) for o in options}

    def upsample_features(image, model_option):
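        """Featurize the image with the chosen backbone and return a comparison figure."""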
        # Image preprocessing
        input_size = 224
        transform = T.Compose([
            T.Resize(input_size),
            T.CenterCrop((input_size, input_size)),
            T.ToTensor(),
            norm
        ])
        image_tensor = transform(image).unsqueeze(0).cuda()

        # Move the selected backbone to the GPU and extract features without tracking gradients
        upsampler = models[model_option].cuda()
        with torch.no_grad():
            hr_feats = upsampler(image_tensor)        # upsampled (high-resolution) features
            lr_feats = upsampler.model(image_tensor)  # original backbone (low-resolution) features
        # Return the model to the CPU to free GPU memory between requests
        upsampler.cpu()

        return plot_feats(unnorm(image_tensor)[0], lr_feats[0], hr_feats[0])


    demo = gr.Interface(fn=upsample_features,
                        inputs=[image_input, model_option],
                        outputs="plot",
                        title="Feature Upsampling Demo",
                        description="Upsample the features of an image with a selected FeatUp backbone and compare them to the original backbone features.")

    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)