File size: 2,186 Bytes
5287bf1
 
bfd8285
 
 
 
 
 
 
 
 
 
 
 
5287bf1
358b6e7
bfd8285
 
 
 
5287bf1
6d7226f
 
 
 
 
 
 
 
 
bfd8285
 
 
 
5287bf1
 
 
 
 
bfd8285
 
 
6d7226f
 
 
 
 
 
 
 
 
 
 
 
 
 
bfd8285
5287bf1
bfd8285
5287bf1
 
6d7226f
f4b23ea
5287bf1
 
bfd8285
 
 
 
5287bf1
6d7226f
e36d373
5287bf1
 
 
 
bfd8285
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
#!/usr/bin/env python

import torch
import numpy as np
import gradio as gr
from faiss import read_index
from PIL import Image, ImageOps
from datasets import load_dataset
import torchvision.transforms as T

from model import DINO

# Run on GPU when available; all tensors and the model are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

## Define Model and Dataset
dataset = load_dataset("ethz/food101")
model = DINO(batch_size_per_device=32, num_classes=1000).to(device)
model.load_state_dict(torch.load("./bin/model.ckpt", map_location=device)["state_dict"])
# Inference only: switch off dropout / use running batch-norm statistics so the
# embeddings are deterministic and consistent with the prebuilt FAISS index.
model.eval()


def augment(img: np.ndarray) -> torch.Tensor:
    """
    Preprocess an input image into a normalized tensor batch for embedding.

    Any non-RGB input (grayscale "L", palette "P", "RGBA", "LA", ...) is
    converted to RGB so the 3-channel normalization and the model always
    receive a consistent input.

    Args:
        img (np.ndarray): Input image as an HxW or HxWxC array.

    Returns:
        torch.Tensor: Preprocessed batch of shape (1, 3, 224, 224).
    """
    pil_img = Image.fromarray(img)
    # Generalizes the previous grayscale-only ("L") special case: convert()
    # handles every mode, and for "L" it produces the same channel-duplicated
    # RGB image that ImageOps.colorize(black→white) did.
    if pil_img.mode != "RGB":
        pil_img = pil_img.convert("RGB")

    # NOTE(review): Resize(244) looks like a typo for the conventional 256,
    # but it must match the transform used when building ./bin/dino.index,
    # so it is deliberately left unchanged.
    transforms = T.Compose(
        [T.ToTensor(), T.Resize(244), T.CenterCrop(224), T.Normalize([0.5], [0.5])]
    )

    # unsqueeze adds the batch dimension the model expects.
    return transforms(pil_img).unsqueeze(0)


def search_index(input_image: np.ndarray, k: int = 1) -> list:
    """
    Retrieve the top-k most similar dataset images for a given input image.

    Args:
        input_image (np.ndarray): Query image.
        k (int): Number of nearest neighbours to fetch.

    Returns:
        list: The top-k images from the train split, ranked by similarity of
          their embeddings to the query embedding.
    """
    # Gradio sliders may deliver the value as a float; FAISS needs an int.
    k = int(k)

    with torch.no_grad():
        embedding = model(augment(input_image).to(device))

    # Bug fix: np.array() on a CUDA tensor raises; move to CPU before
    # converting, so this works on both CPU and GPU devices.
    query = embedding[0].reshape(1, -1).cpu().numpy()

    # Loaded per call; cheap enough for a demo, but could be cached at module
    # level if request latency matters.
    faiss_index = read_index("./bin/dino.index")
    _, results = faiss_index.search(query, k)

    # results has shape (1, k): one row of neighbour ids for our single query.
    images = []
    for neighbor_id in results[0]:
        images.append(dataset["train"][int(neighbor_id)]["image"])
    return images


# Gradio UI: an image and a top-k slider in, a gallery of retrieved images out.
input_widgets = [
    gr.Image(label="Input Image"),
    gr.Slider(minimum=1, maximum=10, value=4, step=1, label="Top K"),
]
output_widgets = [
    gr.Gallery(label="Retrieved Images"),
]

app = gr.Interface(search_index, inputs=input_widgets, outputs=output_widgets)

if __name__ == "__main__":
    app.launch()