#!/usr/bin/env python
import torch
import numpy as np
import gradio as gr
from faiss import read_index
from PIL import Image, ImageOps
from datasets import load_dataset
import torchvision.transforms as T

from model import DINO

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

## Define Model and Dataset
dataset = load_dataset("ethz/food101")
model = DINO(batch_size_per_device=32, num_classes=1000).to(device)
model.load_state_dict(torch.load("./bin/model.ckpt", map_location=device)["state_dict"])
model.eval()  # inference only: disable dropout / batch-norm updates


def augment(img: np.ndarray) -> torch.Tensor:
    """
    Helper function to augment the image before we generate embeddings.

    Args:
        img (np.ndarray): Input image

    Returns:
        torch.Tensor: Transformed image batch of shape (1, 3, 224, 224)
    """
    img = Image.fromarray(img)
    if img.mode == "L":
        # Convert grayscale images to RGB so the model always receives three channels
        img = ImageOps.colorize(img, black="black", white="white")
    transforms = T.Compose(
        [T.ToTensor(), T.Resize(244), T.CenterCrop(224), T.Normalize([0.5], [0.5])]
    )
    return transforms(img).unsqueeze(0)


def search_index(input_image: np.ndarray, k: int = 1) -> list:
    """
    Retrieve the top k images for the given input image.

    Args:
        input_image (np.ndarray): Input image
        k (int): Number of images to fetch

    Returns:
        list: Top k images retrieved using the embedding generated from the input image
    """
    images = []
    with torch.no_grad():
        embedding = model(augment(input_image).to(device))
        index = read_index("./bin/dino.index")
        # FAISS expects a float32 NumPy array on the CPU
        _, results = index.search(embedding[0].cpu().numpy().reshape(1, -1), k)
        indices = results[0]
        for idx in indices[:k]:
            retrieved_img = dataset["train"][int(idx)]["image"]
            images.append(retrieved_img)
    return images


app = gr.Interface(
    search_index,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top K"),
    ],
    outputs=[
        gr.Gallery(label="Retrieved Images"),
    ],
)

if __name__ == "__main__":
    app.launch()
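
# ---------------------------------------------------------------------------
# Hypothetical sketch (not part of the original app): one way the
# "./bin/dino.index" file read above could have been built. The function name
# `build_index`, the number of indexed images, and the choice of a flat L2
# index are assumptions rather than the author's confirmed pipeline; it is
# shown for reference only and is never called by the Gradio app.
# ---------------------------------------------------------------------------
# from faiss import IndexFlatL2, write_index
#
# def build_index(num_images: int = 1000) -> None:
#     """Embed the first `num_images` training images and write a FAISS index."""
#     vectors = []
#     with torch.no_grad():
#         for i in range(num_images):
#             img = np.array(dataset["train"][i]["image"])
#             emb = model(augment(img).to(device))
#             vectors.append(emb[0].cpu().numpy().reshape(1, -1))
#     vectors = np.vstack(vectors).astype("float32")
#     # Exact L2 (Euclidean) search; the dimension is inferred from the embeddings
#     index = IndexFlatL2(vectors.shape[1])
#     index.add(vectors)
#     write_index(index, "./bin/dino.index")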