import torch
import numpy as np
import gradio as gr
from faiss import read_index
from PIL import Image, ImageOps
from datasets import load_dataset
import torchvision.transforms as T

from model import DINO

# Preprocessing pipeline: resize, center-crop to 224x224, and normalize.
transforms = T.Compose(
    [T.ToTensor(), T.Resize(244), T.CenterCrop(224), T.Normalize([0.5], [0.5])]
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

dataset = load_dataset("ethz/food101")

# Load the DINO model weights from the local checkpoint and switch to eval mode.
model = DINO(batch_size_per_device=32, num_classes=1000).to(device)
model.load_state_dict(torch.load("./bin/model.ckpt", map_location=device)["state_dict"])
model.eval()


def augment(img, transforms=transforms) -> torch.Tensor:
    img = Image.fromarray(img)
    if img.mode == "L":
        # Convert grayscale image to RGB by duplicating the single channel three times
        img = ImageOps.colorize(img, black="black", white="white")
    return transforms(img).unsqueeze(0)


def search_index(input_image, k: int) -> str:
    # Embed the query image, then look up its k nearest neighbors in the FAISS index.
    with torch.no_grad():
        embedding = model(augment(input_image).to(device))

    index = read_index("./bin/dino.index")
    _, I = index.search(np.array(embedding[0].cpu().reshape(1, -1)), k)
    indices = I[0]

    answer = ""
    for retrieved_idx in indices[:3]:
        answer += f"{retrieved_idx} "
        # retrieved_img = dataset["train"][int(retrieved_idx)]["image"]
    return answer


app = gr.Interface(
    search_index,
    inputs=[gr.Image(), gr.Slider(value=3, minimum=1, step=1)],
    outputs="text",
)

if __name__ == "__main__":
    app.launch()