import torch
import numpy as np
import gradio as gr
from torchvision import transforms
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

from model import LightningModel

# Build the ResNet18 backbone and load the trained Lightning checkpoint on CPU.
pytorch_model = torch.hub.load('pytorch/vision', 'resnet18', weights=None)
pytorch_model.fc = torch.nn.Linear(512, 10)

model_pth = './epoch=22-step=16169.ckpt'
lightning_model = LightningModel.load_from_checkpoint(
    checkpoint_path=model_pth,
    model=pytorch_model,
    map_location=torch.device("cpu"),
)
lightning_model.eval()

# Normalization applied at inference time and its inverse for visualization.
# The mean/std values (0.50, 0.23) mirror the inverse normalization below and
# are assumed to match the transforms used during training.
normalize = transforms.Normalize(mean=[0.50, 0.50, 0.50], std=[0.23, 0.23, 0.23])
inv_normalize = transforms.Normalize(
    mean=[-0.50 / 0.23, -0.50 / 0.23, -0.50 / 0.23],
    std=[1 / 0.23, 1 / 0.23, 1 / 0.23],
)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')


def inference(input_img, transparency=0.5, target_layer_number=-1):
    # Convert the incoming numpy image to a normalized (1, 3, H, W) tensor.
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    input_tensor = transform(input_img).unsqueeze(0)

    # Class confidences: a plain forward pass needs no gradients.
    with torch.no_grad():
        outputs = lightning_model(input_tensor)
    probabilities = torch.softmax(outputs, dim=1).flatten()
    confidences = {classes[i]: float(probabilities[i]) for i in range(10)}

    # GradCAM needs gradients, so it runs outside the no_grad block.
    target_layers = [pytorch_model.layer2[target_layer_number]]
    cam = GradCAM(model=lightning_model, target_layers=target_layers)
    grayscale_cam = cam(input_tensor=input_tensor, targets=None)[0, :]

    # Undo the normalization to recover the original image for the overlay.
    img = inv_normalize(input_tensor.squeeze(0))
    rgb_img = np.clip(np.transpose(img.numpy(), (1, 2, 0)), 0, 1)
    visualization = show_cam_on_image(rgb_img, grayscale_cam,
                                      use_rgb=True, image_weight=transparency)
    return confidences, visualization


title = "ResNet18 trained on CIFAR10, with GradCAM"
description = "A simple Gradio interface to run inference on a ResNet18 model and visualize GradCAM results."
example1 = './cat.jpg'
example2 = './dog.jpg'
examples = [[example1, 0.5, -1], [example2, 0.5, -1]]

gradio_app = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(width=32, height=32, label="Input Image"),
        gr.Slider(0, 1, value=0.5, label="Opacity of GradCAM"),
        gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?"),
    ],
    outputs=[
        gr.Label(num_top_classes=3),
        gr.Image(width=32, height=32, label="Output"),
    ],
    title=title,
    description=description,
    examples=examples,
)

if __name__ == "__main__":
    gradio_app.launch()
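
# ---------------------------------------------------------------------------
# Reference sketch (assumption): `LightningModel` is imported from model.py,
# which is not shown here. Because the checkpoint is loaded with
# `load_from_checkpoint(..., model=pytorch_model)` and the wrapper is called
# directly for inference, model.py presumably contains a thin LightningModule
# wrapper roughly like the commented sketch below. The attribute names and the
# training_step/configure_optimizers details are illustrative assumptions, not
# the actual model.py contents.
#
#   import lightning as L
#   import torch
#   import torchmetrics
#
#   class LightningModel(L.LightningModule):
#       def __init__(self, model, learning_rate=1e-3):
#           super().__init__()
#           self.model = model
#           self.learning_rate = learning_rate
#           self.train_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)
#
#       def forward(self, x):
#           return self.model(x)
#
#       def training_step(self, batch, batch_idx):
#           features, targets = batch
#           logits = self(features)
#           loss = torch.nn.functional.cross_entropy(logits, targets)
#           self.train_acc(torch.argmax(logits, dim=1), targets)
#           self.log("train_loss", loss)
#           return loss
#
#       def configure_optimizers(self):
#           return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
# ---------------------------------------------------------------------------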