import os
import torch
from PIL import Image
from torchvision import transforms
import gradio as gr

# Earlier experiments (kept for reference):
#https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth
#os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
#model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True)
#model = torch.jit.load('https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth').eval().to(device)

# Load the TorchScript model on CPU and switch to evaluation mode.
model = torch.jit.load('Net2_Flip_jit.pt', map_location = torch.device('cpu'))
model.eval()

model_categories = ["cat", "dog"]  # verify order matches the model's output units
n_categories = len(model_categories)

#torch.hub.download_url_to_file('https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth', '/tmp/temporary_file')
#model = torch.hub.load('/tmp', 'temporary_file', pretrained=True)
#model.eval()

# Download example images from Wikimedia Commons.
torch.hub.download_url_to_file("https://upload.wikimedia.org/wikipedia/commons/5/5b/Dog_%28Canis_lupus_familiaris%29_%281%29.jpg", "dog1.jpg")
torch.hub.download_url_to_file("https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Golde33443.jpg/640px-Golde33443.jpg", "dog2.jpg")
torch.hub.download_url_to_file("https://upload.wikimedia.org/wikipedia/commons/c/c7/Tabby_cat_with_blue_eyes-3336579.jpg", "cat1.jpg")
torch.hub.download_url_to_file("https://upload.wikimedia.org/wikipedia/commons/9/9e/Domestic_cat.jpg", "cat2.jpg")


def inference(input_image):
    preprocess = transforms.Compose([
        transforms.Resize(size = (256, 256)),  # fixed-size resize, changed from transforms.Resize(256)
        #transforms.CenterCrop(224),
        transforms.ToTensor(),
        #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Print statements such as print(input_tensor.shape) were used while debugging
    # to check the shapes of the input tensor and the batch.
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model

    # Move the input and model to GPU for speed if available.
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')

    with torch.no_grad():
        # The model expects a batch dimension, so input_batch is passed rather than input_tensor.
        output = model(input_batch)

    # The output has unnormalized scores. To get probabilities, run a softmax on it.
    probabilities = torch.nn.functional.softmax(output[0], dim = 0)

    # Read the categories (now hard-coded in model_categories above):
    #with open("dog_cat.txt", "r") as f:
    #    categories = [s.strip() for s in f.readlines()]

    # Show the top categories for the image.
    top_prob, top_catid = torch.topk(probabilities, n_categories)
    result = {}
    for i in range(top_prob.size(0)):
        result[model_categories[top_catid[i]]] = top_prob[i].item()

    return result


inputs = gr.inputs.Image(type='pil')
outputs = gr.outputs.Label(type="confidences", num_top_classes = n_categories)

title = "STAT 430 Final Project App -- Made by Group DHZ"
description = "This is our Cat & Dog Classifier for the final project. The model we use was generated by our second neural network, augmented with the flipping technique, which gave the best accuracy. To use it, simply upload your image, or click one of the examples to load it. The authors are Xiongjie Dai (xdai12), Yu He (yuhe6), Mengjia Zeng (mengjia6)."

#article = "GhostNet: More Features from Cheap Operations | Github Repo"

" examples = [ ['dog1.jpg'], ['cat1.jpg'], ['dog2.jpg'], ['cat2.jpg'] ] gr.Interface( inference, inputs, outputs, title = title, description = description, examples = examples, analytics_enabled = False).launch( #debug = True # Enabled debug mode to see the stacktrace on Google Colab. )