akhaliq (HF staff) committed
Commit 40a4180
1 Parent(s): aa66810

Create app.py

Files changed (1)
1. app.py +56 -0
app.py ADDED
@@ -0,0 +1,56 @@
+ import torch
+ import os
+ from PIL import Image
+ from torchvision import transforms
+ import gradio as gr
+
+ os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
+
+
+ model = torch.hub.load('pytorch/vision:v0.9.0', 'inception_v3', pretrained=True)
+ model.eval()
+
+ torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
+
+
+ # sample execution (requires torchvision)
+ def inference(input_image):
+     preprocess = transforms.Compose([
+         transforms.Resize(299),
+         transforms.CenterCrop(299),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ])
+     input_tensor = preprocess(input_image)
+     input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
+
+     # move the input and model to GPU for speed if available
+     if torch.cuda.is_available():
+         input_batch = input_batch.to('cuda')
+         model.to('cuda')
+
+     with torch.no_grad():
+         output = model(input_batch)
+     # The output has unnormalized scores. To get probabilities, run a softmax on it.
+     probabilities = torch.nn.functional.softmax(output[0], dim=0)
+     # Read the ImageNet categories
+     with open("imagenet_classes.txt", "r") as f:
+         categories = [s.strip() for s in f.readlines()]
+     # Show the top categories for the image
+     top5_prob, top5_catid = torch.topk(probabilities, 5)
+     result = {}
+     for i in range(top5_prob.size(0)):
+         result[categories[top5_catid[i]]] = top5_prob[i].item()
+     return result
+
+ inputs = gr.inputs.Image(type='pil')
+ outputs = gr.outputs.Label(type="confidences", num_top_classes=5)
+
+ title = "INCEPTION V3"
+ description = "Gradio demo for Inception v3, a convolutional network for image classification introduced in 2015 and trained on ImageNet. To use it, simply upload an image or click one of the examples to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1512.00567'>Rethinking the Inception Architecture for Computer Vision</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/inception.py'>Github Repo</a></p>"
+
+ examples = [
+     ['dog.jpg']
+ ]
+ gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()