akhaliq (HF staff) committed
Commit 45d6271
1 Parent(s): 78f5c86

Create app.py

Files changed (1)
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
+ import torch
+ import gradio as gr
+ import torchvision.transforms as transforms
+
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ resneXt = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resneXt')
+ utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
+
+ resneXt.eval().to(device)
+
+ def inference(img):
+     img_transforms = transforms.Compose(
+         [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
+     )
+     img = img_transforms(img)
+     with torch.no_grad():
+         # mean and std are not multiplied by 255 as they are in the training script;
+         # the torch dataloader reads data as bytes, whereas loading directly
+         # through PIL creates a tensor with floats in the [0, 1] range
+         mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
+         std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
+         img = img.float()
+         img = img.unsqueeze(0).sub_(mean).div_(std)
+
+     batch = torch.cat(
+         [img]
+     ).to(device)
+
+     with torch.no_grad():
+         output = torch.nn.functional.softmax(resneXt(batch), dim=1)
+
+     results = utils.pick_n_best(predictions=output, n=5)
+
+     return results
+
+ title = "ResNeXt101"
+ description = "Gradio demo for ResNeXt101, a ResNet in which the bottleneck 3x3 convolutions are replaced by 3x3 grouped convolutions, trained with mixed precision using Tensor Cores. To use it, simply upload an image or click on one of the examples below. Read more at the links below."
+
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1611.05431'>Aggregated Residual Transformations for Deep Neural Networks</a> | <a href='https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnext101-32x4d'>Github Repo</a></p>"
+
+ examples = [['food.jpeg']]
+ gr.Interface(inference, gr.inputs.Image(type="pil"), "text", title=title, description=description, article=article, examples=examples).launch(enable_queue=True)
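
As a rough sketch, the same preprocessing and top-5 lookup can be exercised outside the Gradio UI, assuming the NVIDIA torchhub checkpoints download successfully and a local image file (here the bundled food.jpeg example) is available:

import torch
import torchvision.transforms as transforms
from PIL import Image

# Sketch only: repeats the pipeline from app.py without launching the Gradio interface.
# Assumes food.jpeg sits next to this script and torch.hub has network access.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resneXt').eval().to(device)
utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')

img = transforms.Compose(
    [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)(Image.open("food.jpeg").convert("RGB"))

# ImageNet mean/std normalization in the [0, 1] range, matching app.py
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
batch = img.unsqueeze(0).sub_(mean).div_(std).to(device)

with torch.no_grad():
    probs = torch.nn.functional.softmax(model(batch), dim=1)
print(utils.pick_n_best(predictions=probs, n=5))  # top-5 (class, probability) pairs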