akhaliq committed
Commit f903f2f
Parent: 9758664

Create app.py

Files changed (1)
app.py +58 -0
app.py ADDED
@@ -0,0 +1,58 @@
+ import torch
+ model = torch.hub.load('pytorch/vision:v0.9.0', 'fcn_resnet101', pretrained=True)
+ model.eval()
+ # Download an example image from the PyTorch website
+ import urllib.request
+ url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
+ urllib.request.urlretrieve(url, filename)
+
+ from PIL import Image
+ from torchvision import transforms
+ import gradio as gr
+ import matplotlib.pyplot as plt
+
+
+ def inference(input_image):
+     preprocess = transforms.Compose([
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ])
+
+     input_tensor = preprocess(input_image)
+     input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
+
+     # move the input and model to GPU for speed if available
+     if torch.cuda.is_available():
+         input_batch = input_batch.to('cuda')
+         model.to('cuda')
+
+     with torch.no_grad():
+         output = model(input_batch)['out'][0]
+     output_predictions = output.argmax(0)
+
+     # create a color palette, selecting a color for each of the 21 classes
+     palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
+     colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
+     colors = (colors % 255).numpy().astype("uint8")
+
+     # plot the semantic segmentation predictions of the 21 classes in each color
+     r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size)
+     r.putpalette(colors)
+     plt.imshow(r)
+     return plt
+
+
+ title = "FCN-RESNET101"
+ description = "Gradio demo for FCN-ResNet101, a fully convolutional network with a ResNet-101 backbone. To use it, upload your own image or click one of the examples to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1605.06211'>Fully Convolutional Networks for Semantic Segmentation</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/fcn.py'>Github Repo</a></p>"
+
+ gr.Interface(
+     inference,
+     gr.inputs.Image(type="pil", label="Input"),
+     gr.outputs.Image(type="plot", label="Output"),
+     title=title,
+     description=description,
+     article=article,
+     examples=[
+         ["dog.jpg"]
+     ]).launch()