hasibzunair committed
Commit
3b639bc
1 Parent(s): 548c65d

update app.py

Files changed (3)
  1. README.md +3 -1
  2. app.py +94 -23
  3. requirements.txt +2 -1
README.md CHANGED
@@ -12,4 +12,6 @@ license: afl-3.0
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
-Demo app updated.
+### References
+* https://huggingface.co/docs/hub/spaces#manage-app-with-github-actions
+* https://www.gradio.app/image_classification_in_pytorch/
app.py CHANGED
@@ -1,30 +1,101 @@
 import torch
-import requests
+from PIL import Image
+from torchvision import transforms
 import gradio as gr
+import os
 
-from torchvision import transforms
 
 """
-Built following https://www.gradio.app/image_classification_in_pytorch/.
+Built following:
+https://huggingface.co/spaces/pytorch/ResNet/tree/main
+https://www.gradio.app/image_classification_in_pytorch/
 """
 
-# Load model
-model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
-
-# Download human-readable labels for ImageNet.
-response = requests.get("https://git.io/JJkYN")
-labels = response.text.split("\n")
-
-def predict(inp):
-    inp = transforms.ToTensor()(inp).unsqueeze(0)
-    with torch.no_grad():
-        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
-    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
-    return confidences
-
-gr.Interface(fn=predict,
-             inputs=gr.inputs.Image(type="pil"),
-             outputs=gr.outputs.Label(num_top_classes=3),
-             examples=["example1.jpg", "example2.jpg"],
-             theme="default",
-             css=".footer{display:none !important}").launch()
+os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
+
+model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
+model.eval()
+
+# Download an example image from the pytorch website
+torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
+
+def inference(input_image):
+    preprocess = transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+    ])
+    input_tensor = preprocess(input_image)
+    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
+
+    # move the input and model to GPU for speed if available
+    if torch.cuda.is_available():
+        input_batch = input_batch.to('cuda')
+        model.to('cuda')
+
+    with torch.no_grad():
+        output = model(input_batch)
+    # The output has unnormalized scores. To get probabilities, you can run a softmax on it.
+    probabilities = torch.nn.functional.softmax(output[0], dim=0)
+
+    # Read the categories
+    with open("imagenet_classes.txt", "r") as f:
+        categories = [s.strip() for s in f.readlines()]
+    # Show top categories per image
+    top5_prob, top5_catid = torch.topk(probabilities, 5)
+    result = {}
+    for i in range(top5_prob.size(0)):
+        result[categories[top5_catid[i]]] = top5_prob[i].item()
+    return result
+
+inputs = gr.inputs.Image(type='pil')
+outputs = gr.outputs.Label(type="confidences", num_top_classes=5)
+
+title = "An Image Classification Demo with ResNet"
+description = "Demo of a ResNet image classifier trained on the ImageNet dataset. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition</a> | <a href='https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py' target='_blank'>Github Repo</a></p>"
+
+gr.Interface(inference,
+             inputs,
+             outputs,
+             examples=["example1.jpg", "example2.jpg"],
+             title=title,
+             description=description,
+             article=article,
+             analytics_enabled=False).launch()
+
+# import torch
+# import requests
+# import gradio as gr
+
+# from torchvision import transforms
+
+# """
+# Built following https://www.gradio.app/image_classification_in_pytorch/.
+# """
+
+# # Load model
+# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+
+# # Download human-readable labels for ImageNet.
+# response = requests.get("https://git.io/JJkYN")
+# labels = response.text.split("\n")
+
+# def predict(inp):
+#     inp = transforms.ToTensor()(inp).unsqueeze(0)
+#     with torch.no_grad():
+#         prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+#     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+#     return confidences
+
+# title = "Image Classifier"
+
+# article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition</a> | <a href='https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py' target='_blank'>Github Repo</a></p>"
+
+# gr.Interface(fn=predict,
+#              inputs=gr.inputs.Image(type="pil"),
+#              outputs=gr.outputs.Label(num_top_classes=3),
+#              examples=["example1.jpg", "example2.jpg"],
+#              theme="default",
+#              css=".footer{display:none !important}").launch()
requirements.txt CHANGED
@@ -1,2 +1,3 @@
 torch
-torchvision
+torchvision
+Pillow
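
For reviewers who want to sanity-check the new inference path outside the Space, here is a minimal standalone sketch (not part of this commit) that mirrors the preprocessing and top-5 lookup added to app.py. It assumes network access for the torch.hub downloads and the packages listed in requirements.txt:

```python
import torch
from PIL import Image
from torchvision import transforms

# Same ResNet-18 checkpoint and ImageNet assets used by the updated app.py.
model = torch.hub.load("pytorch/vision:v0.10.0", "resnet18", pretrained=True).eval()
torch.hub.download_url_to_file(
    "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt",
    "imagenet_classes.txt")
torch.hub.download_url_to_file(
    "https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")

# Standard ImageNet preprocessing, identical to the pipeline in inference().
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

with open("imagenet_classes.txt") as f:
    categories = [line.strip() for line in f]

with torch.no_grad():
    logits = model(preprocess(Image.open("dog.jpg").convert("RGB")).unsqueeze(0))
probabilities = torch.nn.functional.softmax(logits[0], dim=0)

# Print the top-5 labels with their probabilities, as the Gradio Label output would show.
top5_prob, top5_catid = torch.topk(probabilities, 5)
for p, i in zip(top5_prob.tolist(), top5_catid.tolist()):
    print(f"{categories[i]}: {p:.3f}")
```

Running it prints five ImageNet labels with probabilities for the bundled dog.jpg example, the same dictionary contents the `inference` function returns to the Gradio interface.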