mattritchey and akhaliq (HF staff) committed
Commit 9580bb0 · 0 parent(s)

Duplicate from pytorch/vgg-nets


Co-authored-by: AK <akhaliq@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +27 -0
  2. README.md +12 -0
  3. app.py +62 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
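
These patterns route large binary artifacts (model weights, archives, serialized data) through Git LFS instead of plain Git. As a rough illustration of how such globs select filenames, here is a minimal Python sketch using fnmatch; the helper name tracked_by_lfs and the pattern subset are illustrative, and Git's own wildmatch rules differ in details (e.g. for saved_model/**/*), so this is only an approximation:

import fnmatch

# Illustrative subset of the LFS-tracked globs from the .gitattributes above.
lfs_patterns = ["*.bin", "*.pt", "*.pth", "*.zip", "*tfevents*"]

def tracked_by_lfs(filename):
    # A filename goes through LFS if it matches any tracked glob.
    return any(fnmatch.fnmatch(filename, pattern) for pattern in lfs_patterns)

print(tracked_by_lfs("vgg11.pth"))  # True: matched by *.pth
print(tracked_by_lfs("app.py"))     # False: handled by plain Git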
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Vgg Nets
+ emoji: 👀
+ colorFrom: pink
+ colorTo: indigo
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ duplicated_from: pytorch/vgg-nets
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,62 @@
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+ import gradio as gr
+ import os
+
+ os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
+
+ torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
+
+ model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg11', pretrained=True)
+ # or any of these variants
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg11_bn', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg13', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg13_bn', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg16', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg16_bn', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg19', pretrained=True)
+ # model = torch.hub.load('pytorch/vision:v0.9.0', 'vgg19_bn', pretrained=True)
+ model.eval()
+ def inference(input_image):
+     # Standard ImageNet preprocessing: resize, center-crop, convert, normalize
+     preprocess = transforms.Compose([
+         transforms.Resize(256),
+         transforms.CenterCrop(224),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ])
+     input_tensor = preprocess(input_image)
+     input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
+
+     # move the input and model to GPU for speed if available
+     if torch.cuda.is_available():
+         input_batch = input_batch.to('cuda')
+         model.to('cuda')
+
+     with torch.no_grad():
+         output = model(input_batch)
+     # The output has unnormalized scores; run a softmax on it to get probabilities
+     probabilities = torch.nn.functional.softmax(output[0], dim=0)
+
+     # Read the categories
+     with open("imagenet_classes.txt", "r") as f:
+         categories = [s.strip() for s in f.readlines()]
+     # Show the top 5 categories for the image
+     top5_prob, top5_catid = torch.topk(probabilities, 5)
+     result = {}
+     for i in range(top5_prob.size(0)):
+         result[categories[top5_catid[i]]] = top5_prob[i].item()
+     return result
+
+ inputs = gr.inputs.Image(type='pil')
+ outputs = gr.outputs.Label(type="confidences", num_top_classes=5)
+
+ title = "VGG-NETS"
+ description = "Gradio demo for VGG nets, the award-winning ConvNets from the 2014 ImageNet ILSVRC challenge. To use it, upload an image or click one of the examples to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1409.1556'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py'>GitHub Repo</a></p>"
+
+ examples = [
+     ['dog.jpg']
+ ]
+ gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
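
As a quick sanity check outside the Gradio UI, the inference function can be called directly. A minimal sketch, assuming the startup downloads above have already run so that dog.jpg and imagenet_classes.txt exist in the working directory (the names img and top5 are illustrative):

from PIL import Image

img = Image.open("dog.jpg")  # example image downloaded at startup
top5 = inference(img)        # dict mapping the five most likely labels to probabilities
print(top5)                  # a dog breed such as 'Samoyed' should rank near the top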
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ torchvision
+ Pillow