Muhammad Nouman Khan committed on
Commit
62bcb6f
1 Parent(s): 82bd341

Uploading Model, Config

Browse files
Files changed (5) hide show
  1. alexnet_model_v1.pth +3 -0
  2. app.py +28 -0
  3. car.jpeg +0 -0
  4. frog.jpeg +0 -0
  5. model.py +42 -0
alexnet_model_v1.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5b4d32609c7c235550d9db88bbc70f9be1f9e96cbbce85e7d8ce93502636bf3
3
+ size 228185434
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio as gr
3
+ from model import AlexNet
4
+ from torchvision import transforms
5
+
6
+ model_path = './alexnet_model_v1.pth'
7
+ model = AlexNet()
8
+ model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
9
+ model.eval()
10
+
11
+ labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
12
+
13
+
14
+ def predict(inp):
15
+ inp = transforms.ToTensor()(inp).unsqueeze(0)
16
+ with torch.no_grad():
17
+ prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
18
+ confidences = {labels[i]: float(prediction[i]) for i in range(10)}
19
+ return confidences
20
+
21
+
22
+
23
+ gr.Interface(fn=predict,
24
+ inputs=gr.components.Image(type="pil"),
25
+ outputs=gr.components.Label(num_top_classes=5),
26
+ examples=["frog.jpeg", "car.jpeg"],
27
+ theme="default",
28
+ css=".footer{display:none !important}").launch()
car.jpeg ADDED
frog.jpeg ADDED
model.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.optim as optim
4
+ import torchvision.transforms as transforms
5
+ from torchvision.datasets import CIFAR10
6
+ from torch.utils.data import DataLoader
7
+
8
+ class AlexNet(nn.Module):
9
+ def __init__(self, num_classes=10):
10
+ super(AlexNet, self).__init__()
11
+ self.features = nn.Sequential(
12
+ nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
13
+ nn.ReLU(inplace=True),
14
+ nn.MaxPool2d(kernel_size=3, stride=2),
15
+ nn.Conv2d(64, 192, kernel_size=5, padding=2),
16
+ nn.ReLU(inplace=True),
17
+ nn.MaxPool2d(kernel_size=3, stride=2),
18
+ nn.Conv2d(192, 384, kernel_size=3, padding=1),
19
+ nn.ReLU(inplace=True),
20
+ nn.Conv2d(384, 256, kernel_size=3, padding=1),
21
+ nn.ReLU(inplace=True),
22
+ nn.Conv2d(256, 256, kernel_size=3, padding=1),
23
+ nn.ReLU(inplace=True),
24
+ nn.MaxPool2d(kernel_size=3, stride=2),
25
+ )
26
+ self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
27
+ self.classifier = nn.Sequential(
28
+ nn.Dropout(),
29
+ nn.Linear(256 * 6 * 6, 4096),
30
+ nn.ReLU(inplace=True),
31
+ nn.Dropout(),
32
+ nn.Linear(4096, 4096),
33
+ nn.ReLU(inplace=True),
34
+ nn.Linear(4096, num_classes),
35
+ )
36
+
37
+ def forward(self, x):
38
+ x = self.features(x)
39
+ x = self.avgpool(x)
40
+ x = torch.flatten(x, 1)
41
+ x = self.classifier(x)
42
+ return x