Samuel Diaz committed
Commit 7b29154
1 Parent(s): a387845

Fixed model

Files changed (2):
  1. app.py +18 -17
  2. model/model.zip +2 -2
app.py CHANGED
@@ -7,6 +7,7 @@ from sklearn.preprocessing import LabelEncoder
 import torch
 import torch.nn.functional as F
 from torchvision import transforms
+import torchvision
 import torchvision.models as models
 from torchvision.datasets import ImageFolder
 from torch.utils.data.dataset import Dataset
@@ -16,21 +17,17 @@ from sklearn.model_selection import train_test_split
 from tqdm.notebook import tqdm
 
 
-
-class DogBreedPretrainedWideResnet(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-        self.network = models.wide_resnet50_2(pretrained=True)
-        # Replace last layer
-        num_ftrs = self.network.fc.in_features
-        self.network.fc = torch.nn.Sequential(
-            torch.nn.Linear(num_ftrs, 120),
-            torch.nn.LogSoftmax(dim=1)
-        )
-
-    def forward(self, xb):
-        return self.network(xb)
+class net50(torch.nn.Module):
+    def __init__(self, base_model, base_out_features, num_classes):
+        super(net50,self).__init__()
+        self.base_model=base_model
+        self.linear1 = torch.nn.Linear(base_out_features, 512)
+        self.output = torch.nn.Linear(512,num_classes)
+    def forward(self,x):
+        x = F.relu(self.base_model(x))
+        x = F.relu(self.linear1(x))
+        x = self.output(x)
+        return x
 
 def get_default_device():
     if torch.cuda.is_available():
@@ -61,7 +58,11 @@ def transform_image(image_bytes):
                              [0.229, 0.224, 0.225])])
     return my_transforms(image_bytes).unsqueeze(0)
 
-model = DogBreedPretrainedWideResnet()
+res = torchvision.models.resnet50(pretrained=True)
+for param in res.parameters(): ## Freezing layers
+    param.requires_grad=False
+
+model = net50(base_model=res, base_out_features=res.fc.out_features, num_classes=120)
 model.load_state_dict(torch.load(PATH,map_location))
 model.eval()
 breeds=['Chihuahua',
@@ -184,6 +185,6 @@ breeds=['Chihuahua',
         'dingo',
         'dhole',
         'African hunting dog']
-iface = gr.Interface(image_mod, gr.Image(type="pil"), "text", examples=["doggo1.jpg","doggo2.jpg","doggo3.png","doggo4.png"])
+iface = gr.Interface(image_mod, gr.Image(type="pil"), "text", examples=["doggo1.png","doggo2.jpg","doggo3.png","doggo4.png"])
 
 iface.launch()
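Context for the app.py change above: the commit replaces a fine-tuned Wide ResNet-50-2 (fc layer swapped for a 120-way LogSoftmax head) with a frozen ResNet-50 backbone whose 1000-dimensional output feeds a new two-layer head. The sketch below is a minimal, self-contained illustration of how the new net50 wrapper is instantiated and what shapes flow through it; the dummy 224x224 input and the final softmax are assumptions for illustration, not code from app.py.

# Illustrative sketch only -- mirrors the net50 class added in this commit.
import torch
import torch.nn.functional as F
import torchvision.models as models

class net50(torch.nn.Module):
    def __init__(self, base_model, base_out_features, num_classes):
        super(net50, self).__init__()
        self.base_model = base_model                              # frozen ResNet-50
        self.linear1 = torch.nn.Linear(base_out_features, 512)    # 1000 -> 512
        self.output = torch.nn.Linear(512, num_classes)           # 512 -> 120 breed scores

    def forward(self, x):
        x = F.relu(self.base_model(x))   # ResNet-50 classifier output (1000-d)
        x = F.relu(self.linear1(x))
        return self.output(x)            # raw scores; no LogSoftmax, unlike the old model

res = models.resnet50(pretrained=True)   # newer torchvision prefers weights=... instead
for param in res.parameters():           # only the two new Linear layers stay trainable
    param.requires_grad = False

model = net50(base_model=res, base_out_features=res.fc.out_features, num_classes=120)
model.eval()

dummy = torch.randn(1, 3, 224, 224)      # assumed input size after the app's transforms
with torch.no_grad():
    scores = model(dummy)                # shape: (1, 120)
    probs = F.softmax(scores, dim=1)     # softmax added here for illustration only
print(probs.argmax(dim=1).item())        # index into the breeds list

Note that res.fc.out_features is 1000 for torchvision's ResNet-50, which is why the first head layer maps 1000 -> 512.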
model/model.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7b67545692a07c4d4e255343f7fa5ff6a327c56ac4bcca38efc2867c8eb8a38
-size 268684285
+oid sha256:6ac024f745a459387e271e7be9ceba0403f19b5caa78138926350b667caad5ff
+size 104830125
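The model/model.zip change only swaps the Git LFS pointer; the drop from roughly 269 MB to 105 MB is consistent with the architecture change (ResNet-50 has about 25.6M parameters plus the small new head, versus about 69M for Wide ResNet-50-2). As a generic sanity check, not part of this repo, a local checkout can be verified against the new pointer like this:

# Illustrative sketch: confirm the local LFS object matches the updated pointer.
import hashlib
import os

EXPECTED_OID = "6ac024f745a459387e271e7be9ceba0403f19b5caa78138926350b667caad5ff"
EXPECTED_SIZE = 104830125
PATH = "model/model.zip"

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the large archive never has to fit in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch: stale or partial download?"
assert sha256_of(PATH) == EXPECTED_OID, "hash mismatch: run `git lfs pull` to fetch the updated model"
print("model/model.zip matches the new LFS pointer")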