Samuel Diaz committed on
Commit
dd693f2
1 Parent(s): 8153799

Fixed Model

Files changed (6)
  1. app.py +20 -72
  2. doggo1.jpg +0 -0
  3. doggo2.jpg +0 -0
  4. doggo3.png +0 -0
  5. doggo4.png +0 -0
  6. model/model.zip +2 -2
app.py CHANGED
@@ -1,6 +1,4 @@
 import gradio as gr
-import os
-import io
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
@@ -8,7 +6,6 @@ from PIL import Image
 from sklearn.preprocessing import LabelEncoder
 import torch
 import torch.nn.functional as F
-import torchvision
 from torchvision import transforms
 import torchvision.models as models
 from torchvision.datasets import ImageFolder
@@ -16,38 +13,11 @@ from torch.utils.data.dataset import Dataset
 from torch.utils.data import Dataset, random_split, DataLoader
 from torch.utils.data import DataLoader
 from sklearn.model_selection import train_test_split
-import torchmetrics
 from tqdm.notebook import tqdm
 
-class ImageClassificationBase(torch.nn.Module):
-    # training step
-    def training_step(self, batch):
-        img, targets = batch
-        out = self(img)
-        loss = F.nll_loss(out, targets)
-        return loss
-
-    # validation step
-    def validation_step(self, batch):
-        img, targets = batch
-        out = self(img)
-        loss = F.nll_loss(out, targets)
-        acc = accuracy(out, targets)
-        return {'val_acc':acc.detach(), 'val_loss':loss.detach()}
-
-    # validation epoch end
-    def validation_epoch_end(self, outputs):
-        batch_losses = [x['val_loss'] for x in outputs]
-        epoch_loss = torch.stack(batch_losses).mean()
-        batch_accs = [x['val_acc'] for x in outputs]
-        epoch_acc = torch.stack(batch_accs).mean()
-        return {'val_loss':epoch_loss.item(), 'val_acc':epoch_acc.item()}
-
-    # print result end epoch
-    def epoch_end(self, epoch, result):
-        print("Epoch [{}] : train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result["train_loss"], result["val_loss"], result["val_acc"]))
+
 
-class DogBreedPretrainedWideResnet(ImageClassificationBase):
+class DogBreedPretrainedWideResnet(torch.nn.Module):
     def __init__(self):
         super().__init__()
 
@@ -62,60 +32,38 @@ class DogBreedPretrainedWideResnet(ImageClassificationBase):
     def forward(self, xb):
         return self.network(xb)
 
-def predict_single(img):
-    xb = transform_image(img) # adding extra dimension
-    xb = to_device(img, device)
-    preds = model(xb) # change model object here
-    predictions = preds[0]
-
-    max_val, kls = torch.max(predictions, dim=0)
-    print('Predicted :', breeds[kls])
-    plt.imshow(img.permute(1,2,0))
-    plt.show()
-
 def get_default_device():
     if torch.cuda.is_available():
         return torch.device('cuda')
     else:
         return torch.device('cpu')
-
-def to_device(data, device):
-    if isinstance(data, (list, tuple)):
-        return [to_device(d, device) for d in data]
-    else:
-        return data.to(device, non_blocking=True)
-
-def accuracy(outputs, labels):
-    _, preds = torch.max(outputs, dim=1)
-    return torch.tensor(torch.sum(preds == labels).item() / len(preds))
 
+
+device = get_default_device()
+PATH = "./model/model.zip"
+map_location=torch.device('cpu')
+def predict_single(img):
+    xb = transform_image(img) # Transforming image to Tensor
+    xb = xb.to(device)
+    preds = model(xb) # change model object here
+    max_val, kls = torch.max(preds, 1)
+    print('Predicted :', breeds[kls])
+    return breeds[kls]
+
 def image_mod(image):
-    im_resize = image.resize((500, 500))
-    buf = io.BytesIO()
-    im_resize.save(buf, format='JPEG')
-    byte_im = buf.getvalue()
-    return predict_single(byte_im)
+    return predict_single(image)
 
 def transform_image(image_bytes):
-    my_transforms = transforms.Compose([transforms.Resize(255),
-                                        transforms.CenterCrop(224),
+    my_transforms = transforms.Compose([transforms.Resize((500)),
                                         transforms.ToTensor(),
                                         transforms.Normalize(
                                             [0.485, 0.456, 0.406],
                                             [0.229, 0.224, 0.225])])
-    image = Image.open(io.BytesIO(image_bytes))
-    return my_transforms(image).unsqueeze(0)
-
-device = get_default_device()
-PATH = "./model/model.zip"
-map_location=torch.device('cpu')
-test_transform = transforms.Compose([
-    transforms.Resize((224,224)),
-    transforms.ToTensor(),
-    # transforms.Normalize(*imagenet_stats, inplace=True)
-])
+    return my_transforms(image_bytes).unsqueeze(0)
+
 model = DogBreedPretrainedWideResnet()
 model.load_state_dict(torch.load(PATH,map_location))
+model.eval()
 breeds=['Chihuahua',
 'Japanese spaniel',
 'Maltese dog',
@@ -236,6 +184,6 @@ breeds=['Chihuahua',
 'dingo',
 'dhole',
 'African hunting dog']
-iface = gr.Interface(image_mod, gr.Image(type="pil"), "image")
+iface = gr.Interface(image_mod, gr.Image(type="pil"), "text", examples=["doggo1.jpg","doggo2.jpg","doggo3.png","doggo4.png"])
 
 iface.launch()
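For context on what the fix changes: the old predict_single moved the raw PIL image (not the transformed tensor) to the device and leaned on helpers (to_device, accuracy) from the deleted training base class, while image_mod fed JPEG bytes into the model. The new path is a straight PIL image -> tensor -> logits -> label pipeline. Below is a minimal standalone sketch of that path outside Gradio, assuming the model and breeds defined in app.py are in scope; "sample.jpg" is a hypothetical test image, not part of this commit.

# Minimal sketch of the fixed inference path, under the assumptions above.
import torch
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(500),                      # scale shorter side to 500 px
    transforms.ToTensor(),                       # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406],  # ImageNet channel means
                         [0.229, 0.224, 0.225]), # ImageNet channel stds
])

img = Image.open("sample.jpg").convert("RGB")    # force 3 channels (PNGs may be RGBA)
xb = preprocess(img).unsqueeze(0)                # add batch dimension: [1, 3, H, W]

model.eval()                                     # inference behavior for dropout/batchnorm
with torch.no_grad():                            # skip autograd bookkeeping
    preds = model(xb)
    _, kls = torch.max(preds, 1)                 # argmax over the class dimension
print(breeds[kls])                               # a 1-element tensor can index a list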
 
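The interface change is what makes the output usable: the old call declared an "image" output while image_mod returned the result of a predict_single that only printed and plotted (i.e. None); the new predict_single returns the breed name as a string, so the output component is now "text", and the four committed photos are wired in as clickable examples. A sketch of the same wiring spelled out with keyword arguments, using only standard gr.Interface parameters:

# Equivalent Gradio wiring to the diff above, with keyword arguments.
# image_mod and the doggo example files come from this commit.
import gradio as gr

iface = gr.Interface(
    fn=image_mod,                    # receives a PIL.Image (type="pil")
    inputs=gr.Image(type="pil"),
    outputs="text",                  # predict_single returns a breed-name string
    examples=["doggo1.jpg", "doggo2.jpg", "doggo3.png", "doggo4.png"],
)
iface.launch()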
doggo1.jpg ADDED
doggo2.jpg ADDED
doggo3.png ADDED
doggo4.png ADDED
model/model.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25145664111db820344b774c5f1b693e8daeaf119dafc5a55d2890c83a438126
-size 268701569
+oid sha256:c7b67545692a07c4d4e255343f7fa5ff6a327c56ac4bcca38efc2867c8eb8a38
+size 268684285
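model/model.zip is tracked with Git LFS, so the diff touches only the pointer file: the actual weights are addressed by their SHA-256 and byte size. A small sketch for checking that a locally fetched model.zip matches the new pointer; the path is the one app.py uses, and the oid and size are copied from the pointer above.

# Verify a local model.zip against the new LFS pointer.
import hashlib
import os

PATH = "./model/model.zip"
EXPECTED_OID = "c7b67545692a07c4d4e255343f7fa5ff6a327c56ac4bcca38efc2867c8eb8a38"
EXPECTED_SIZE = 268684285

sha = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch: file may still be an LFS pointer"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.zip matches the LFS pointer")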