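"""Gradio demo: classify a dog photo into one of 120 breeds.

A frozen ResNet-50 backbone feeds a small two-layer head; the fine-tuned
weights are loaded from ./model/model.zip and inference runs on CPU or GPU.
"""
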
import gradio as gr
import torch
import torch.nn.functional as F
import torchvision
from torchvision import transforms


class net50(torch.nn.Module):
    """ResNet-50 backbone followed by a two-layer classification head."""

    def __init__(self, base_model, base_out_features, num_classes):
        super(net50, self).__init__()
        self.base_model = base_model
        self.linear1 = torch.nn.Linear(base_out_features, 512)
        self.output = torch.nn.Linear(512, num_classes)

    def forward(self, x):
        x = F.relu(self.base_model(x))   # frozen backbone output (1000-d ImageNet logits)
        x = F.relu(self.linear1(x))      # hidden layer
        return self.output(x)            # raw logits, one per breed

def get_default_device():
    """Use the GPU when one is available, otherwise fall back to the CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')


device = get_default_device()
PATH = "./model/model.zip"              # fine-tuned state_dict checkpoint
map_location = torch.device('cpu')      # allow loading a GPU-trained model on CPU
def predict_single(img):
    """Classify a single PIL image and return the predicted breed name."""
    xb = transform_image(img).to(device)    # preprocess into a 1-image batch
    with torch.no_grad():                   # inference only; no gradients needed
        preds = model(xb)
    _, kls = torch.max(preds, 1)            # index of the highest-scoring class
    print('Predicted :', breeds[kls.item()])
    return breeds[kls.item()]

def image_mod(image):
    """Gradio callback: delegate to predict_single."""
    return predict_single(image)

def transform_image(image):
    """Resize, tensorise, and normalise a PIL image; add a batch dimension."""
    my_transforms = transforms.Compose([
        transforms.Resize(500),                        # scale the shorter edge to 500 px
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],    # ImageNet mean
                             [0.229, 0.224, 0.225])])  # ImageNet std
    return my_transforms(image).unsqueeze(0)

# ImageNet backbone (torchvision >= 0.13 prefers weights=ResNet50_Weights.IMAGENET1K_V1)
res = torchvision.models.resnet50(pretrained=True)
for param in res.parameters():    # freeze every backbone layer
    param.requires_grad = False

model = net50(base_model=res, base_out_features=res.fc.out_features, num_classes=120)
model.load_state_dict(torch.load(PATH, map_location=map_location))
model.eval()                      # inference mode: disable dropout/batch-norm updates
# 120 breed names; the order must match the label indices used at training time.
breeds = ['Chihuahua',
 'Japanese spaniel',
 'Maltese dog',
 'Pekinese',
 'Shih Tzu',
 'Blenheim spaniel',
 'papillon',
 'toy terrier',
 'Rhodesian ridgeback',
 'Afghan hound',
 'basset',
 'beagle',
 'bloodhound',
 'bluetick',
 'black and tan coonhound',
 'Walker hound',
 'English foxhound',
 'redbone',
 'borzoi',
 'Irish wolfhound',
 'Italian greyhound',
 'whippet',
 'Ibizan hound',
 'Norwegian elkhound',
 'otterhound',
 'Saluki',
 'Scottish deerhound',
 'Weimaraner',
 'Staffordshire bullterrier',
 'American Staffordshire terrier',
 'Bedlington terrier',
 'Border terrier',
 'Kerry blue terrier',
 'Irish terrier',
 'Norfolk terrier',
 'Norwich terrier',
 'Yorkshire terrier',
 'wire haired fox terrier',
 'Lakeland terrier',
 'Sealyham terrier',
 'Airedale',
 'cairn',
 'Australian terrier',
 'Dandie Dinmont',
 'Boston bull',
 'miniature schnauzer',
 'giant schnauzer',
 'standard schnauzer',
 'Scotch terrier',
 'Tibetan terrier',
 'silky terrier',
 'soft coated wheaten terrier',
 'West Highland white terrier',
 'Lhasa',
 'flat coated retriever',
 'curly coated retriever',
 'golden retriever',
 'Labrador retriever',
 'Chesapeake Bay retriever',
 'German short haired pointer',
 'vizsla',
 'English setter',
 'Irish setter',
 'Gordon setter',
 'Brittany spaniel',
 'clumber',
 'English springer',
 'Welsh springer spaniel',
 'cocker spaniel',
 'Sussex spaniel',
 'Irish water spaniel',
 'kuvasz',
 'schipperke',
 'groenendael',
 'malinois',
 'briard',
 'kelpie',
 'komondor',
 'Old English sheepdog',
 'Shetland sheepdog',
 'collie',
 'Border collie',
 'Bouvier des Flandres',
 'Rottweiler',
 'German shepherd',
 'Doberman',
 'miniature pinscher',
 'Greater Swiss Mountain dog',
 'Bernese mountain dog',
 'Appenzeller',
 'EntleBucher',
 'boxer',
 'bull mastiff',
 'Tibetan mastiff',
 'French bulldog',
 'Great Dane',
 'Saint Bernard',
 'Eskimo dog',
 'malamute',
 'Siberian husky',
 'affenpinscher',
 'basenji',
 'pug',
 'Leonberg',
 'Newfoundland',
 'Great Pyrenees',
 'Samoyed',
 'Pomeranian',
 'chow',
 'keeshond',
 'Brabancon griffon',
 'Pembroke',
 'Cardigan',
 'toy poodle',
 'miniature poodle',
 'standard poodle',
 'Mexican hairless',
 'dingo',
 'dhole',
 'African hunting dog']
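
# Optional, a minimal sketch (not part of the original app): return the top-k
# breeds with softmax confidences instead of a single name. `predict_topk` and
# `k` are illustrative names, not project API.
def predict_topk(img, k=5):
    xb = transform_image(img).to(device)
    with torch.no_grad():
        probs = F.softmax(model(xb), dim=1)[0]   # class probabilities
    conf, idx = torch.topk(probs, k)             # k highest-scoring classes
    return {breeds[i]: float(c) for i, c in zip(idx.tolist(), conf.tolist())}
# Usage: gr.Interface(predict_topk, gr.Image(type="pil"), gr.Label(num_top_classes=5))
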
iface = gr.Interface(
    fn=image_mod,
    inputs=gr.Image(type="pil"),
    outputs="text",
    examples=["doggo1.png", "doggo2.jpg", "doggo3.png", "doggo4.png"])

iface.launch()
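
# Note: launch() serves on localhost by default; iface.launch(share=True)
# would also create a temporary public URL (a standard Gradio option).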