DimitrisKatos commited on
Commit
e65b6ac
·
1 Parent(s): 9346e82

initial commit

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ effnetb2_model.pth filter=lfs diff=lfs merge=lfs -text
__pycache__/model.cpython-310.pyc ADDED
Binary file (742 Bytes). View file
 
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
### 1. Imports and class names setup
import os

import torch
import gradio as gr

# Fixed: this import was commented out, but create_effnetb2_model() is called
# below — the app raised NameError at startup without it. Also removed a
# duplicated `import gradio as gr`.
from model import create_effnetb2_model
from timeit import default_timer as timer
from typing import Dict, Tuple

# Class labels, ordered to match the model's 10 output logits.
class_names = ['butterfly', 'cat', 'chicken', 'cow', 'dog',
               'elephant', 'horse', 'sheep', 'spider', 'squirrel']

### 2. Model and transforms preparation ###
# Build the EfficientNetB2 feature extractor and its matching image transforms.
effnetb2, effnetb2_transforms = create_effnetb2_model()

# Load the saved weights; map_location="cpu" so the app runs without a GPU.
effnetb2.load_state_dict(torch.load(f="effnetb2_model.pth",
                                    map_location=torch.device("cpu")))

### 3. Predict Function ###
effnetb2 = effnetb2.to('cpu')
25
def predict(img) -> Tuple[Dict, float]:
    """Run the EffNetB2 classifier on a single image.

    Returns a (class name -> probability) dict covering every class,
    plus the inference time in seconds rounded to 5 decimal places.
    """
    # Start the timer
    started = timer()

    # Preprocess and add the leading batch dimension the model expects.
    batch = effnetb2_transforms(img).unsqueeze(0)

    # eval mode + inference_mode: no dropout, no autograd bookkeeping.
    effnetb2.eval()
    with torch.inference_mode():
        # Forward pass, then turn the logits into probabilities.
        probs = torch.softmax(effnetb2(batch), dim=1)

    # Map every class name to its probability — the format Gradio's
    # Label output component requires.
    label_to_prob = {name: float(probs[0][idx])
                     for idx, name in enumerate(class_names)}

    # Total prediction time.
    elapsed = round(timer() - started, 5)

    return label_to_prob, elapsed
49
+
50
### 4. Gradio app ###

# Create title, description and article strings.
title = "AnimalsClassification "
# Fixed: the description previously said "pizza, steak or sushi" (left over
# from a food-classification tutorial) while this model classifies animals.
description = "An EfficientNetB2 feature extractor computer vision model to classify images into 10 animal classes."
article = "ModelDeployment"

# Build the example list from the repo's examples/ directory.
# Fixed: original referenced an undefined variable `animal_example`
# (NameError) and a non-existent "animal_classification/examples/" path;
# the example images live in "examples/" in this repo.
example_dir = "examples/"
example_list = [[example_dir + example] for example in os.listdir(example_dir)]

# Create the Gradio demo: one PIL image in, (label dict, prediction time) out.
demo = gr.Interface(fn=predict,                   # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # what are the inputs?
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],  # predict() has two outputs
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)

# Launch the demo!
demo.launch(debug=False,  # print errors locally?
            share=True)   # generate a publicly shareable URL?
effnetb2_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23f6d18d2f24ee08fddab5dbedbdc4ddc4f4ec88d7729dde671ef105e42dfd3c
3
+ size 31316986
examples/img1.jpg ADDED
examples/img2.jpg ADDED
examples/img3.jpg ADDED
model.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torchvision
3
+ from torch import nn
4
+
5
def create_effnetb2_model(num_classes: int = 10):
    """Create an EfficientNetB2 feature-extractor model and its transforms.

    The pretrained (ImageNet) backbone is frozen and the classifier head is
    replaced with a fresh Dropout + Linear layer sized for `num_classes`.

    Args:
        num_classes: Number of output classes. Defaults to 10 (the animal
            classes this app's saved weights were trained on), so existing
            callers are unaffected.

    Returns:
        A `(model, transforms)` tuple: the model and the torchvision
        preprocessing transforms matching its pretrained weights.
    """
    # 1. Setup the pretrained EffNetB2 weights
    effnetb2_weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT

    # 2. Setup the transforms that match those weights
    effnetb2_transforms = effnetb2_weights.transforms()

    # 3. Setup pretrained model instance
    effnetb2 = torchvision.models.efficientnet_b2(weights=effnetb2_weights)

    # 4. Freeze the backbone so only a new head would receive gradients
    for param in effnetb2.parameters():
        param.requires_grad = False

    # 5. Replace the classifier head (1408 = EffNetB2 feature dimension)
    effnetb2.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes, bias=True)
    )

    return effnetb2, effnetb2_transforms
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+