Mazen Omar committed
Commit 727da9f · 1 Parent(s): 9f876bb

Livine 2.0

__pycache__/app.cpython-310.pyc ADDED
Binary file (1.45 kB)

__pycache__/app.cpython-311.pyc ADDED
Binary file (2.76 kB)

__pycache__/main.cpython-310.pyc ADDED
Binary file (583 Bytes)

__pycache__/main.cpython-311.pyc ADDED
Binary file (752 Bytes)

__pycache__/model.cpython-310.pyc ADDED
Binary file (1.2 kB)
app.py CHANGED
@@ -1,8 +1,7 @@
 ### 1. Imports and class names setup ###
-import gradio as gr
 import os
 import torch
-
+import gradio as gr
 from model import create_effnetb2_model
 from timeit import default_timer as timer
 from typing import Tuple, Dict
@@ -15,18 +14,18 @@ with open("class_names.txt", "r") as f: # reading them in from class_names.txt
 
 # Create model
 effnetb2, effnetb2_transforms = create_effnetb2_model(
-    num_classes=101, # could also use len(class_names)
+    num_classes=102, # could also use len(class_names)
 )
 
 # Load saved weights
 effnetb2.load_state_dict(
-    torch.load(
-        f="livine_mini_model.pth",
-        map_location=torch.device("cpu"), # load to CPU
-    )
+    torch.load(
+        f="model/model.pth",
+        map_location=torch.device("cpu"),
+    )["model_state_dict"]
 )
 
-### 3. Predict function ###
+# ### 3. Predict function ###
 
 # Create predict function
 def predict(img) -> Tuple[Dict, float]:
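Note on the loading change: indexing the result of torch.load with ["model_state_dict"] means model/model.pth is now a checkpoint dictionary rather than a bare state dict. A minimal sketch of a save routine that would produce a compatible file — only the "model_state_dict" key is confirmed by this diff; anything stored alongside it would be an assumption:

import torch

def save_checkpoint(model: torch.nn.Module, path: str = "model/model.pth") -> None:
    # Wrap the weights in a dict so app.py can unpack "model_state_dict"
    torch.save({"model_state_dict": model.state_dict()}, path)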
@@ -40,7 +39,7 @@ def predict(img) -> Tuple[Dict, float]:
 
     # Put model into evaluation mode and turn on inference mode
     effnetb2.eval()
-    with torch.inference_mode():
+    with torch.no_grad():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(effnetb2(img), dim=1)
 
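Swapping torch.inference_mode() for torch.no_grad() trades a small speedup for flexibility: tensors created under inference_mode can never later participate in autograd, while no_grad only suspends gradient tracking, and no_grad is also available on older PyTorch releases. Either way the forward pass builds no graph, though the comment above (which still says "inference mode") now lags the code slightly. A quick way to see the effect:

import torch

x = torch.ones(3, requires_grad=True)
with torch.no_grad():
    y = x * 2  # computed without recording an autograd graph
print(y.requires_grad)  # False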
@@ -55,7 +54,6 @@ def predict(img) -> Tuple[Dict, float]:
 
 ### 4. Gradio app ###
 
-# Create title, description and article strings
 title = "Livine Mini Model 🍔👁"
 
 # Create examples list from "examples/" directory
class_names.txt CHANGED
@@ -65,6 +65,7 @@ macarons
 miso_soup
 mussels
 nachos
+not_food
 omelette
 onion_rings
 oysters
@@ -98,4 +99,4 @@ tacos
 takoyaki
 tiramisu
 tuna_tartare
-waffles
+waffles
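This addition is what the num_classes=101 → 102 change in app.py tracks: the 101 Food101 labels plus the new not_food class. Since app.py already reads this file, a small consistency check would keep the two from drifting apart (a sketch, not code from the repo):

with open("class_names.txt", "r") as f:
    class_names = [line.strip() for line in f.readlines()]
assert len(class_names) == 102, f"expected 102 classes, got {len(class_names)}"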
model.py CHANGED
@@ -4,7 +4,7 @@ import torchvision
 from torch import nn
 
 
-def create_effnetb2_model(num_classes:int=3,
+def create_effnetb2_model(num_classes:int=102,
                           seed:int=42):
     """Creates an EfficientNetB2 feature extractor model and transforms.
 
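Only the signature changes here, so the body of create_effnetb2_model is not visible in this diff. For context, a plausible sketch of such an EfficientNetB2 feature extractor, consistent with the (model, transforms) pair that app.py unpacks — the frozen backbone, dropout rate, and head layout are assumptions, not the repo's confirmed code:

import torch
import torchvision
from torch import nn

def create_effnetb2_model(num_classes: int = 102, seed: int = 42):
    # Pretrained ImageNet weights and their matching inference transforms
    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    transforms = weights.transforms()
    model = torchvision.models.efficientnet_b2(weights=weights)

    # Freeze the backbone so only the new head would train
    for param in model.parameters():
        param.requires_grad = False

    # Swap in a classifier head sized for num_classes (EffNetB2 emits 1408 features)
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),
    )
    return model, transforms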
livine_mini_model.pth → model/model.pth RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4a6f01448e06bfac1dfad155175e356402764d8d6399013c4e3ed47cf505842
-size 31831029
+oid sha256:0afb84cd12c0ecc9cbc20a1538282ebc1f45d780cf6431f45ec62d4ff035986f
+size 31836661
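Both sides are git-lfs pointer files, so the oid is the SHA-256 of the weights blob itself. A downloaded model/model.pth can therefore be verified against the pointer with nothing but the standard library (a generic sketch):

import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 16), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("model/model.pth"))  # should equal the oid above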