aagirre92 committed on
Commit 824c00e
1 Parent(s): 667cdcb

initial commit

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth filter=lfs diff=lfs merge=lfs -text
+ examples/pizza_dad.jpeg filter=lfs diff=lfs merge=lfs -text
09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96fd40816922c8eb0ea201642420db9989f63dc2f970d15bb74f79258441b261
+ size 31857210
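The pointer above stores only the object's SHA-256 (oid) and byte size; Git LFS fetches the actual weights file. A minimal sketch, not part of the commit, for checking that a pulled copy of the weights matches the pointer (assumes the file sits at the repo root):

# verify_weights.py -- hypothetical helper, not part of this commit
import hashlib
from pathlib import Path

weights_path = Path("09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth")

# The pointer's "oid sha256:..." is the SHA-256 of the real file contents,
# and "size" is its length in bytes.
digest = hashlib.sha256(weights_path.read_bytes()).hexdigest()
print(weights_path.stat().st_size == 31857210)
print(digest == "96fd40816922c8eb0ea201642420db9989f63dc2f970d15bb74f79258441b261")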
app.py ADDED
@@ -0,0 +1,81 @@
+ ### 1. Import and class names setup ###
+ import gradio as gr
+ import os
+ from pathlib import Path
+ import torch
+
+ from model import create_effnetb2_model
+ from time import perf_counter
+ from typing import Tuple, Dict
+
+ from PIL import Image
+ import torchvision
+
+ # Setup class names (hardcoded for now; these could live in a JSON file or similar)
+ # Read the Food101 class names from file into a list
+ with open("class_names.txt", "r") as f:  # class_names.txt sits at the repo root in this commit
+     class_names = f.read().splitlines()
+
+ ### 2. Model and transforms preparation ###
+ effnetb2_model, effnetb2_transforms = create_effnetb2_model(num_classes=len(class_names))
+
+ # Load saved weights
+ effnetb2_model.load_state_dict(torch.load(f="09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth",
+                                           map_location=torch.device("cpu")))  # map_location="cpu" is required because the weights were trained on a GPU
+
+ ### 3. Predict function ###
+ def predict(img) -> Tuple[Dict, float]:
+     # Start a timer
+     start_time = perf_counter()
+
+     # Transform the input image with the EffNetB2 transforms returned by create_effnetb2_model()
+     img_tensor = effnetb2_transforms(img)
+
+     # Put the model in eval mode and run inference
+     effnetb2_model.eval()
+     with torch.inference_mode():
+         y_logits = effnetb2_model(img_tensor.unsqueeze(dim=0))
+         y_pred_probs = torch.softmax(y_logits, dim=1)
+         y_pred_probs_list = y_pred_probs.squeeze().tolist()
+
+     # Create a {class_name: prediction probability} dictionary
+     pred_prob_dict = {class_names[i]: float(prob) for i, prob in enumerate(y_pred_probs_list)}
+
+     # End the timer
+     end_time = perf_counter()
+
+     return pred_prob_dict, round(end_time - start_time, 4)
+
+ ### 4. Launch app ###
+ foodvision_big_examples_path = "examples"
+
+ # The example images in this commit use the .jpeg extension
+ example_list = [str(path) for path in Path(foodvision_big_examples_path).rglob("*.jpeg")]
+
+ # Create title, description and article
+ title = "FoodVisionBig V0 🥘 🧗"
+ description = "An <a href='https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html#torchvision.models.efficientnet_b2'>EfficientNetB2</a> feature extractor computer vision model that classifies images into the 101 food classes of the Food101 dataset.<br>It does not yet handle non-food images. Patience, folks."
+ article = "Created in the <a href='#'>09_pytorch_model_deploy.ipynb</a> Google Colab notebook"
+
+ # Create the Gradio app
+ demo = gr.Interface(fn=predict,
+                     inputs=gr.Image(type="pil"),
+                     outputs=[gr.Label(num_top_classes=5, label="predictions"),
+                              gr.Number(label="Prediction time (s)")],
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo
+ demo.launch()
+
+ # *** IMPORTANT: the interface's Flag button creates a folder named "flagged" containing the images and predictions that users have flagged ***
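To exercise the same pipeline as predict() outside the Gradio UI, it can be run on one of the bundled example images. A minimal sketch, not part of the commit; it assumes it is run from the repo root with the packages in requirements.txt installed (Pillow comes in as a dependency). Importing app.py directly would call demo.launch() at import time, so the sketch rebuilds the model through model.py instead:

# sanity_check.py -- hypothetical script, not part of this commit
import torch
from PIL import Image

from model import create_effnetb2_model

# Rebuild the architecture and load the fine-tuned weights, mirroring app.py
with open("class_names.txt", "r") as f:
    class_names = f.read().splitlines()

model, transforms = create_effnetb2_model(num_classes=len(class_names))
model.load_state_dict(
    torch.load("09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth",
               map_location="cpu"))
model.eval()

# Same transform -> forward pass -> softmax chain as predict()
img = Image.open("examples/pizza_dad.jpeg")
with torch.inference_mode():
    probs = torch.softmax(model(transforms(img).unsqueeze(0)), dim=1)

top_prob, top_idx = probs.max(dim=1)
print(f"{class_names[top_idx.item()]}: {top_prob.item():.3f}")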
class_names.txt ADDED
@@ -0,0 +1,101 @@
+ apple_pie
+ baby_back_ribs
+ baklava
+ beef_carpaccio
+ beef_tartare
+ beet_salad
+ beignets
+ bibimbap
+ bread_pudding
+ breakfast_burrito
+ bruschetta
+ caesar_salad
+ cannoli
+ caprese_salad
+ carrot_cake
+ ceviche
+ cheese_plate
+ cheesecake
+ chicken_curry
+ chicken_quesadilla
+ chicken_wings
+ chocolate_cake
+ chocolate_mousse
+ churros
+ clam_chowder
+ club_sandwich
+ crab_cakes
+ creme_brulee
+ croque_madame
+ cup_cakes
+ deviled_eggs
+ donuts
+ dumplings
+ edamame
+ eggs_benedict
+ escargots
+ falafel
+ filet_mignon
+ fish_and_chips
+ foie_gras
+ french_fries
+ french_onion_soup
+ french_toast
+ fried_calamari
+ fried_rice
+ frozen_yogurt
+ garlic_bread
+ gnocchi
+ greek_salad
+ grilled_cheese_sandwich
+ grilled_salmon
+ guacamole
+ gyoza
+ hamburger
+ hot_and_sour_soup
+ hot_dog
+ huevos_rancheros
+ hummus
+ ice_cream
+ lasagna
+ lobster_bisque
+ lobster_roll_sandwich
+ macaroni_and_cheese
+ macarons
+ miso_soup
+ mussels
+ nachos
+ omelette
+ onion_rings
+ oysters
+ pad_thai
+ paella
+ pancakes
+ panna_cotta
+ peking_duck
+ pho
+ pizza
+ pork_chop
+ poutine
+ prime_rib
+ pulled_pork_sandwich
+ ramen
+ ravioli
+ red_velvet_cake
+ risotto
+ samosa
+ sashimi
+ scallops
+ seaweed_salad
+ shrimp_and_grits
+ spaghetti_bolognese
+ spaghetti_carbonara
+ spring_rolls
+ steak
+ strawberry_shortcake
+ sushi
+ tacos
+ takoyaki
+ tiramisu
+ tuna_tartare
+ waffles
examples/burguer.jpeg ADDED
examples/pizza_dad.jpeg ADDED

Git LFS Details

  • SHA256: 0f00389758009e8430ca17c9a21ebb4564c6945e0c91c58cf058e6a93d267dc8
  • Pointer size: 132 Bytes
  • Size of remote file: 2.87 MB
examples/steak.jpeg ADDED
model.py ADDED
@@ -0,0 +1,37 @@
+ import torch
+ import torchvision
+ from torch import nn
+ from torchvision.models._api import WeightsEnum
+ from torch.hub import load_state_dict_from_url
+
+ def create_effnetb2_model(num_classes: int = 101, seed: int = 42):
+     # https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html#torchvision.models.efficientnet_b2
+
+     # Workaround for a torchvision issue where downloading pretrained weights
+     # fails on the "check_hash" keyword argument
+     def get_state_dict(self, *args, **kwargs):
+         kwargs.pop("check_hash", None)
+         return load_state_dict_from_url(self.url, *args, **kwargs)
+     WeightsEnum.get_state_dict = get_state_dict
+
+     # 1. Setup pretrained EffNetB2 weights
+     effnetb2_weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT  # DEFAULT = best available weights
+
+     # 2. Get EffNetB2 transforms
+     effnetb2_transforms = effnetb2_weights.transforms()
+
+     # 3. Setup pretrained model instance
+     effnetb2_model = torchvision.models.efficientnet_b2(weights=effnetb2_weights)
+
+     # 4. Freeze the base layers in the model
+     for param in effnetb2_model.features.parameters():
+         param.requires_grad = False
+
+     # 5. Modify the classifier head (EffNetB2's classifier takes 1408 input features)
+     torch.manual_seed(seed)
+     effnetb2_model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes, bias=True)
+     )
+
+     return effnetb2_model, effnetb2_transforms
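As a rough check that the classifier swap behaves as intended, the returned model should expose trainable parameters only in the new head and map a single transformed image to num_classes logits. A minimal sketch, not part of the commit, assuming the packages in requirements.txt; the 288x288 dummy input matches the size the bundled EffNetB2 eval transform produces:

# model_check.py -- hypothetical script, not part of this commit
import torch
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=101, seed=42)

# Only the freshly initialised classifier head should still require gradients
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(all(name.startswith("classifier") for name in trainable))  # expected: True

# One dummy 3x288x288 image -> 101 logits
dummy = torch.randn(1, 3, 288, 288)
with torch.inference_mode():
    logits = model(dummy)
print(logits.shape)  # expected: torch.Size([1, 101])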
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ torchvision
+ gradio