fbrynpk committed
Commit 2ee1a14
1 Parent(s): 0572b2a

Initial Commit
EffNetB0_data_auto_10_epochs.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbec879cdf158c1d6a55dd56baa97521d201ad8cde2e80f48d1c0985ace221d8
+ size 16340835
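
The weights file is stored with Git LFS, so the commit only carries this pointer; the actual checkpoint is fetched on clone or `git lfs pull`. A minimal sketch, assuming the file has been pulled to the path below, for verifying the download against the pointer's oid and size:

    # verify_lfs.py (hypothetical helper, not part of this commit)
    import hashlib
    import os

    path = "EffNetB0_data_auto_10_epochs.pth"

    sha256 = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha256.update(chunk)

    print("size:", os.path.getsize(path))  # expect 16340835
    print("sha256:", sha256.hexdigest())   # expect bbec879cdf15...

Both values should match the pointer fields above; a mismatch usually means the repo was cloned without LFS installed.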
app.py ADDED
@@ -0,0 +1,105 @@
+ # Load model
+ import torch
+ import torchvision
+ import os
+ import gradio as gr
+
+ from torchvision import transforms
+ from model import create_effnet
+ from typing import Tuple, Dict
+ from timeit import default_timer as timer
+
+ # Device-agnostic code
+ if torch.backends.mps.is_available():
+     device = "mps"
+ elif torch.cuda.is_available():
+     device = "cuda"
+ else:
+     device = "cpu"
+
+ class_names = ["NORMAL", "COVID"]
+
+ EffNetB0_load_model, EffNetB0_transforms = create_effnet(
+     pretrained_weights=torchvision.models.EfficientNet_B0_Weights.DEFAULT,
+     model=torchvision.models.efficientnet_b0,
+     in_features=1280,
+     dropout=0.2,
+     out_features=2,
+     device=device,
+ )
+
+ # Transform used during training (kept for reference; inference below
+ # uses the pretrained weights' own transforms, EffNetB0_transforms)
+ data_transform = transforms.Compose(
+     [
+         # Resize the images to 64x64
+         transforms.Resize(size=(64, 64)),
+         # Flip the images randomly on the horizontal axis
+         transforms.RandomHorizontalFlip(p=0.5),
+         # Convert to grayscale, replicated across 3 channels
+         transforms.Grayscale(num_output_channels=3),
+         # Turn the image into a torch.Tensor (C, H, W)
+         transforms.ToTensor(),
+     ]
+ )
+
+ # map_location is an argument of torch.load, not load_state_dict
+ EffNetB0_load_model.load_state_dict(
+     torch.load("./EffNetB0_data_auto_10_epochs.pth", map_location=torch.device("cpu"))
+ )
+
+ ### Predict function ---------------------------------------------------- ###
+
+
+ def predict(img) -> Tuple[Dict, float]:
+     # Start a timer
+     start_time = timer()
+     # Transform the input image for the EfficientNetB0 model; unsqueeze adds
+     # a batch dimension on the 0th index: (3, 224, 224) -> (1, 3, 224, 224)
+     img = EffNetB0_transforms(img).unsqueeze(0).to(device)
+     # Put model into eval mode, make prediction
+     EffNetB0_load_model.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the
+         # prediction logits into probabilities
+         pred_logits = EffNetB0_load_model(img)
+         pred_probs = torch.softmax(pred_logits, dim=1)
+         # Create a prediction label and prediction probability dictionary
+         pred_labels_and_probs = {
+             class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))
+         }
+
+     # Calculate pred time
+     end_time = timer()
+     pred_time = round(end_time - start_time, 4)
+
+     # Return pred dict and pred time
+     return pred_labels_and_probs, pred_time
+
+
+ # Create title and description
+ title = "Covid Prediction: EfficientNetB0 Model"
+ description = (
+     "An EfficientNetB0 model trained on a Covid-19 dataset to classify chest X-ray images"
+ )
+
+ # Create example list (the example images are committed under ./examples)
+ example_list = [
+     [os.path.join("examples", example)] for example in os.listdir("examples")
+ ]
+
+ # Create the Gradio demo
+ demo = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="pil"),
+     outputs=[
+         gr.Label(num_top_classes=2, label="Predictions"),
+         gr.Number(label="Prediction time (s)"),
+     ],
+     title=title,
+     description=description,
+     examples=example_list,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
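
Because `demo.launch()` now sits behind a `__main__` guard, app.py can be imported without starting the server, which allows a quick local check of `predict`. A minimal sketch, assuming the repo root is the working directory and one of the bundled example images below is present:

    # smoke_test.py (hypothetical; not part of this commit)
    from PIL import Image

    from app import predict  # importing app builds the model and loads the weights

    img = Image.open("examples/NORMAL2-IM-0035-0001.jpeg")
    labels_and_probs, pred_time = predict(img)
    print(labels_and_probs)  # e.g. {"NORMAL": 0.97, "COVID": 0.03} (illustrative values)
    print(f"prediction took {pred_time}s")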
examples/NORMAL2-IM-0035-0001.jpeg ADDED
examples/SARS-10.1148rg.242035193-g04mr34g0-Fig8a-day0.jpeg ADDED
model.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+ import torchvision
+
+ from torch import nn
+ from typing import Callable
+
+
+ def create_effnet(
+     pretrained_weights: torchvision.models.WeightsEnum,
+     model: Callable[..., nn.Module],
+     in_features: int,
+     dropout: float,
+     out_features: int,
+     device: torch.device,
+ ):
+     # Get the pretrained weights and set up the base model
+     model = model(weights=pretrained_weights).to(device)
+     transforms = pretrained_weights.transforms()
+
+     # Freeze the base model layers
+     for param in model.features.parameters():
+         param.requires_grad = False
+
+     # Replace the classifier head with a new two-unit output layer
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=dropout, inplace=True),
+         nn.Linear(in_features=in_features, out_features=out_features),
+     ).to(device)
+
+     return model, transforms
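
As a quick sanity check of the factory, a minimal sketch mirroring the arguments app.py passes: the frozen EfficientNet-B0 backbone plus the new two-unit head should map a standard 224x224 batch to two logits.

    # sanity_check.py (hypothetical; not part of this commit)
    import torch
    import torchvision

    from model import create_effnet

    model, transforms = create_effnet(
        pretrained_weights=torchvision.models.EfficientNet_B0_Weights.DEFAULT,
        model=torchvision.models.efficientnet_b0,
        in_features=1280,
        dropout=0.2,
        out_features=2,
        device=torch.device("cpu"),
    )

    model.eval()
    with torch.inference_mode():
        out = model(torch.randn(1, 3, 224, 224))  # dummy batch of one RGB image
    print(out.shape)  # expected: torch.Size([1, 2])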
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==2.0.1
+ torchvision==0.15.2
+ gradio==3.23.0
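
Once these pins are installed, a short check from any Python session confirms the environment matches (a sketch; the torch version string may carry a build suffix such as +cu117):

    import torch, torchvision, gradio
    print(torch.__version__, torchvision.__version__, gradio.__version__)
    # expect: 2.0.1 0.15.2 3.23.0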