Initial Commit for SkinGuard (#1)
Commit 1fd6d2dc419c10e463f950d2ab45e9891b7ab6ce
- .gitattributes +1 -0
- Final_ViT_Model_50_Epochs.pth +3 -0
- app.py +61 -0
- examples/ISIC_0000037.png +0 -0
- examples/ISIC_0000068.png +0 -0
- examples/ISIC_0000381.png +3 -0
- examples/image_136.png +0 -0
- examples/image_269.png +0 -0
- model.py +39 -0
- requirements.txt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/ISIC_0000381.png filter=lfs diff=lfs merge=lfs -text
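Of the new example images, only `examples/ISIC_0000381.png` is added to the LFS patterns; the other PNGs are committed as regular blobs. A rough sketch of how these glob patterns select paths, using Python's `fnmatch` as a stand-in for Git's own matcher (the helper below is hypothetical, for illustration only):

```python
# Illustration only: approximate which .gitattributes LFS patterns
# match a path. Git's real matcher differs in some edge cases.
from fnmatch import fnmatch

lfs_patterns = ['*.zip', '*.zst', '*tfevents*', 'examples/ISIC_0000381.png']

def is_lfs_tracked(path: str) -> bool:
    # Patterns without a slash are also matched against the bare filename
    name = path.rsplit('/', 1)[-1]
    return any(fnmatch(path, p) or fnmatch(name, p) for p in lfs_patterns)

print(is_lfs_tracked('examples/ISIC_0000381.png'))  # True
print(is_lfs_tracked('examples/image_136.png'))     # False
```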
Final_ViT_Model_50_Epochs.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e11954779371fb0c29ac8bbe5b5a504442ee7892b0a4b463b7a8e9c0517b00b6
+size 343261946
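What is committed here is a Git LFS pointer, not the weights themselves; the ~343 MB `.pth` file lives in LFS storage. A minimal sketch for verifying a downloaded copy against the pointer's `oid` and `size` (both values copied from the pointer above):

```python
# Sketch: verify a downloaded weights file against the LFS pointer above.
import hashlib
import os

path = 'Final_ViT_Model_50_Epochs.pth'
expected_oid = 'e11954779371fb0c29ac8bbe5b5a504442ee7892b0a4b463b7a8e9c0517b00b6'
expected_size = 343261946

assert os.path.getsize(path) == expected_size, 'size mismatch'

sha256 = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):  # hash in 1 MiB chunks
        sha256.update(chunk)
assert sha256.hexdigest() == expected_oid, 'hash mismatch'
print('weights match the LFS pointer')
```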
app.py
ADDED
@@ -0,0 +1,61 @@
+### 1. Imports and class names setup ###
+import gradio as gr
+import os
+import torch
+
+from model import create_ViT
+from timeit import default_timer as timer
+
+# Setup class names
+class_names = ['Normal', 'Malignant']
+
+### 2. Model and transforms preparation ###
+ViT, manual_transforms = create_ViT()
+
+# Load saved weights
+ViT.load_state_dict(
+    torch.load(f='Final_ViT_Model_50_Epochs.pth',
+               map_location=torch.device('cpu')  # Load model to CPU
+               )
+)
+
+### 3. Predict function ###
+def predict(img):
+    start_time = timer()
+
+    img = manual_transforms(img).unsqueeze(0)
+
+    ViT.eval()
+    with torch.inference_mode():
+        pred_prob = torch.sigmoid(ViT(img).squeeze())
+        pred_label_idx = int(torch.round(pred_prob))
+        if pred_label_idx:
+            pred_labels_and_preds = {class_names[pred_label_idx]: float(pred_prob.item())}
+        else:
+            pred_labels_and_preds = {class_names[pred_label_idx]: 1 - float(pred_prob.item())}
+
+    end_time = timer()
+    pred_time = round(end_time - start_time, 4)
+
+    return pred_labels_and_preds, pred_time
+
+### 4. Gradio App ###
+# Create interface for gradio
+title = 'SkinGuard'
+description = 'An AI model that predicts whether a CLOSE-UP picture of skin looks normal or shows a sign of skin cancer. \n IMPORTANT INSTRUCTION: When taking a picture of your skin, take the picture AS CLOSE AS POSSIBLE to the target area.'
+article = 'This is our demo for the DEDA Entrepreneurship Competition 2024. Created by Imran, Brian, Lukas, and Rohit, all in Baez CSE Period 5. \n IMPORTANT NOTE: If you have followed the important instruction in the description of this demo and STILL consistently get malignant results, do NOT take it as an official diagnosis. Contact a healthcare professional for more information if you are concerned.'
+# Create examples list
+example_list = [['examples/' + example] for example in os.listdir('examples')]
+
+# Create the gradio demo
+demo = gr.Interface(fn=predict,  # maps inputs to outputs
+                    inputs=gr.Image(type='pil'),
+                    outputs=[gr.Label(num_top_classes=1, label='Predictions'),
+                             gr.Label(label='Prediction Time (s)')],
+                    examples=example_list,
+                    title=title,
+                    description=description,
+                    article=article)
+# Launch the model!!
+demo.launch(debug=False,
+            share=True)
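Because `app.py` calls `demo.launch()` at import time, the easiest way to smoke-test the model outside the UI is to rebuild the pipeline from `model.py` directly. A minimal sketch, assuming the weights file and the `examples/` directory are present locally:

```python
# Sketch: run one prediction without launching the Gradio demo.
import torch
from PIL import Image

from model import create_ViT

ViT, manual_transforms = create_ViT()
ViT.load_state_dict(torch.load('Final_ViT_Model_50_Epochs.pth',
                               map_location=torch.device('cpu')))
ViT.eval()

img = Image.open('examples/ISIC_0000037.png').convert('RGB')
batch = manual_transforms(img).unsqueeze(0)  # shape: [1, 3, 224, 224]
with torch.inference_mode():
    prob_malignant = torch.sigmoid(ViT(batch).squeeze()).item()
print(f'P(Malignant) = {prob_malignant:.3f}')
```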
examples/ISIC_0000037.png
ADDED
examples/ISIC_0000068.png
ADDED
examples/ISIC_0000381.png
ADDED (stored via Git LFS)
examples/image_136.png
ADDED
examples/image_269.png
ADDED
model.py
ADDED
@@ -0,0 +1,39 @@
+import torch
+import torchvision
+
+from torch import nn
+
+def create_ViT():
+    ViT_weights = torchvision.models.ViT_B_16_Weights.DEFAULT
+    ViT_model = torchvision.models.vit_b_16(weights=ViT_weights)
+
+    # Freeze pre-trained weights
+    for param in ViT_model.parameters():
+        param.requires_grad = False
+
+    # Find the encoder module and its layers
+    encoder = ViT_model.encoder
+    encoder_layers = encoder.layers
+    # Modify each encoder layer to include dropout
+    for layer in encoder_layers:
+        # Access the Multi-head Self-Attention module (named self_attention in torchvision's ViT)
+        attn_module = layer.self_attention
+        # Register a dropout submodule (note: add_module registers it but does not insert it into the forward pass)
+        attn_module.add_module('my_dropout', nn.Dropout(p=0.4))
+    # Replace the classification head with a single-logit binary head
+    ViT_model.heads = nn.Sequential(
+        nn.Dropout(p=0.5),
+        nn.Linear(in_features=768, out_features=1, bias=True)
+    )
+
+    manual_transforms = torchvision.transforms.Compose([
+        torchvision.transforms.RandomRotation(25),
+        torchvision.transforms.RandomAffine(degrees=0, translate=(0.15, 0.15), shear=15),
+        torchvision.transforms.RandomHorizontalFlip(),
+        torchvision.transforms.RandomVerticalFlip(),
+        torchvision.transforms.ColorJitter(brightness=(0.9, 1.5)),
+        torchvision.transforms.Resize((224, 224)),
+        torchvision.transforms.ToTensor()
+    ])
+
+    return ViT_model, manual_transforms
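Note that `manual_transforms` bundles random augmentations (rotation, affine, flips, color jitter) that `app.py` also applies at inference, so repeated predictions on the same image can vary. A sketch of a deterministic eval-time alternative plus an output-shape smoke test, assuming those augmentations were intended for training only:

```python
# Sketch: deterministic inference transform and an output-shape check.
# Assumption: the random augmentations above were meant for training only.
import torch
import torchvision

from model import create_ViT

eval_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor()
])

ViT, _ = create_ViT()
ViT.eval()
with torch.inference_mode():
    out = ViT(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1]) -> a single logit for sigmoid
```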
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch==2.2.1
+torchvision==0.17.1
+gradio==4.26.0
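A quick sanity check that an installed environment matches these pins (version strings copied from the file above; local torch builds may carry a `+cpu` or `+cu…` suffix):

```python
# Sketch: confirm installed versions against the pinned requirements.
import gradio
import torch
import torchvision

print(torch.__version__)        # expect 2.2.1 (possibly with a +cpu/+cu suffix)
print(torchvision.__version__)  # expect 0.17.1
print(gradio.__version__)       # expect 4.26.0
```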