Jason Adrian committed
Commit d360108
1 Parent(s): eff3c10

bodypartxr classifier

Files changed (2)
  1. app.py +82 -4
  2. resnet18.py +129 -0
app.py CHANGED
@@ -1,7 +1,85 @@
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
  import gradio as gr
+ import torch
+ from torchvision.transforms import transforms
+ import numpy as np

+ from resnet18 import ResNet18

+ model = ResNet18(1, 5)
+
+ checkpoint = torch.load('C:\jason\semester 8\Magang\Hugging-face-bodypartxr\bodypartxr\acc=0.94.ckpt')
+
+ # The state dict keys are prefixed with `net.layer_name`.
+ # Our model doesn't use the `net.` prefix, so we have to rename the keys.
+ state_dict = checkpoint['state_dict']
+ for key in list(state_dict.keys()):
+     if 'net.' in key:
+         state_dict[key.replace('net.', '')] = state_dict[key]
+         del state_dict[key]
+
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ class_names = ['abdominal', 'adult', 'others', 'pediatric', 'spine']
+ class_names.sort()
+
+ transformation_pipeline = transforms.Compose([
+     transforms.ToPILImage(),
+     transforms.Grayscale(num_output_channels=1),
+     transforms.CenterCrop((384, 384)),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.50807575], std=[0.20823])
+ ])
+
+
+ def preprocess_image(image: np.ndarray):
+     """Preprocess the input image.
+
+     Note that the input image is in RGB mode.
+
+     Parameters
+     ----------
+     image: np.ndarray
+         Input image from the callback.
+     """
+
+     image = transformation_pipeline(image)
+     image = torch.unsqueeze(image, 0)
+
+     return image
+
+
+ def image_classifier(inp):
+     """Image classifier function.
+
+     Parameters
+     ----------
+     inp: Optional[np.ndarray] = None
+         Input image from the callback.
+
+     Returns
+     -------
+     Dict
+         A dictionary mapping class names to their probabilities.
+     """
+
+     # If the input is not valid, return dummy data or raise an error
+     if inp is None:
+         return {'cat': 0.3, 'dog': 0.7}
+
+     # preprocess
+     image = preprocess_image(inp)
+     image = image.to(dtype=torch.float32)
+
+     # inference
+     result = model(image)
+
+     # postprocess
+     result = torch.nn.functional.softmax(result, dim=1)  # apply softmax
+     result = result[0].detach().numpy().tolist()  # take the first batch
+     labeled_result = {name: score for name, score in zip(class_names, result)}
+
+     return labeled_result
+
+
+ demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label")
+ demo.launch()
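
The `net.` prefix renaming done above can be exercised in isolation. A minimal sketch (not part of the commit) that applies the same loop to a toy state dict; the key names and tensors are made up for illustration:

# Toy illustration of the `net.` prefix renaming used in app.py (hypothetical keys).
import torch

state_dict = {'net.conv1.weight': torch.zeros(1), 'net.fc.bias': torch.zeros(1)}
for key in list(state_dict.keys()):
    if 'net.' in key:
        state_dict[key.replace('net.', '')] = state_dict[key]
        del state_dict[key]

print(sorted(state_dict))  # ['conv1.weight', 'fc.bias']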
resnet18.py ADDED
@@ -0,0 +1,129 @@
+ from typing import Optional
+
+ import torch.nn as nn
+ import torch
+
+ class BasicBlock(nn.Module):
+     """ResNet Basic Block.
+
+     Parameters
+     ----------
+     in_channels : int
+         Number of input channels
+     out_channels : int
+         Number of output channels
+     stride : int, optional
+         Convolution stride size, by default 1
+     identity_downsample : Optional[torch.nn.Module], optional
+         Downsampling layer, by default None
+     """
+
+     def __init__(self,
+                  in_channels: int,
+                  out_channels: int,
+                  stride: int = 1,
+                  identity_downsample: Optional[torch.nn.Module] = None):
+         super(BasicBlock, self).__init__()
+         self.conv1 = nn.Conv2d(in_channels,
+                                out_channels,
+                                kernel_size = 3,
+                                stride = stride,
+                                padding = 1)
+         self.bn1 = nn.BatchNorm2d(out_channels)
+         self.relu = nn.ReLU()
+         self.conv2 = nn.Conv2d(out_channels,
+                                out_channels,
+                                kernel_size = 3,
+                                stride = 1,
+                                padding = 1)
+         self.bn2 = nn.BatchNorm2d(out_channels)
+         self.identity_downsample = identity_downsample
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         """Apply the forward computation."""
+         identity = x
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.conv2(x)
+         x = self.bn2(x)
+
+         # Apply an operation to the identity output.
+         # Useful to reduce the feature size so it matches the conv2 output.
+         if self.identity_downsample is not None:
+             identity = self.identity_downsample(identity)
+         x += identity
+         x = self.relu(x)
+         return x
+
+ class ResNet18(nn.Module):
+     """Construct a ResNet-18 model.
+
+     Parameters
+     ----------
+     input_channels : int
+         Number of input channels
+     num_classes : int
+         Number of output classes
+     """
+
+     def __init__(self, input_channels, num_classes):
+
+         super(ResNet18, self).__init__()
+         self.conv1 = nn.Conv2d(input_channels,
+                                64, kernel_size = 7,
+                                stride = 2, padding = 3)
+         self.bn1 = nn.BatchNorm2d(64)
+         self.relu = nn.ReLU()
+         self.maxpool = nn.MaxPool2d(kernel_size = 3,
+                                     stride = 2,
+                                     padding = 1)
+
+         self.layer1 = self._make_layer(64, 64, stride = 1)
+         self.layer2 = self._make_layer(64, 128, stride = 2)
+         self.layer3 = self._make_layer(128, 256, stride = 2)
+         self.layer4 = self._make_layer(256, 512, stride = 2)
+
+         # Last layers
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(512, num_classes)
+
+     def identity_downsample(self, in_channels: int, out_channels: int) -> nn.Module:
+         """Downsampling block to reduce the feature sizes."""
+         return nn.Sequential(
+             nn.Conv2d(in_channels,
+                       out_channels,
+                       kernel_size = 3,
+                       stride = 2,
+                       padding = 1),
+             nn.BatchNorm2d(out_channels)
+         )
+
+     def _make_layer(self, in_channels: int, out_channels: int, stride: int) -> nn.Module:
+         """Create a sequential stack of two basic blocks."""
+         identity_downsample = None
+
+         # Add a downsampling function
+         if stride != 1:
+             identity_downsample = self.identity_downsample(in_channels, out_channels)
+
+         return nn.Sequential(
+             BasicBlock(in_channels, out_channels, identity_downsample=identity_downsample, stride=stride),
+             BasicBlock(out_channels, out_channels)
+         )
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = x.view(x.shape[0], -1)
+         x = self.fc(x)
+         return x
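
As a quick sanity check of the new module, a sketch like the following (not part of the commit) instantiates ResNet18 with the same arguments app.py uses and confirms the output shape on a random grayscale batch sized to match the 384x384 center crop:

# Illustrative shape check for resnet18.py; the random input stands in for preprocessed X-rays.
import torch

from resnet18 import ResNet18

model = ResNet18(1, 5)                # one input channel, five classes
model.eval()
x = torch.randn(2, 1, 384, 384)       # two fake grayscale images, 384x384
with torch.no_grad():
    out = model(x)
print(out.shape)                      # expected: torch.Size([2, 5])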