909ahmed committed
Commit 0f13a98
1 Parent(s): 497014a

init commit

Files changed (5)
  1. README.md +4 -4
  2. app.py +89 -0
  3. requirements.txt +8 -0
  4. resnet.py +143 -0
  5. save_model.pkl +3 -0
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: ERABB
- emoji: 📚
- colorFrom: purple
- colorTo: gray
+ title: Assignment
+ emoji: 💻
+ colorFrom: blue
+ colorTo: green
  sdk: gradio
  sdk_version: 4.27.0
  app_file: app.py
app.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ import torch
+ from torchvision import transforms
+ from resnet import ResNet18, ResBlocks  # ResBlocks must be importable so the pickled checkpoint can be unpickled
+ from PIL import Image
+ import numpy as np
+ from pytorch_grad_cam import GradCAM
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ from pl_bolts.transforms.dataset_normalizations import cifar10_normalization  # used by the commented-out normalization below
+
+ model = ResNet18(0.00333)
+
+ # The checkpoint stores the full pickled module; extract its weights and load them into a fresh model.
+ state_model = torch.load("save_model.pkl", map_location=torch.device('cpu'))
+ state_dict = state_model.state_dict()
+
+ model.load_state_dict(state_dict, strict=False)
+
+ classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+ # Undoes CIFAR10-style normalization (mean ~0.50, std ~0.23); paired with the
+ # cifar10_normalization() call that is currently commented out in inference().
+ inv_normalize = transforms.Normalize(
+     mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
+     std=[1/0.23, 1/0.23, 1/0.23]
+ )
+
+ def resize_image_pil(image, new_width, new_height):
+     """Scale the image to fit within new_width x new_height, then crop to exactly that size."""
+     img = Image.fromarray(np.array(image))
+
+     width, height = img.size
+     width_scale = new_width / width
+     height_scale = new_height / height
+     scale = min(width_scale, height_scale)
+
+     resized = img.resize((int(width*scale), int(height*scale)), Image.NEAREST)
+     resized = resized.crop((0, 0, new_width, new_height))
+     return np.array(resized)
+
+ def inference(input_img, transparency=0.5, target_layer_number=-1):
+     input_img = resize_image_pil(input_img, 32, 32)
+     org_img = input_img
+
+     input_img = input_img.reshape((32, 32, 3))
+     transform = transforms.ToTensor()
+
+     input_img = transform(input_img)
+     # input_img = cifar10_normalization()(input_img)
+     input_img = input_img.unsqueeze(0)
+     outputs = model(input_img)
+     # The network ends in LogSoftmax, so exponentiating recovers class probabilities.
+     o = torch.exp(outputs.flatten())
+
+     confidences = {classes[i]: float(o[i]) for i in range(10)}
+     _, prediction = torch.max(outputs, 1)
+
+     # GradCAM over one of the last two blocks of the third residual stage.
+     target_layers = [model.res_layers[2][target_layer_number]]
+     cam = GradCAM(model=model, target_layers=target_layers)
+     grayscale_cam = cam(input_tensor=input_img, targets=None)
+     grayscale_cam = grayscale_cam[0, :]
+
+     visualization = show_cam_on_image(org_img/255, grayscale_cam, use_rgb=True, image_weight=transparency)
+     return classes[prediction[0].item()], visualization, confidences
+
+ title = "CIFAR10 trained on ResNet18 Model with GradCAM"
+ description = "A simple Gradio interface to run inference on a ResNet18 model and view GradCAM results"
+
+ iface = gr.Interface(
+     inference,
+     inputs=[
+         gr.Image(width=256, height=256, label="Input Image"),
+         gr.Slider(0, 1, value=0.5, label="Overall Opacity of Image"),
+         gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?")
+     ],
+     outputs=[
+         "text",
+         gr.Image(width=256, height=256, label="Output"),
+         gr.Label(num_top_classes=3)
+     ],
+     title=title,
+     description=description,
+ )
+
+ iface.launch()
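
Note that save_model.pkl (added below) stores the full pickled module rather than a bare state_dict, which is why ResBlocks is imported even though app.py never references it by name: unpickling needs the class on the import path. A minimal smoke test of the same load-and-predict path, sketched under the assumption that it runs from the repo root with the requirements installed (the script name and the random input are hypothetical, not part of this commit):

# sanity_check.py -- hypothetical helper, not part of this commit
import torch
from resnet import ResNet18, ResBlocks  # ResBlocks needed so the pickle can resolve its class

model = ResNet18(0.00333)
checkpoint = torch.load("save_model.pkl", map_location=torch.device("cpu"))
model.load_state_dict(checkpoint.state_dict(), strict=False)
model.eval()

with torch.no_grad():
    # forward() ends in LogSoftmax, so exp() recovers class probabilities
    probs = torch.exp(model(torch.randn(1, 3, 32, 32)))
print(probs.sum().item())  # should be ~1.0 for a single input
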
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch==2.1.2
+ torchvision
+ torch-lr-finder
+ grad-cam
+ pillow
+ numpy
+ pytorch_lightning==1.9.5
+ lightning-bolts
resnet.py ADDED
@@ -0,0 +1,143 @@
+ import torch
+ import torch.nn as nn
+ from pytorch_lightning import LightningModule
+ from torch.optim.lr_scheduler import OneCycleLR
+ from torchmetrics import Accuracy
+ import torch.nn.functional as F
+
+ BATCH_SIZE = 256
+
+ class ResBlocks(LightningModule):
+     """Basic residual block: two 3x3 conv-BN-ReLU stacks with an identity or 1x1 projection shortcut."""
+
+     def __init__(self, inchannels, outchannels, stride):
+         super(ResBlocks, self).__init__()
+
+         self.conv1 = self.make_conv(inchannels, outchannels, stride=stride)
+         self.conv2 = self.make_conv(outchannels, outchannels)
+
+         # Project the input when resolution or channel count changes, so the
+         # residual addition stays shape-compatible.
+         if stride != 1 or inchannels != outchannels:
+             self.shortcut = nn.Sequential(
+                 nn.Conv2d(inchannels, outchannels, kernel_size=1, stride=stride)
+             )
+
+     def make_conv(self, inchannels, outchannels, kernel=3, padding=1, stride=1):
+         layers = []
+
+         layers.append(nn.Conv2d(in_channels=inchannels, out_channels=outchannels, kernel_size=kernel, padding=padding, stride=stride))
+         layers.append(nn.BatchNorm2d(outchannels))
+         layers.append(nn.ReLU())
+
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
+         out = self.conv1(x)
+         out = self.conv2(out)
+
+         return out + shortcut
+
+ class ResNet18(LightningModule):
+     def __init__(self, lr=0.05):
+         super(ResNet18, self).__init__()
+
+         self.save_hyperparameters()
+         self.avgpool = nn.AvgPool2d(kernel_size=4)
+         self.fc = self.make_FC()
+         self.accuracy = Accuracy(task="multiclass", num_classes=10)
+         # Four residual stages of two blocks each; channels double and
+         # resolution halves from the second stage onward.
+         self.in_layers = [64, 64, 128, 256]
+         self.out_layers = [64, 128, 256, 512]
+         self.strides = [1, 2, 2, 2]
+         self.num = [2, 2, 2, 2]
+
+         self.convin = nn.Sequential(
+             nn.Conv2d(3, 64, 3, bias=False),
+             nn.BatchNorm2d(64),
+             nn.ReLU()
+         )
+         self.res_layers = nn.ModuleList([self.make_res(self.in_layers[i], self.out_layers[i], self.num[i], self.strides[i]) for i in range(len(self.in_layers))])
+
+     def make_res(self, inchannels, outchannels, num, stride):
+         # Only the first block of a stage downsamples; the rest keep stride 1.
+         strides = [stride] + [1] * (num-1)
+         layers = []
+
+         for stride in strides:
+             layers.append(ResBlocks(inchannels=inchannels, outchannels=outchannels, stride=stride))
+             inchannels = outchannels
+
+         return nn.Sequential(*layers)
+
+     def make_FC(self):
+         layers = []
+
+         layers.append(nn.Linear(512, 256))
+         layers.append(nn.GELU())
+         layers.append(nn.Linear(256, 10))
+         layers.append(nn.LogSoftmax(dim=1))
+
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         x = self.convin(x)
+
+         for layer in self.res_layers:
+             x = layer(x)
+
+         x = self.avgpool(x)
+         x = x.view(-1, 512)
+         x = self.fc(x)
+
+         return x
+
+     def training_step(self, batch, batch_idx):
+         x, y = batch
+         logits = self(x)
+         loss = F.nll_loss(logits, y)
+         self.log("train_loss", loss)
+         return loss
+
+     def evaluate(self, batch, stage=None):
+         x, y = batch
+         logits = self(x)
+         loss = F.nll_loss(logits, y)
+         preds = torch.argmax(logits, dim=1)
+         acc = self.accuracy(preds, y)
+
+         if stage:
+             self.log(f"{stage}_loss", loss, prog_bar=True)
+             self.log(f"{stage}_acc", acc, prog_bar=True)
+
+     def validation_step(self, batch, batch_idx):
+         self.evaluate(batch, "val")
+
+     def test_step(self, batch, batch_idx):
+         self.evaluate(batch, "test")
+
+     def configure_optimizers(self):
+         optimizer = torch.optim.Adam(
+             self.parameters(),
+             lr=self.hparams.lr,
+             weight_decay=5e-4,
+         )
+         # Number of optimizer steps per epoch for 45,000 training samples.
+         steps_per_epoch = 45000 // BATCH_SIZE
+         scheduler_dict = {
+             "scheduler": OneCycleLR(
+                 optimizer,
+                 max_lr=1.26*1e-2,
+                 steps_per_epoch=steps_per_epoch,
+                 epochs=20,
+                 pct_start=0.2,
+                 div_factor=10,
+                 three_phase=False,
+                 final_div_factor=10,
+                 anneal_strategy='linear'
+             ),
+             "interval": "step",
+         }
+         return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
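
To illustrate the shortcut logic above: when a stage changes stride or channel width, the 1x1 projection keeps the residual sum shape-compatible. A quick shape check, a sketch only, assuming CIFAR10-sized 32x32 inputs as in app.py:

import torch
from resnet import ResBlocks, ResNet18

# Downsampling block: channels double, spatial size halves.
block = ResBlocks(inchannels=64, outchannels=128, stride=2)
print(block(torch.randn(1, 64, 32, 32)).shape)   # torch.Size([1, 128, 16, 16])

# Full network: 3x32x32 image in, 10 log-probabilities out.
model = ResNet18()
print(model(torch.randn(1, 3, 32, 32)).shape)    # torch.Size([1, 10])
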
save_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9defecd2554c8ab9c20eb761089e10acce17a6a11e696c3632a97665877dfb5
+ size 626468869