Abdullah-Nazhat committed
Commit df7aea3
1 Parent(s): 904944e

Upload 2 files

contextualizer_uniform_mlp.py ADDED
@@ -0,0 +1,96 @@
import torch
from torch import nn


class FeedForward(nn.Module):
    # Position-wise two-layer MLP: Linear -> GELU -> Dropout -> Linear -> Dropout.
    def __init__(self, dim, hidden_dim, dropout):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)


class ContextualizerBlock(nn.Module):
    def __init__(self, d_model, dropout, num_tokens):
        super().__init__()
        self.context_mlp = FeedForward(d_model, d_model, dropout)
        self.mlp = FeedForward(d_model, d_model, dropout)
        self.norm = nn.LayerNorm(d_model)
        # Nearest-neighbour resampling along the flattened token axis:
        # scale_factor=num_tokens stretches one token's width back over the
        # whole sequence; scale_factor=1/num_tokens shrinks it down again.
        self.upsample = nn.Upsample(scale_factor=num_tokens, mode='nearest')
        self.downsample = nn.Upsample(scale_factor=1 / num_tokens, mode='nearest')

    def forward(self, x):
        # Context branch: flatten (tokens x channels), subsample it down to a
        # single d_model-wide context vector, transform that with an MLP, then
        # stretch it back out over the full sequence shape.
        res = x
        x = self.norm(x)

        context = x
        dim0, dim1, dim2 = context.shape  # (batch, num_tokens, d_model)
        context = context.reshape([dim0, 1, dim1 * dim2])

        context = self.downsample(context)
        context = context.reshape([dim0, dim2])
        context = self.context_mlp(context)

        context = context.reshape([dim0, 1, dim2])
        context = self.upsample(context)
        context = context.reshape([dim0, dim1, dim2])
        x = context + res

        # Position-wise MLP branch with its own residual connection.
        res = x
        x = self.norm(x)
        x = self.mlp(x)
        return x + res


class Contextualizer(nn.Module):
    def __init__(self, d_model, num_layers, dropout, num_tokens):
        super().__init__()
        self.model = nn.Sequential(
            *[ContextualizerBlock(d_model, dropout, num_tokens) for _ in range(num_layers)]
        )

    def forward(self, x):
        return self.model(x)
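
A minimal smoke-test sketch for the module above; the sizes are assumptions that mirror the training script's defaults (batch 128, 64 tokens, d_model 256), and the check relies on each block being shape-preserving:

# Hypothetical usage sketch, not from the commit itself.
import torch
from contextualizer_uniform_mlp import Contextualizer

model = Contextualizer(d_model=256, num_layers=4, dropout=0.5, num_tokens=64)
x = torch.randn(128, 64, 256)   # (batch, num_tokens, d_model)
y = model(x)
assert y.shape == x.shape       # the stack preserves the sequence shape
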
train_contextualizer_unifrom_mlp.py ADDED
@@ -0,0 +1,187 @@
import os
import csv
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose
from contextualizer_uniform_mlp import Contextualizer


# Standard CIFAR-10 augmentation: pad-and-crop, horizontal flip, then
# normalise each channel to [-1, 1].
transform = Compose([
    RandomCrop(32, padding=4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

training_data = datasets.CIFAR10(
    root='data',
    train=True,
    download=True,
    transform=transform,
)

test_data = datasets.CIFAR10(
    root='data',
    train=False,
    download=True,
    transform=transform,
)


batch_size = 128

train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)


for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break


def check_sizes(image_size, patch_size):
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    assert remainder == 0, "`image_size` must be divisible by `patch_size`"
    num_patches = sqrt_num_patches ** 2
    return num_patches


device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")


class ContextualizerumlpImageClassification(Contextualizer):
    def __init__(
        self,
        image_size=32,
        patch_size=4,
        in_channels=3,
        num_classes=10,
        d_model=256,
        num_tokens=64,
        num_layers=4,
        dropout=0.5,
    ):
        # Validates that image_size is divisible by patch_size.
        num_patches = check_sizes(image_size, patch_size)
        super().__init__(d_model, num_layers, dropout, num_tokens)
        # Non-overlapping patch embedding: kernel size == stride == patch_size.
        self.patcher = nn.Conv2d(
            in_channels, d_model, kernel_size=patch_size, stride=patch_size
        )
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        patches = self.patcher(x)
        batch_size, num_channels, _, _ = patches.shape
        patches = patches.permute(0, 2, 3, 1)
        patches = patches.view(batch_size, -1, num_channels)  # (N, num_patches, d_model)
        embedding = self.model(patches)
        embedding = embedding.mean(dim=1)  # global average pool over tokens
        out = self.classifier(embedding)
        return out


model = ContextualizerumlpImageClassification().to(device)
print(model)


loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.train()
    train_loss = 0
    correct = 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        pred = model(X)
        loss = loss_fn(pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, labels = torch.max(pred.data, 1)
        correct += labels.eq(y.data).type(torch.float).sum()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

    train_loss /= num_batches
    train_accuracy = 100. * correct.item() / size
    print(train_accuracy)
    return train_loss, train_accuracy


def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    test_accuracy = 100 * correct
    return test_loss, test_accuracy


logname = "/PATH/Contextualizer_uniform_mlp/Experiments_cifar10/logs_contextualizer/logs_cifar10.csv"
if not os.path.exists(logname):
    with open(logname, 'w') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc',
                            'test loss', 'test acc'])


epochs = 100
for epoch in range(epochs):
    print(f"Epoch {epoch+1}\n-----------------------------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    test_loss, test_acc = test(test_dataloader, model, loss_fn)
    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([epoch+1, train_loss, train_acc,
                            test_loss, test_acc])
print("Done!")


path = "/PATH/Contextualizer_uniform_mlp/Experiments_cifar10/weights_contextualizer"
model_name = "ContextualizerumlpImageClassification_cifar10"
torch.save(model.state_dict(), f"{path}/{model_name}.pth")
print(f"Saved model state to {path}/{model_name}.pth")
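
A short reload sketch for evaluating the checkpoint later; it reuses the `path`, `model_name`, and `device` names from the script above (with the `/PATH/` placeholder filled in) and assumes the model class is constructed with the same defaults:

# Hypothetical checkpoint reload; `path`, `model_name`, and `device` are the
# variables defined in the script above (the /PATH/ placeholder must be set).
reloaded = ContextualizerumlpImageClassification().to(device)
state = torch.load(f"{path}/{model_name}.pth", map_location=device)
reloaded.load_state_dict(state)
reloaded.eval()  # disable dropout before evaluation
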