Sendeky committed on
Commit 7b15e2a
1 Parent(s): da8aece

Updated model.py to achieve over 63% accuracy

Files changed (1):
model.py (+8, -15)
model.py CHANGED
@@ -88,32 +88,23 @@ class Net(nn.Module):
     def __init__(self):
         super(Net, self).__init__()
 
-        # placing all layers in nn.Sequential brought +4% accuracy improvement
         self.network = nn.Sequential(
             # convolutional layers
             nn.Conv2d(nc, 16, kernel_size=3, stride=1, padding=1),  # a convolutional layer with 3 input channels, 16 output channels,
                                                                     # a kernel size of 3, a stride of 1, and padding of 1
-            nn.Conv2d(16, 64, kernel_size=3, stride=1, padding=1),
-            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),
+            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
 
             # max pooling layer
             nn.MaxPool2d(kernel_size=2, stride=2),  # a max pooling layer with a kernel size of 2 and a stride of 2;
                                                     # helps reduce spatial dimensions of feature maps
             nn.Flatten(),
-            nn.Linear(32 * 16 * 16, 16),  # adjust the input size based on the output of the last conv layer
+            nn.Linear(32 * 16 * 16, 64),  # adjust the input size based on the output of the last conv layer
+            nn.Linear(64, 16),
             nn.Linear(16, output),
         )
 
 
     def forward(self, x):
-        # x = self.pool(F.relu(self.conv1(x)))  # first convolutional layer, then ReLU activation, then max pooling
-        # x = self.pool(F.relu(self.conv2(x)))  # second convolutional layer, then ReLU, then pooling
-
-        # x = x.view(x.size(0), -1)  # flatten the tensor before passing it through the fully connected layers
-
-        # x = F.relu(self.fc1(x))  # first fully connected layer, then ReLU
-        # x = self.fc2(x)  # final fully connected layer that produces the predictions
-
         return self.network(x)
 
 # creates instance of the model
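The 32 * 16 * 16 input size of the first nn.Linear follows from the conv and pool shapes. A minimal shape-walk sketch, assuming 32x32 RGB inputs (e.g. CIFAR-10, so nc = 3); the standalone names conv1, conv2, and pool are illustrative, not taken from the commit:

import torch
import torch.nn as nn

x = torch.randn(1, 3, 32, 32)                                  # dummy batch: one 32x32 RGB image (assumed input size)

conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)   # 3x32x32  -> 16x32x32 (padding=1 keeps H and W)
conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)  # 16x32x32 -> 32x32x32
pool = nn.MaxPool2d(kernel_size=2, stride=2)                   # 32x32x32 -> 32x16x16 (halves H and W)

out = pool(conv2(conv1(x)))
print(out.shape)             # torch.Size([1, 32, 16, 16])
print(out.flatten(1).shape)  # torch.Size([1, 8192]), and 32 * 16 * 16 == 8192

If the input resolution differs from 32x32, the 32 * 16 * 16 constant has to change accordingly.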
@@ -121,8 +112,8 @@ model = Net()
 
 # create the optimizer and criterion
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
-# Maybe use Adam
+# Adam optimizer yields much better results than SGD
+optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
 # moves model to device (i.e. cpu/gpu)
 model.to(device)
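The optimizer change is a one-line constructor swap. A minimal before/after sketch; the stand-in module and learning rate below are assumptions, since model.py builds Net() and defines learning_rate elsewhere:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)  # stand-in module; model.py uses Net() here
learning_rate = 0.001    # assumed value; not shown in the diff

# before this commit: stochastic gradient descent with momentum
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

# after this commit: Adam, which keeps a per-parameter adaptive step size
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

Adam's adaptive step sizes often converge faster on small CNNs without hand-tuned momentum, which lines up with the roughly five-point gain recorded in the notes at the end of the file.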
@@ -203,4 +194,6 @@ print(f"Accuracy on the test dataset: {accuracy:.2%}")
 
 # ADDED: After adding all layers to nn.Sequential: ~55-57%
 
-# After adding a 3rd Conv2d layer (64, 32)
+# ADDED: After using optim.Adam instead of optim.SGD: ~61-62%
+
+# ADDED: After adding a nn.Linear(64, 16): ~62-63+%
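Putting the hunks together, a minimal self-contained sketch of the model section of model.py as it stands after this commit. The diff references nc, output, and learning_rate without showing their values, so the values below are assumptions (a CIFAR-10-style setup), as is the device stand-in:

import torch.nn as nn
import torch.optim as optim

nc = 3                 # assumption: 3 input channels (RGB)
output = 10            # assumption: 10 output classes
learning_rate = 0.001  # assumption: value not shown in the diff
device = "cpu"         # assumption: stand-in for the cpu/gpu selection in model.py

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.network = nn.Sequential(
            nn.Conv2d(nc, 16, kernel_size=3, stride=1, padding=1),
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Flatten(),
            nn.Linear(32 * 16 * 16, 64),
            nn.Linear(64, 16),
            nn.Linear(16, output),
        )

    def forward(self, x):
        return self.network(x)

model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
model.to(device)

One thing the commit leaves unchanged: there are no activation functions inside the Sequential, so consecutive Conv2d layers (and consecutive Linear layers) collapse into a single affine map; inserting nn.ReLU() between them is a common next step if more accuracy is needed.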