Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,524 Bytes
7f51798 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 |
import torch.nn as nn
from collections import OrderedDict
class LeNet5(nn.Module):
    """LeNet-5-style convolutional network (LeCun et al., 1998), adapted
    for raw 28x28 MNIST input via padding=1 on the pooling layers.

    NOTE: the original LeNet-5 takes 1x32x32 input; this variant does NOT.
    With a 32x32 input the conv stack produces 120@2x2 (480 flattened
    features) and crashes against nn.Linear(120, 84). Actual geometry:

    Input - 1x28x28
    C1 - 6@24x24  (5x5 kernel)
    tanh
    S2 - 6@13x13  (2x2 max pool, stride 2, padding 1) subsampling
    C3 - 16@9x9   (5x5 kernel)
    tanh
    S4 - 16@5x5   (2x2 max pool, stride 2, padding 1) subsampling
    C5 - 120@1x1  (5x5 kernel)
    tanh
    F6 - 84
    tanh
    F7 - 10 (output, log-probabilities via LogSoftmax)
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # Feature extractor: conv -> tanh -> pool, twice, then a 5x5 conv
        # that collapses the 5x5 maps to a 120-dim vector (120@1x1).
        # padding=1 on the pools is what makes 28x28 input reach exactly
        # 1x1 at c5 — do not remove it without also resizing the input.
        self.convnet = nn.Sequential(OrderedDict([
            ('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))),
            ('tanh1', nn.Tanh()),
            ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)),
            ('c3', nn.Conv2d(6, 16, kernel_size=(5, 5))),
            ('tanh3', nn.Tanh()),
            ('s4', nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)),
            ('c5', nn.Conv2d(16, 120, kernel_size=(5, 5))),
            ('tanh5', nn.Tanh())
        ]))
        # Classifier head: 120 -> 84 -> 10 log-probabilities.
        self.fc = nn.Sequential(OrderedDict([
            ('f6', nn.Linear(120, 84)),
            ('tanh6', nn.Tanh()),
            ('f7', nn.Linear(84, 10)),
            ('sig7', nn.LogSoftmax(dim=-1))
        ]))

    def forward(self, img):
        """Return (N, 10) class log-probabilities for a (N, 1, 28, 28) batch."""
        output = self.convnet(img)
        output = output.view(img.size(0), -1)  # flatten 120@1x1 -> (N, 120)
        output = self.fc(output)
        return output

    def extract_features(self, img):
        """Return the (N, 84) F6 activations (features before the classifier).

        Casts the input to float so integer/byte image tensors are accepted.
        """
        output = self.convnet(img.float())
        output = output.view(img.size(0), -1)
        # Run only f6 + tanh6; nn.Sequential slicing replaces the manual
        # self.fc[1](self.fc[0](...)) chain with the identical computation.
        output = self.fc[:2](output)
        return output
|