# NOTE(review): the lines below were page-status residue from the Hugging Face
# Spaces page this file was copied from ("Spaces: Runtime error"); kept here as
# a comment so the file remains valid Python.
import torch
import torch.nn.functional as F
from torch import nn
class NeuralNetwork(nn.Module):
    """1-D ResNet-style classifier over single-channel sequences.

    Three residual blocks (64 -> 128 -> 128 filters), each made of three
    Conv1d+BatchNorm stages with kernel sizes 8/5/3 and ``padding='same'``
    (so the sequence length is preserved throughout), followed by global
    average pooling over time and a linear head with log-softmax output.

    Input:  ``(batch, 1, length)`` tensor (cast to float inside ``forward``).
    Output: ``(batch, 7)`` log-probabilities.
    """

    def __init__(self):
        super().__init__()
        n_filters = 64
        # --- Block 1: 1 -> 64 channels -----------------------------------
        self.conv_1 = nn.Conv1d(1, n_filters, 8, stride=1, padding='same')
        self.norm_1 = nn.BatchNorm1d(n_filters)
        self.conv_2 = nn.Conv1d(n_filters, n_filters, 5, stride=1, padding='same')
        self.norm_2 = nn.BatchNorm1d(n_filters)
        self.conv_3 = nn.Conv1d(n_filters, n_filters, 3, stride=1, padding='same')
        self.norm_3 = nn.BatchNorm1d(n_filters)
        # 1x1 conv expands the raw 1-channel input to 64 channels so it can
        # be added to the block output (projection shortcut).
        self.conv_4 = nn.Conv1d(1, n_filters, 1, stride=1, padding='same')
        self.norm_4 = nn.BatchNorm1d(n_filters)
        # --- Block 2: 64 -> 128 channels ---------------------------------
        self.conv_5 = nn.Conv1d(n_filters, n_filters * 2, 8, stride=1, padding='same')
        self.norm_5 = nn.BatchNorm1d(n_filters * 2)
        self.conv_6 = nn.Conv1d(n_filters * 2, n_filters * 2, 5, stride=1, padding='same')
        self.norm_6 = nn.BatchNorm1d(n_filters * 2)
        self.conv_7 = nn.Conv1d(n_filters * 2, n_filters * 2, 3, stride=1, padding='same')
        self.norm_7 = nn.BatchNorm1d(n_filters * 2)
        # Projection shortcut 64 -> 128 channels.
        self.conv_8 = nn.Conv1d(n_filters, n_filters * 2, 1, stride=1, padding='same')
        self.norm_8 = nn.BatchNorm1d(n_filters * 2)
        # --- Block 3: 128 -> 128 channels --------------------------------
        self.conv_9 = nn.Conv1d(n_filters * 2, n_filters * 2, 8, stride=1, padding='same')
        self.norm_9 = nn.BatchNorm1d(n_filters * 2)
        self.conv_10 = nn.Conv1d(n_filters * 2, n_filters * 2, 5, stride=1, padding='same')
        self.norm_10 = nn.BatchNorm1d(n_filters * 2)
        self.conv_11 = nn.Conv1d(n_filters * 2, n_filters * 2, 3, stride=1, padding='same')
        self.norm_11 = nn.BatchNorm1d(n_filters * 2)
        # Block 3 keeps the channel count, so its shortcut is the identity
        # (no 1x1 conv needed) followed only by batch norm.
        self.norm_12 = nn.BatchNorm1d(n_filters * 2)
        # Global-average-pooled features (128) -> 7 class scores.
        self.classifier = nn.Linear(128, 7)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return per-class log-probabilities for a ``(batch, 1, length)`` input."""
        x = x.float()
        # Block 1
        a = F.relu(self.norm_1(self.conv_1(x)))
        b = F.relu(self.norm_2(self.conv_2(a)))
        c = self.norm_3(self.conv_3(b))
        shortcut = self.norm_4(self.conv_4(x))
        output_1 = F.relu(c + shortcut)
        # Block 2
        a = F.relu(self.norm_5(self.conv_5(output_1)))
        b = F.relu(self.norm_6(self.conv_6(a)))
        c = self.norm_7(self.conv_7(b))
        shortcut = self.norm_8(self.conv_8(output_1))
        output_2 = F.relu(c + shortcut)
        # Block 3
        a = F.relu(self.norm_9(self.conv_9(output_2)))
        b = F.relu(self.norm_10(self.conv_10(a)))
        c = self.norm_11(self.conv_11(b))
        # BUG FIX: the original reused the stale Block-2 `shortcut` tensor
        # here (after commenting out conv_12). The identity shortcut must be
        # this block's own input, output_2, batch-normalized.
        shortcut = self.norm_12(output_2)
        output_3 = F.relu(c + shortcut)
        # Global average pooling over the time axis, then the linear head.
        logits = self.classifier(output_3.mean(dim=2))
        return self.log_softmax(logits)