import torch.nn as nn


class FlowersImagesDetectionModel(nn.Module):
    """Simple CNN classifier for flower images.

    Expects 224x224 RGB inputs, which is implied by the 128 * 28 * 28
    flatten size after three 2x2 poolings (224 -> 112 -> 56 -> 28).
    """

    def __init__(self, num_classes):
        super().__init__()
        # Three convolutional blocks; each is followed by ReLU and 2x2 max pooling in forward().
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        # Classifier head operating on the flattened 128 x 28 x 28 feature maps.
        self.fc1 = nn.Linear(128 * 28 * 28, 512)
        self.fc2 = nn.Linear(512, num_classes)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))  # (N, 32, 112, 112)
        x = self.pool(self.relu(self.conv2(x)))  # (N, 64, 56, 56)
        x = self.pool(self.relu(self.conv3(x)))  # (N, 128, 28, 28)
        x = x.view(-1, 128 * 28 * 28)            # flatten feature maps for the fully connected layers
        x = self.relu(self.fc1(x))
        x = self.fc2(x)                          # raw class logits (no softmax applied)
        return x
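

# Minimal usage sketch (not part of the original module): verifies the forward
# pass with dummy 224x224 inputs. The class count of 5 is an assumed example value.
if __name__ == "__main__":
    import torch

    model = FlowersImagesDetectionModel(num_classes=5)
    dummy_batch = torch.randn(2, 3, 224, 224)  # batch of 2 random RGB images
    logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([2, 5])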