Update app.py #3
by Gosula - opened

app.py CHANGED
@@ -28,26 +28,7 @@ XCnn_train, XCnn_test, y_train, y_test = train_test_split(XCnn, y, test_size=0.2
 
 from PIL import Image
 import torchvision.transforms as transforms
-class Cnn(nn.Module):
-    def __init__(self, dropout=0.5):
-        super(Cnn, self).__init__()
-        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
-        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
-        self.conv2_drop = nn.Dropout2d(p=dropout)
-        self.fc1 = nn.Linear(1600, 100)  # 1600 = number channels * width * height
-        self.fc2 = nn.Linear(100, 10)
-        self.fc1_drop = nn.Dropout(p=dropout)
 
-    def forward(self, x):
-        x = torch.relu(F.max_pool2d(self.conv1(x), 2))
-        x = torch.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
-
-        # flatten over channel, height and width = 1600
-        x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
-
-        x = torch.relu(self.fc1_drop(self.fc1(x)))
-        x = torch.softmax(self.fc2(x), dim=-1)
-        return x
 torch.manual_seed(0)
 
 
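For reference, the fc1 input size of 1600 in the removed Cnn class follows from the layer shapes: an input image passed through two 3x3 convolutions, each followed by 2x2 max pooling, ends up as 64 channels of 5x5 feature maps, and 64 * 5 * 5 = 1600. Below is a minimal standalone sketch checking that arithmetic; it is not part of this diff, and the 1x28x28 (MNIST-style) input size is an assumption based on the surrounding training code.

# Standalone sketch (not from app.py): checks the fc1 input size used in the
# removed Cnn class. The 1x28x28 grayscale input size is an assumption.
import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1, 32, kernel_size=3)
conv2 = nn.Conv2d(32, 64, kernel_size=3)

x = torch.zeros(1, 1, 28, 28)             # dummy batch of one 28x28 image
x = F.max_pool2d(conv1(x), 2)             # 28 -> 26 after conv, -> 13 after pooling
x = F.max_pool2d(conv2(x), 2)             # 13 -> 11 after conv, -> 5 after pooling
print(x.shape)                            # torch.Size([1, 64, 5, 5])
print(x.size(1) * x.size(2) * x.size(3))  # 1600, matching nn.Linear(1600, 100)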