import torch
import torch.nn as nn
import torch.nn.functional as F

# torch.jit.ScriptModule


class Digit(nn.Module):
    """Small CNN classifier producing log-probabilities over 7 classes.

    Expects 1-channel 100x100 inputs. Spatial math per layer for a
    (B, 1, 100, 100) batch:

        conv1 (5x5):     (B, 20, 96, 96)
        max-pool (2x2):  (B, 20, 48, 48)
        conv2 (3x3):     (B, 40, 46, 46)
        flatten:         (B, 40*46*46) = (B, 84640)
        fc1:             (B, 700)
        fc2:             (B, 7)
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: two convolution layers.
        self.conv1 = nn.Conv2d(1, 20, 5)   # 1 -> 20 channels, 5x5 kernel
        self.conv2 = nn.Conv2d(20, 40, 3)  # 20 -> 40 channels, 3x3 kernel
        # Classifier head: flattened conv features -> 7 class scores.
        self.fc1 = nn.Linear(40*46*46, 700)
        self.fc2 = nn.Linear(700, 7)

    def forward(self, x):
        batch = x.size(0)
        feat = F.relu(self.conv1(x))        # (B, 20, 96, 96)
        feat = F.max_pool2d(feat, 2, 2)     # (B, 20, 48, 48)
        feat = F.relu(self.conv2(feat))     # (B, 40, 46, 46)
        flat = feat.view(batch, -1)         # (B, 84640)
        hidden = F.relu(self.fc1(flat))     # (B, 700)
        scores = self.fc2(hidden)           # (B, 7)
        # Normalize to per-class log-probabilities.
        return F.log_softmax(scores, dim=1)
