import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from raw.base_model import BaseModel

# Hyper-parameters shared by the training loop and the optimizer factory.
training_params = dict(
    num_epochs=10,      # full passes over the training set
    batch_size=64,      # samples per gradient step
    learning_rate=0.01, # SGD step size
    momentum=0.5,       # SGD momentum coefficient
)

# Convolutional network
# Feature-map pipeline: 1*28*28 -> 10*24*24 -> 10*12*12 -> 20*8*8 -> 20*4*4 -> 40*2*2 -> 40*1*1 -> 40 -> 10
class Model(BaseModel):
    """Small convolutional classifier for 28x28 single-channel MNIST inputs.

    Feature-map sizes through the network:
        1*28*28 -> 10*24*24 -> 10*12*12 -> 20*8*8 -> 20*4*4
        -> 40*2*2 -> 40*1*1 -> 40 -> 10
    """

    def __init__(self, options):
        # NOTE: `options` is accepted for interface compatibility but unused here.
        super().__init__('MNIST_old', training_params, dataset_name = 'MNIST')
        # Attribute names are kept stable so saved state_dicts keep matching.
        self.conv1 = nn.Conv2d(1, 10, 5)   # 1 in-channel, 10 out-channels, 5x5 receptive field
        self.conv2 = nn.Conv2d(10, 20, 5)  # 10 -> 20 channels, 5x5 kernel
        self.conv3 = nn.Conv2d(20, 40, 3)  # 20 -> 40 channels, 3x3 kernel
        self.fc = nn.Linear(40, 10)        # final classifier over the 10 digit classes

    def forward(self, x):
        """Forward pass; returns per-class log-probabilities (dim 1)."""
        # Each stage is convolution -> ReLU -> 2x2 max-pooling:
        #   1*28*28 -> 10*12*12 -> 20*4*4 -> 40*1*1
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.max_pool2d(F.relu(conv(x)), 2)
        flat = x.view(x.size(0), -1)       # 40*1*1 -> 40
        logits = self.fc(flat)             # 40 -> 10
        return F.log_softmax(logits, dim = 1)

    def get_optimizer(self):
        """Build the SGD optimizer configured by the module-level training_params."""
        params = training_params
        return optim.SGD(self.parameters(),
                        lr = params['learning_rate'],
                        momentum = params['momentum'])
