import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from raw.base_model import BaseModel

# Training hyperparameters, passed to BaseModel in Model.__init__ and read
# again by Model.get_optimizer(). Keys are the names BaseModel expects —
# do not rename them.
training_params = {
    'num_epochs': 20,        # full passes over the training set
    'batch_size': 100,       # samples per minibatch
    'learning_rate': 0.05,   # initial SGD learning rate
    'momentum': 0.9,         # Nesterov momentum factor (see get_optimizer)
    'decay': 1e-6            # L2 weight-decay coefficient
}

class Model(BaseModel):
    """Small VGG-style CNN for MNIST classification (10 classes).

    Architecture: two conv blocks (32 then 64 filters, each two 3x3 convs
    + ReLU + 2x2 max-pool), followed by three fully connected layers
    (200 -> 200 -> 10) with dropout after the first FC layer.

    Expected input is (N, input_channels, 28, 28); after the two conv/pool
    blocks the spatial size is 4x4, which fixes fc1's input at 4*4*64.
    """

    def __init__(self, options):
        """Build the layers.

        Args:
            options: dict of model options; reads 'thermometer_level'
                (number of input channels per pixel from thermometer
                encoding, default 1 for plain grayscale).
        """
        super().__init__('MNIST', training_params)

        # Thermometer encoding expands each grayscale pixel into
        # `thermometer_level` channels; with no encoding this is 1.
        input_channels = 1 * options.get('thermometer_level', 1)

        # Block 1: 28x28 -> 26x26 -> 24x24 -> pool -> 12x12, 32 channels.
        self.conv32 = nn.Sequential(
            nn.Conv2d(input_channels, 32, 3),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # Block 2: 12x12 -> 10x10 -> 8x8 -> pool -> 4x4, 64 channels.
        self.conv64 = nn.Sequential(
            nn.Conv2d(32, 64, 3),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.fc1 = nn.Linear(4 * 4 * 64, 200)
        # BUGFIX: was nn.Dropout2d, which expects 4-D (N, C, H, W) input
        # and zeroes whole channels; here it is applied to the flattened
        # (N, 200) output of fc1, where element-wise nn.Dropout is the
        # correct layer (Dropout2d on non-4-D input is deprecated).
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(200, 200)
        self.fc3 = nn.Linear(200, 10)

    def forward(self, x):
        """Compute max-shifted logits for a batch of images.

        Args:
            x: float tensor of shape (N, input_channels, 28, 28).

        Returns:
            (N, 10) tensor of logits, shifted so each row's max is 0.
        """
        out = self.conv32(x)
        out = self.conv64(out)
        # Flatten per sample, keeping the batch dimension explicit so a
        # shape mismatch raises instead of silently resizing the batch.
        out = out.reshape(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = self.dropout(out)
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        # Subtract the per-row max for numerical stability of any
        # downstream softmax / log-softmax (invariant under this shift).
        out = out - torch.max(out, dim=1, keepdim=True)[0]
        return out

    def get_optimizer(self):
        """Return the SGD optimizer configured from training_params."""
        return optim.SGD(self.parameters(),
                         lr=training_params['learning_rate'],
                         momentum=training_params['momentum'],
                         weight_decay=training_params['decay'],
                         nesterov=True)
