# alexnet
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data
from torchvision import datasets, transforms

# Mini-batch size used by both the train and test DataLoaders.
BATCH_SIZE = 4
# Side length images are scaled to before entering the network; the layer
# arithmetic in AlexNet.features assumes 227x227 inputs.
IMG_SIZE = 227


class AlexNet(nn.Module):
    """AlexNet (Krizhevsky et al., 2012) expecting 227x227 RGB inputs.

    Args:
        num_classes: width of the final classification layer (default 1000).
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()

        # Spatial sizes in the comments assume a 227x227 input. Conv2d and
        # MaxPool2d floor-divide, i.e. size = (in - k + 2p) // s + 1.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, 11, stride=4, padding=2),
            # (227 - 11 + 2*2) // 4 + 1 = 56
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # (56 - 3) // 2 + 1 = 27  (26.5 floors to 26, then +1)
            nn.Conv2d(64, 192, 5, padding=2),
            # (27 - 5 + 2*2) // 1 + 1 = 27
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            # (27 - 3) // 2 + 1 = 13
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            # (13 - 3 + 2) // 1 + 1 = 13
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, 3, padding=1),
            # stays 13 (same-padding 3x3 conv)
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            # stays 13
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # (13 - 3) // 2 + 1 = 6  ->  final feature map is 256 x 6 x 6
        )

        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes)
        )

    def forward(self, x):
        """Return raw class scores (logits) of shape (batch, num_classes)."""
        x = self.features(x)
        # Flatten per sample. Using x.size(0) keeps the batch dimension
        # intact and fails loudly on an unexpected feature-map size, whereas
        # view(-1, 256*6*6) would silently fold the error into a wrong
        # batch size.
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x


def train(model, opt, data_loader, epoch):
    """Run one training epoch over `data_loader`.

    Args:
        model: network producing raw logits of shape (batch, num_classes).
        opt: optimizer constructed over model.parameters().
        data_loader: yields (input, target) batches; targets are class indices.
        epoch: epoch number, used only in the progress log line.
    """
    model.train()

    # Move batches to whatever device the model lives on rather than
    # hard-coding .cuda(); behavior is unchanged when the model is on GPU,
    # and the function also works on CPU-only machines.
    device = next(model.parameters()).device

    for batch_idx, (data, target) in enumerate(data_loader):
        # Variable wrapping is a no-op since PyTorch 0.4 and was removed here.
        data, target = data.to(device), target.to(device)
        opt.zero_grad()
        output = model(data)
        # log_softmax + nll_loss over the class dimension (== cross-entropy).
        loss = F.nll_loss(F.log_softmax(output, dim=1), target)
        loss.backward()
        opt.step()
        if batch_idx % 100 == 0:
            print(batch_idx)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(data_loader.dataset),
                # loss.item() replaces loss.data[0], which raises on the
                # 0-dim loss tensors of modern PyTorch.
                100. * batch_idx / len(data_loader), loss.item()
            ))


if __name__ == '__main__':
    import sys

    # Usage: python alexnet.py <path-to-cifar10-data>
    data_dir = sys.argv[1]

    # Extra DataLoader options (e.g. num_workers, pin_memory) go here.
    kwargs = {}

    # CIFAR-10 images are 32x32; upscale them to 227x227 to match the
    # input size AlexNet's feature extractor assumes.
    # NOTE(review): no Normalize() transform is applied — confirm whether
    # training on raw [0, 1] tensors is intentional.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(data_dir, train=True, download=False, transform=transforms.Compose([
            # transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the drop-in replacement.
            transforms.Resize(IMG_SIZE),
            transforms.ToTensor(),
        ])),
        batch_size=BATCH_SIZE, shuffle=True, **kwargs
    )

    # Built for symmetry with train_loader; not consumed by the loop below.
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(data_dir, train=False, download=False, transform=transforms.Compose([
            transforms.Resize(IMG_SIZE),
            transforms.ToTensor(),
        ])),
        batch_size=BATCH_SIZE, shuffle=False, **kwargs
    )

    # CIFAR-10 has 10 classes; the model is trained on the GPU.
    m = AlexNet(10).cuda()

    optimizer = optim.SGD(params=m.parameters(), lr=1e-3)

    for e in range(1):
        train(m, optimizer, train_loader, e)

