import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision
from torch.utils.data import DataLoader

BATCH_SIZE = 64
LR = 0.005
EPOCH = 100

N_TEST_IMG = 5


class AutoEncoder(nn.Module):
    """Symmetric fully-connected autoencoder for flattened 28x28 images.

    The encoder compresses a 784-dim input down to a 3-dim latent code
    through 128 -> 64 -> 12 -> 3; the decoder mirrors that path and ends
    in a Sigmoid so reconstructions lie in [0, 1], matching normalized
    pixel intensities.
    """

    def __init__(self):
        super().__init__()

        # Encoder: Tanh between layers, no activation on the latent code
        # itself so it can take values on the whole real line.
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 128), nn.Tanh(),
            nn.Linear(128, 64), nn.Tanh(),
            nn.Linear(64, 12), nn.Tanh(),
            nn.Linear(12, 3),
        )

        # Decoder: exact mirror of the encoder, with a final Sigmoid to
        # squash the reconstruction into the [0, 1] pixel range.
        self.decoder = nn.Sequential(
            nn.Linear(3, 12), nn.Tanh(),
            nn.Linear(12, 64), nn.Tanh(),
            nn.Linear(64, 128), nn.Tanh(),
            nn.Linear(128, 28 * 28), nn.Sigmoid(),
        )

    def forward(self, x):
        """Return (latent_code, reconstruction) for a (N, 784) batch."""
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction


if __name__ == '__main__':
    import sys

    # Usage: python <script> <mnist_data_dir> <output_model_file>
    data_dir = sys.argv[1]
    model_file = sys.argv[2]

    train_data = torchvision.datasets.MNIST(root=data_dir, train=True,
                                            transform=torchvision.transforms.ToTensor(),
                                            download=False)

    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE,
                              shuffle=True)

    # Fall back to CPU when no GPU is available instead of crashing on
    # a hard-coded .cuda() call.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    auto_encoder = AutoEncoder().to(device)
    optimizer = torch.optim.Adam(auto_encoder.parameters(), lr=LR)
    loss_fn = nn.MSELoss()

    n_steps = len(train_loader)

    for e in range(EPOCH):
        # Labels are irrelevant for reconstruction training, so they are
        # discarded; Variable wrappers are unnecessary since PyTorch 0.4.
        for step, (x, _) in enumerate(train_loader):
            # The autoencoder reconstructs its own input: flatten each
            # image to 784 features and use it as both input and target.
            b_x = x.view(-1, 28 * 28).to(device)

            _, decoded = auto_encoder(b_x)

            loss = loss_fn(decoded, b_x)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                # loss.item() replaces the removed loss.data[0] API,
                # which raises IndexError on 0-dim tensors in modern PyTorch.
                print("epoch: {} step: {}/{} loss: {}".format(e, step, n_steps, loss.item()))

    torch.save(auto_encoder.state_dict(), model_file)
