import torch
import torch.nn as nn


from torch.utils.data import Dataset, DataLoader

# Training hyperparameters used by trainAutoEncoder().
EPOCH  = 1500  # number of full passes over the training set
LR = 0.001  # Adam learning rate
BATCH_SIZE = 128  # mini-batch size for the DataLoader

class AutoEncoder(nn.Module):
    """Fully-connected autoencoder for 14-dimensional feature vectors.

    The encoder maps a 14-d input to a 128-d code through two Tanh hidden
    layers (14 -> 32 -> 64 -> 128); the decoder mirrors the encoder back
    to 14 dimensions (128 -> 64 -> 32 -> 14).

    NOTE(review): the code dimension (128) is *larger* than the input (14),
    so this is an over-complete autoencoder with no compression bottleneck —
    confirm that is intentional.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(14, 32),
            nn.Tanh(),
            nn.Linear(32, 64),
            nn.Tanh(),
            nn.Linear(64, 128),
        )
        self.decoder = nn.Sequential(
            nn.Linear(128, 64),
            nn.Tanh(),
            nn.Linear(64, 32),
            nn.Tanh(),
            nn.Linear(32, 14),
        )

    def forward(self, x):
        """Encode then decode `x`.

        Args:
            x: float tensor whose last dimension is 14.

        Returns:
            (encoded, decoded): the 128-d code and the 14-d reconstruction.
        """
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded


def trainAutoEncoder(train_data):
    """Train an AutoEncoder on `train_data` and save it to ./AutoEncoder.pkl.

    Args:
        train_data: a torch Dataset yielding (features, label) pairs.
            Features must be convertible to a float tensor with last
            dimension 14. Labels are ignored — the reconstruction target
            is the input itself.

    Side effects:
        Prints the model and periodic loss values; writes the trained
        module (full pickle) to './AutoEncoder.pkl'.
    """
    loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

    coder = AutoEncoder()
    print(coder)

    optimizer = torch.optim.Adam(coder.parameters(), lr=LR)
    loss_func = nn.MSELoss()

    for epoch in range(EPOCH):
        last_loss = None
        # Labels are discarded: an autoencoder reconstructs its own input.
        for data, _label in loader:
            data = data.float()
            _encoded, decoded = coder(data)
            loss = loss_func(decoded, data)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # .item() extracts the Python scalar; `.data` is a legacy
            # autograd-unsafe accessor.
            last_loss = loss.item()

        # Log once per epoch. The original print sat inside the batch loop,
        # so logging epochs emitted one line per *batch*.
        if epoch % 100 == 0 and last_loss is not None:
            print('Epoch :', epoch, '|', 'train_loss:%.7f' % last_loss)

    # Saves the entire module via pickle (loading requires the AutoEncoder
    # class to be importable). Kept as-is for compatibility with existing
    # loaders; saving state_dict() would be the more robust alternative.
    torch.save(coder, './AutoEncoder.pkl')
    print('________________________________________')
    print('finish training')