import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader


class DiabetesDataset(Dataset):
    """Map-style dataset over a diabetes CSV file.

    Every row holds the feature columns first and the binary label in the
    last column; the whole file is loaded eagerly as float32.
    """

    def __init__(self, filepath):
        """Read *filepath* (comma-separated float32 values) into tensors."""
        raw = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        # All columns but the last are features; the last column is kept
        # two-dimensional ([-1] indexing) so labels come out shaped (N, 1).
        self.x_data = torch.from_numpy(raw[:, :-1])
        self.y_data = torch.from_numpy(raw[:, [-1]])
        self.len = raw.shape[0]  # number of samples (rows)

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Return the number of samples."""
        return self.len


class Model(torch.nn.Module):
    """Three-layer MLP for binary classification: 8 -> 6 -> 4 -> 1.

    Hidden layers use ReLU; the output head is a single sigmoid, producing
    a probability in (0, 1) suitable for BCELoss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.relu = torch.nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Forward pass: x of shape (batch, 8) -> probabilities of shape (batch, 1)."""
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        # BUG FIX: the original applied ReLU to the output layer and then
        # sigmoid twice, which confines every prediction to roughly
        # (0.62, 0.73) — the model could never emit a probability below 0.5.
        # The correct head for BCELoss is one sigmoid over the raw logit.
        x = self.sigmoid(self.linear3(x))
        return x


if __name__ == '__main__':
    # Build the dataset and a shuffled mini-batch loader (guarded by
    # __main__ so num_workers > 0 is safe on spawn-based platforms).
    dataset = DiabetesDataset('dataset/diabetes/diabetes.csv')
    train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)

    model = Model()

    # BUG FIX: `size_average` was deprecated and then removed from PyTorch
    # loss constructors; reduction='sum' is the documented equivalent of
    # size_average=False (sum the per-sample losses instead of averaging).
    criterion = torch.nn.BCELoss(reduction='sum')
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(1000):
        loss = None  # last mini-batch loss of the epoch, kept for logging
        for x, y in train_loader:
            y_pred = model(x)            # forward pass
            loss = criterion(y_pred, y)  # summed BCE over the mini-batch
            optimizer.zero_grad()        # clear gradients from the previous step
            loss.backward()              # backpropagate
            optimizer.step()             # apply the SGD update
        # Guarding on `loss` avoids a NameError/UnboundLocalError if the
        # loader yielded no batches (e.g. an empty dataset file).
        if epoch % 10 == 0 and loss is not None:
            print("after %s epochs, loss is %f" % (epoch, loss.item()))
