import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data

class NeuralNetwork(nn.Module):
    """Small fully connected classifier with two hidden layers.

    Maps ``num_inputs`` features to ``num_outputs`` raw class scores;
    no softmax is applied inside the network, so the output is logits.
    """

    def __init__(self, num_inputs, num_outputs):
        super().__init__()
        hidden_1, hidden_2 = 30, 20
        self.layers = nn.Sequential(
            nn.Linear(num_inputs, hidden_1),   # 1st hidden layer
            nn.ReLU(),
            nn.Linear(hidden_1, hidden_2),     # 2nd hidden layer
            nn.ReLU(),
            nn.Linear(hidden_2, num_outputs),  # output layer (logits)
        )

    def forward(self, x):
        """Return unnormalized class scores (logits) for the batch *x*."""
        return self.layers(x)


class ToyDataset(data.Dataset):
    """Minimal in-memory dataset pairing feature rows with labels."""

    def __init__(self, X, y):
        self.X = X  # feature tensor, one row per sample
        self.y = y  # label tensor, aligned with X row-for-row

    def __len__(self):
        # The number of samples is the number of feature rows.
        return len(self.X)

    def __getitem__(self, idx):
        # DataLoader expects a (features, label) pair per index.
        features, label = self.X[idx], self.y[idx]
        return features, label


def load_data():
    """Return a tiny hard-coded binary-classification split.

    Returns:
        Tuple ``(X_train, y_train, X_test, y_test)`` where the feature
        tensors hold 2-D points and the label tensors hold class
        indices 0 or 1.
    """
    train_points = [
        [-1.2, 3.1],
        [-0.9, 2.9],
        [-0.5, 2.6],
        [2.3, -1.1],
        [2.7, -1.5],
    ]
    train_labels = [0, 0, 0, 1, 1]

    test_points = [
        [-0.8, 2.8],
        [2.6, -1.6],
    ]
    test_labels = [0, 1]

    return (
        torch.tensor(train_points),
        torch.tensor(train_labels),
        torch.tensor(test_points),
        torch.tensor(test_labels),
    )


def main():
    """Train the toy classifier, report test metrics, and save the weights.

    Side effects: prints progress to stdout and writes the trained
    ``state_dict`` to ``model.pth`` in the working directory.
    """
    torch.manual_seed(123)  # reproducible weight init and shuffling
    model = NeuralNetwork(num_inputs=2, num_outputs=2)

    # Use a GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    optimizer = optim.SGD(model.parameters(), lr=0.5)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(model)
    print(f"Number of parameters: {num_params}")

    X_train, y_train, X_test, y_test = load_data()
    train_dataset = ToyDataset(X_train, y_train)
    test_dataset = ToyDataset(X_test, y_test)

    train_loader = data.DataLoader(train_dataset, batch_size=2, shuffle=True, num_workers=0, drop_last=True)
    # Evaluation must see every sample: drop_last=False so a final partial
    # batch is never silently discarded (the original used drop_last=True).
    # NOTE(review): evaluation below uses the raw tensors directly, so this
    # loader is currently unused; kept for parity with train_loader.
    test_loader = data.DataLoader(test_dataset, batch_size=2, shuffle=False, num_workers=0, drop_last=False)

    num_epochs = 3
    for epoch in range(num_epochs):
        model.train()
        for batch_id, (X, y) in enumerate(train_loader):
            X = X.to(device)
            y = y.to(device)

            y_pred = model(X)  # raw logits
            loss = F.cross_entropy(y_pred, y)

            # Standard step: clear stale grads, backprop, update weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(f"Epoch {epoch+1:03d}/{num_epochs:03d}, "
                  f"| Batch {batch_id+1:03d}/{len(train_loader):03d}: "
                  f"| Loss = {loss.item():.4f}")

    model.eval()
    with torch.no_grad():
        X_test = X_test.to(device)
        y_test = y_test.to(device)

        outputs = model(X_test)                      # raw logits
        preds = torch.softmax(outputs, dim=1)        # probabilities, for reporting
        preds_label = torch.argmax(preds, dim=1)
        acc = (preds_label == y_test).float().mean()

        # BUG FIX: F.cross_entropy expects raw logits, not softmax output.
        # Passing `preds` applied softmax twice and reported a wrong loss.
        loss = F.cross_entropy(outputs, y_test)
        print(f"Preds Label: {preds_label}")
        print(f"Test Loss = {loss.item():.4f}, "
              f"Accuracy = {acc.item():.4f}")

    # Persist trained weights only (not the full module) for later reuse.
    torch.save(model.state_dict(), "model.pth")


# Run the training script only when executed directly, not on import.
if __name__ == "__main__":
    main()

