from sklearn import datasets
import torch
from torch import nn
from TranEncoder import TransformerEncoder
from tqdm import tqdm


class TranE(nn.Module):
    """Sequence classifier: a feature backbone plus a two-stage head.

    The backbone ``net`` maps (batch, time, dim) features to the same
    shape; ``head1`` projects each time step to ``cls`` scores, and
    ``head2`` collapses the time axis to one score vector per sample.
    """

    def __init__(self, net, dim, time, cls):
        super(TranE, self).__init__()
        self.net = net
        # Per-step classifier, then the temporal pooling layer.
        self.head1 = nn.Linear(dim, cls)
        self.act = nn.ReLU()
        self.head2 = nn.Linear(time, 1)

    def forward(self, x):
        # Backbone features -> per-step class scores.
        feats = self.net(x)
        scores = self.act(self.head1(feats))
        # Swap (time, cls) -> (cls, time) so head2 mixes over time steps.
        pooled = self.head2(scores.transpose(-1, -2))
        # Drop the singleton trailing dimension: result is (batch, cls).
        return pooled.squeeze(-1)


def print_model_parm_nums(model):
    """Print the model's total parameter count in millions."""
    total = 0
    for param in model.parameters():
        total += param.nelement()
    print('  + Number of params: %.2fM' % (total / 1e6))


# Load the 8x8 digits dataset; pixel values are 0..16, so dividing by 16
# normalizes them to [0, 1]. Each flat 64-feature sample is reshaped to
# (8, 8): 8 "time steps" of 8 features for the transformer encoder.
ds = datasets.load_digits()
X = torch.tensor(ds.data / 16).reshape((-1, 8, 8)).float()
Y = torch.tensor(ds.target).long()
# 80/20 train/validation split, in the dataset's original order.
split_num = int(len(X) * 0.8)
X_train, Y_train = X[0:split_num], Y[0:split_num]
# Bug fix: the original sliced [split_num:-1], silently dropping the last
# validation sample; [split_num:] keeps every remaining sample.
X_val, Y_val = X[split_num:], Y[split_num:]


# Model hyperparameters. `time` and `embed_dim` match the (8, 8) sample
# shape: 8 time steps of 8 features each. (The unused `batch_size`
# constant was removed — training below is full-batch.)
time, embed_dim = 8, 8
hidden_dim, num_layers, num_heads, dropout = 256, 12, 2, 0.1
cls = 10  # ten digit classes

feature_net = TransformerEncoder(
    embed_dim, hidden_dim, num_layers, num_heads, dropout)
model = TranE(feature_net, embed_dim, time, cls)
# tqdm progress bar doubling as the epoch iterator.
proc_bar = tqdm(range(1000))

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in proc_bar:
    # Full-batch forward pass: logits of shape (num_train, cls).
    output = model(X_train)
    loss = loss_fn(output, Y_train)

    # Standard backward pass: clear stale gradients, backprop, step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Fix: 'Epoch' was a pointless f-string with no placeholders (F541).
    proc_bar.set_postfix({'Epoch': f'{epoch}\tLoss: {loss.item()}'})

print_model_parm_nums(model)

# Bug fix: evaluate in eval mode with gradients disabled. The encoder is
# built with dropout=0.1, so without model.eval() dropout stays active
# during validation; without no_grad() the forward pass needlessly tracks
# gradients.
model.eval()
with torch.no_grad():
    preds = model(X_val).argmax(axis=1)
ACC = (preds == Y_val).sum().item() / len(Y_val)
print(f'ACC: {ACC:.2f}')
