import numpy as np
import torch

from config import Config
from hdgi import HDGI
from load import prepare_data

# Module-level configuration, shared by train() below.
conf = Config()

if torch.cuda.is_available():
    # NOTE(review): GPU index 2 is hard-coded — presumably a cluster-specific
    # choice; consider making it configurable via Config. TODO confirm.
    torch.cuda.set_device(2)


def train(ft, adj, num_meta_paths=None):
    """Train an HDGI model with the DGI-style contrastive objective.

    A corrupted view of the graph is built each epoch by shuffling node
    features; the model is trained to discriminate real from shuffled
    nodes with a binary cross-entropy loss. The best (lowest-loss) model
    state is checkpointed to ``best_hdgi.pkl``, with early stopping after
    ``conf.patience`` epochs without improvement.

    Args:
        ft: node-feature tensor; indexed as (batch, nb_nodes, ft_size) —
            assumed from the shuffling below, TODO confirm against HDGI.
        adj: adjacency structure passed straight through to the model.
        num_meta_paths: number of meta-path adjacencies given to HDGI.
            Defaults to the module-level ``p`` produced by
            ``prepare_data`` (backward-compatible with existing callers).
    """
    if num_meta_paths is None:
        # Fall back to the global set in __main__ (original behavior).
        num_meta_paths = p

    nb_nodes = ft.shape[1]
    ft_size = ft.shape[2]
    model = HDGI(ft_size, conf.hid_units, conf.semantic_hid_units, num_meta_paths, conf.non_linearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=conf.lr, weight_decay=conf.l2_coef)

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        print('Using CUDA')
        model.cuda()
        ft = ft.cuda()
        adj = adj.cuda()

    b_loss = torch.nn.BCEWithLogitsLoss()
    cnt_wait = 0
    best = 1e9  # lowest loss seen so far, kept as a plain float

    for epoch in range(conf.nb_epochs):
        model.train()
        optimiser.zero_grad()

        # Corrupted view: permute node features along the node axis.
        idx = np.random.permutation(nb_nodes)
        shuffle_fts = ft[:, idx, :]

        # Positive labels (1) for real nodes, negative (0) for shuffled.
        lbl_1 = torch.ones(conf.batch_size, nb_nodes)
        lbl_2 = torch.zeros(conf.batch_size, nb_nodes)
        lbl = torch.cat((lbl_1, lbl_2), 1)

        if use_cuda:
            shuffle_fts = shuffle_fts.cuda()
            lbl = lbl.cuda()

        logits = model(ft, shuffle_fts, adj, conf.sparse, None, None, None)
        loss = b_loss(logits, lbl)
        print('Epoch: {}, Loss:{}'.format(epoch, loss.item()))

        # Compare/store as a float: keeping the loss *tensor* in `best`
        # would retain that epoch's whole autograd graph for the rest of
        # the run (memory leak).
        cur_loss = loss.item()
        if cur_loss < best:
            best = cur_loss
            cnt_wait = 0
            torch.save(model.state_dict(), 'best_hdgi.pkl')
        else:
            cnt_wait += 1

        if cnt_wait == conf.patience:
            print('Early stopping!')
            break

        loss.backward()
        optimiser.step()


if __name__ == '__main__':
    # prepare_data returns features/labels/adjacency plus train/val/test
    # index splits; only features and the adjacency list are used here.
    # NOTE(review): `p` is a module-level global that train() reads when
    # constructing HDGI — presumably the number of meta-paths; confirm
    # against prepare_data.
    features, labels, sp_adj_list, p, idx_train, idx_val, idx_test = prepare_data(conf.dataset_name, conf.sparse)

    train(features, sp_adj_list)
