import numpy as np
import torch

from config import Config
from dgi import DGI
from layers import LogReg
from load import prepare_data

conf = Config()


def train(ft, adjacent, y, train_idx, test_idx, n_runs=50, clf_epochs=100):
    """Evaluate pre-trained DGI embeddings with logistic-regression probes.

    Loads DGI encoder weights from 'best_dgi.pkl', embeds all nodes, then
    trains ``n_runs`` independent logistic-regression classifiers on the
    training split and reports test-accuracy statistics (in percent).

    Args:
        ft: node feature tensor; last dimension is the feature size.
        adjacent: adjacency structure consumed by the DGI encoder.
        y: one-hot label tensor; last dimension is the number of classes.
        train_idx: index tensor selecting the training nodes.
        test_idx: index tensor selecting the test nodes.
        n_runs: number of independent probe classifiers to average over.
        clf_epochs: optimisation steps per probe classifier.
    """
    # Derive sizes from the inputs instead of relying on module globals
    # (the original read ft_size / nb_classes bound only by __main__).
    # Assumes features and one-hot labels carry those sizes in the last dim.
    ft_size = ft.shape[-1]
    nb_classes = y.shape[-1]

    model = DGI(ft_size, conf.hid_units, conf.non_linearity)

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        print('Using CUDA')

        model.cuda()
        ft = ft.cuda()
        adjacent = adjacent.cuda()
        y = y.cuda()
        train_idx = train_idx.cuda()
        test_idx = test_idx.cuda()

    x_loss = torch.nn.CrossEntropyLoss()

    # NOTE: the DGI pretraining loop that used to live here (commented out)
    # has been removed; this script assumes the encoder was already trained
    # and its best checkpoint saved to 'best_dgi.pkl'.
    print('Loading model')
    model.load_state_dict(torch.load('best_dgi.pkl'))
    embeds, _ = model.embed(ft, adjacent, conf.sparse, None)
    train_embeds = embeds[train_idx]
    test_embeds = embeds[test_idx]

    # y is one-hot; recover integer class labels for CrossEntropyLoss.
    train_lbls = torch.argmax(y[train_idx], dim=1)
    test_lbls = torch.argmax(y[test_idx], dim=1)

    tot = torch.zeros(1)
    if use_cuda:
        # Bug fix: originally .cuda() was called unconditionally here and
        # crashed on CPU-only machines despite the availability check above.
        tot = tot.cuda()
    accuracy = []

    for i in range(n_runs):
        log = LogReg(conf.hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        if use_cuda:
            # Same unconditional-.cuda() fix as for `tot` above.
            log.cuda()

        for _ in range(clf_epochs):
            log.train()
            opt.zero_grad()

            logits = log(train_embeds)
            loss = x_loss(logits, train_lbls)

            loss.backward()
            opt.step()

        logits = log(test_embeds)
        pred = torch.argmax(logits, dim=1)
        acc = torch.sum(pred == test_lbls).float() / test_lbls.shape[0]
        accuracy.append(acc * 100)
        print('Epoch:{}, Accuracy: {}'.format(i, acc.item()))
        # Bug fix: accumulate on the same percent scale as `accuracy`, so
        # the printed Average and Mean no longer disagree by a factor of 100.
        tot += acc * 100

    acc = torch.stack(accuracy)
    print('Average accuracy: {}, Mean Accuracy: {}, Std Accuracy: {}'.format((tot / n_runs).item(), acc.mean().item(),
                                                                             acc.std(dim=0).item()))


if __name__ == '__main__':
    # Load the dataset named in the config. The size metadata
    # (nb_nodes, ft_size, nb_classes) stays bound at module level
    # because train() reads ft_size / nb_classes as globals.
    (features, adj, labels,
     idx_train, idx_test,
     nb_nodes, ft_size, nb_classes) = prepare_data(conf.dataset_name, conf.sparse)

    train(features, adj, labels, idx_train, idx_test)
