import glob
import os

import numpy as np
import torch
from sklearn import metrics

from config import Config
from hdgi import HDGI
from layers import LogReg
from load import prepare_data

# Global run configuration (dataset name, hidden sizes, patience, sparsity flag, ...).
conf = Config()

# Pin all subsequent CUDA allocations to device index 3 when a GPU is present.
# NOTE(review): the device index is hard-coded — confirm device 3 exists on the
# target machine, or make it configurable via Config.
if torch.cuda.is_available():
    torch.cuda.set_device(3)


def macro_f1(pred, y):
    """Return the macro-averaged F1 score between two label tensors.

    Both arguments are 1-D integer label tensors (predicted and ground-truth
    class indices); they are moved to host memory before scoring.
    """
    y_true = y.cpu().numpy()
    y_pred = pred.cpu().numpy()
    return metrics.f1_score(y_true, y_pred, average='macro')


def test(ft, adj, y, train_idx, val_idx, test_idx):
    """Evaluate pretrained HDGI embeddings with a logistic-regression probe.

    Loads the saved HDGI encoder ('best_hdgi.pkl'), embeds the graph, then
    trains a fresh LogReg classifier on the frozen embeddings 50 times
    (early stopping on validation loss) and prints mean/std test accuracy
    and macro-F1 over the runs.

    Args:
        ft: node feature tensor; indexing below assumes shape (1, N, ft_size).
        adj: adjacency structure(s) accepted by HDGI / model.embed.
        y: label tensor; indexing below assumes shape (1, N, ...) with class
           scores along the last axis (argmax over dim=1 of y[0, idx]).
        train_idx, val_idx, test_idx: 1-D index tensors selecting the
            train / validation / test nodes.
    """
    # Derive sizes from the arguments. The original read the module-level
    # globals `features`/`labels` here, silently ignoring the parameters.
    ft_size = ft.shape[2]
    # NOTE(review): kept as dim 1 to match the original (`labels.shape[1]`);
    # if y is (1, N, C) this is N, not the class count — verify upstream.
    nb_classes = y.shape[1]

    # NOTE(review): `p` (set by prepare_data in __main__) is still read from
    # module scope; pass it explicitly if this function gains other callers.
    model = HDGI(ft_size, conf.hid_units, conf.semantic_hid_units, p, conf.non_linearity)
    x_loss = torch.nn.CrossEntropyLoss()

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        print('Using CUDA')
        model.cuda()
        ft = ft.cuda()
        adj = adj.cuda()
        y = y.cuda()
        train_idx = train_idx.cuda()
        test_idx = test_idx.cuda()
        val_idx = val_idx.cuda()

    print('Loading model')
    model.load_state_dict(torch.load('best_hdgi.pkl'))

    embeds, _ = model.embed(ft, adj, conf.sparse, None)
    train_embeds = embeds[0, train_idx]
    test_embeds = embeds[0, test_idx]
    val_embeds = embeds[0, val_idx]

    # Use the (possibly CUDA-resident) index parameters. The original
    # indexed with the CPU globals `idx_train`/`idx_test`/`idx_val`, mixing
    # devices after the .cuda() moves above.
    train_lbls = torch.argmax(y[0, train_idx], dim=1)
    test_lbls = torch.argmax(y[0, test_idx], dim=1)
    val_lbls = torch.argmax(y[0, val_idx], dim=1)

    # (The original also created an unused `tot` tensor and called .cuda()
    # on it unconditionally, crashing on CPU-only machines — removed.)
    accuracy = []
    mac_f1_list = []

    for _run in range(50):
        log = LogReg(conf.hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        if use_cuda:
            log.cuda()

        # Early-stopping state must reset for EVERY run. The original kept
        # best/best_epoch/bad_counter across all 50 runs, so a later run
        # could stop immediately and then try to load a checkpoint file
        # that had already been deleted.
        best = float('inf')
        best_epoch = 0
        bad_counter = 0
        ckpt = '{}.mlp.pkl'.format(best_epoch)

        for epoch in range(10000):
            log.train()
            opt.zero_grad()

            loss = x_loss(log(train_embeds), train_lbls)
            loss.backward()
            opt.step()

            # Validate the post-step weights without building a graph, and
            # keep a float — storing the live loss tensor (as the original
            # did) retains every epoch's autograd graph in memory.
            with torch.no_grad():
                val_loss = x_loss(log(val_embeds), val_lbls).item()

            if val_loss < best:
                # Checkpoint only on improvement instead of every epoch,
                # replacing the previous best file.
                if os.path.exists(ckpt):
                    os.remove(ckpt)
                best = val_loss
                best_epoch = epoch
                bad_counter = 0
                ckpt = '{}.mlp.pkl'.format(epoch)
                torch.save(log.state_dict(), ckpt)
            else:
                bad_counter += 1
                if bad_counter == conf.patience:
                    break

        print("Optimization Finished!")
        print('Loading {}th epoch'.format(best_epoch))
        log.load_state_dict(torch.load('{}.mlp.pkl'.format(best_epoch)))

        # Remove any leftover probe checkpoints before the next run.
        for file in glob.glob('*.mlp.pkl'):
            os.remove(file)

        with torch.no_grad():
            pred = torch.argmax(log(test_embeds), dim=1)
        acc = torch.sum(pred == test_lbls).float() / test_lbls.shape[0]
        accuracy.append(acc)
        mac_f1_list.append(torch.tensor(macro_f1(pred, test_lbls)))

    acc = torch.stack(accuracy)
    mac_f1 = torch.stack(mac_f1_list)
    print('Mean Accuracy: {}, Std Accuracy: {}, Mean Mac F1: {}, Std Mac F1: {}'.format(acc.mean().item(),
                                                                                        acc.std(dim=0).item(),
                                                                                        mac_f1.mean().item(),
                                                                                        mac_f1.std(dim=0).item()))


if __name__ == '__main__':
    # prepare_data returns, in order: node features, labels, the adjacency
    # list (sparse form when conf.sparse), `p`, and the train/val/test
    # index tensors.
    # NOTE: these names are load-bearing module globals — test() reads at
    # least `p` (and in its current form also `features`, `labels`,
    # `idx_train`, `idx_val`, `idx_test`) from module scope, so do not
    # rename them.
    features, labels, sp_adj_list, p, idx_train, idx_val, idx_test = prepare_data(conf.dataset_name, conf.sparse)

    test(features, sp_adj_list, labels, idx_train, idx_val, idx_test)
