import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import roc_auc_score, average_precision_score

from config import Config
from gic import GIC
from load import load_data, mask_test_edges, preprocess_features, normalize_adj, sparse_mx_to_torch_sparse_tensor

conf = Config()  # global run configuration: dataset name and training hyperparameters


def get_roc_score(edges_pos, edges_neg, embeddings, adjacent_sparse):
    """
    Evaluate link prediction quality of node embeddings.

    Every candidate edge (i, j) is scored as sigmoid(<emb_i, emb_j>); the
    scores are then checked for how well they separate held-out positive
    edges from sampled negative ones.
    Adapted from https://github.com/tkipf/gae

    Args:
        edges_pos: sequence of (i, j) node-index pairs that are true edges.
        edges_neg: sequence of (i, j) node-index pairs that are non-edges.
        embeddings: (num_nodes, dim) numpy array of node embeddings.
        adjacent_sparse: original adjacency matrix; kept for interface
            compatibility (labels come from edge membership, not from it).

    Returns:
        Tuple (roc_auc, average_precision).
    """
    # Inner-product score for every node pair.
    score_matrix = np.dot(embeddings, embeddings.T)

    def sigmoid(x):
        # Numerically stable logistic: the exp argument is always <= 0,
        # so it cannot overflow for large |x|.
        z = np.exp(-np.abs(x))
        return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))

    # Vectorized gather of edge scores (replaces the per-edge Python loops).
    # reshape(-1, 2) keeps empty edge lists well-formed as (0, 2) arrays.
    edges_pos = np.asarray(edges_pos, dtype=int).reshape(-1, 2)
    edges_neg = np.asarray(edges_neg, dtype=int).reshape(-1, 2)
    pred_pos = sigmoid(score_matrix[edges_pos[:, 0], edges_pos[:, 1]])
    pred_neg = sigmoid(score_matrix[edges_neg[:, 0], edges_neg[:, 1]])

    # Positives are labeled 1, negatives 0.
    pred_all = np.hstack([pred_pos, pred_neg])
    labels_all = np.hstack([np.ones(len(pred_pos)), np.zeros(len(pred_neg))])

    roc_score = roc_auc_score(labels_all, pred_all)
    ap_score = average_precision_score(labels_all, pred_all)
    return roc_score, ap_score


def train(ft, adjacent):
    """
    Train a GIC model with contrastive loss, then score link prediction.

    Args:
        ft: (1, num_nodes, ft_size) float tensor of node features.
        adjacent: normalized adjacency — a sparse torch tensor when
            conf.sparse, otherwise a (1, n, n) dense tensor.

    Reads module-level globals set in __main__: nb_nodes, ft_size,
    test_edges, test_edges_false, adj_sparse.
    Side effects: saves the best model state to '<dataset>-link.pkl' and
    prints per-epoch loss plus final AUC/AP.
    """
    b_xent = torch.nn.BCEWithLogitsLoss()
    model = GIC(nb_nodes, ft_size, conf.hid_units, conf.non_linearity, conf.num_clusters, conf.beta)
    optimiser = torch.optim.Adam(model.parameters(), lr=conf.lr, weight_decay=conf.l2_coef)
    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()

    cnt_wait = 0
    best = 1e9  # lowest loss seen so far (plain float)

    for epoch in range(conf.nb_epochs):
        model.train()
        optimiser.zero_grad()

        # Row-shuffled features serve as the corrupted (negative) graph.
        idx = np.random.permutation(nb_nodes)
        shuffle_fts = ft[:, idx, :]

        # Discriminator targets: 1 for real nodes, 0 for corrupted ones.
        lbl_1 = torch.ones(conf.batch_size, nb_nodes)
        lbl_2 = torch.zeros(conf.batch_size, nb_nodes)
        lbl = torch.cat((lbl_1, lbl_2), 1)

        if torch.cuda.is_available():
            shuffle_fts = shuffle_fts.cuda()
            lbl = lbl.cuda()

        logits1, logits2 = model(ft, shuffle_fts, adjacent, conf.sparse, None, None, None, conf.beta)
        # Blend the two discriminator losses with mixing weight alpha.
        loss = conf.alpha * b_xent(logits1, lbl) + (1 - conf.alpha) * b_xent(logits2, lbl)
        print('Epoch: {}, Loss:{}'.format(epoch, loss.item()))

        # Track the best loss as a float so no autograd graph is retained
        # across epochs (storing the tensor itself would keep it alive).
        cur_loss = loss.item()
        if cur_loss < best:
            best = cur_loss
            cnt_wait = 0
            torch.save(model.state_dict(), conf.dataset_name + '-link.pkl')
        else:
            cnt_wait += 1

        if cnt_wait == conf.patience:
            print('Early stopping!')
            break

        loss.backward()
        optimiser.step()

    model.load_state_dict(torch.load(conf.dataset_name + '-link.pkl'))
    # Embed using the `ft` argument (the original referenced the global
    # `features`, which only worked because __main__ passes that tensor).
    embeds, _, _, s = model.embed(ft, adjacent, conf.sparse, None)
    embeddings = embeds[0, :]
    # L2-normalize each node embedding so scores are cosine-like.
    embeddings = embeddings / embeddings.norm(dim=1)[:, None]

    sc_roc, sc_ap = get_roc_score(test_edges, test_edges_false, embeddings.cpu().detach().numpy(), adj_sparse)
    print('Dataset', conf.dataset_name)
    print('AUC', sc_roc, 'AP', sc_ap)


if __name__ == '__main__':
    # Load the full graph; idx_train/idx_test are unused in this script.
    adj, features, labels, idx_train, idx_test = load_data(conf.dataset_name)
    # Keep the untouched adjacency for ground-truth lookups during scoring.
    adj_sparse = adj

    # Hold out 10% of edges for test and 5% for validation; training uses
    # only the reduced graph so held-out edges are truly unseen.
    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, test_edges, test_edges_false = \
        mask_test_edges(adj, test_frac=0.1, val_frac=0.05)
    adj = adj_train
    features, _ = preprocess_features(features)
    nb_nodes = features.shape[0]   # number of nodes (read by train())
    ft_size = features.shape[1]    # per-node feature dimension (read by train())
    nb_classes = labels.shape[1]   # number of classes (unused below)

    # Normalize the adjacency with self-loops added.
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))

    # Prepend a batch dimension of size 1 for the model.
    features = torch.FloatTensor(features[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    if torch.cuda.is_available():
        features = features.cuda()
        labels = labels.cuda()

    if conf.sparse:
        sp_adj = sparse_mx_to_torch_sparse_tensor(adj)
        if torch.cuda.is_available():
            sp_adj = sp_adj.cuda()
        train(features, sp_adj)
    else:
        # NOTE(review): self-loops are added a second time here even though
        # normalize_adj above was already given adj + I, so the dense path
        # differs from the sparse one — confirm this is intentional.
        adj = (adj + sp.eye(adj.shape[0])).todense()
        adj = torch.FloatTensor(adj[np.newaxis])
        if torch.cuda.is_available():
            adj = adj.cuda()
        train(features, adj)