import numpy as np
import torch
import scipy.sparse as sp
import load_data


def onehot_encode(labes):
    """One-hot encode a sequence of labels.

    Classes are sorted so the class -> column mapping is deterministic
    across runs (plain ``set`` iteration order is not stable for strings
    under hash randomization, which silently permuted the columns before).

    Parameters
    ----------
    labes : sequence of hashable, mutually comparable labels.

    Returns
    -------
    np.ndarray of shape (len(labes), n_classes), dtype int32, one row
    per input label with a single 1 in that label's class column.
    """
    classes = sorted(set(labes))
    # Build the identity matrix once instead of re-indexing a fresh one
    # per class inside a dict comprehension.
    eye = np.identity(len(classes), dtype=np.int32)
    col_of = {c: i for i, c in enumerate(classes)}
    return np.array([eye[col_of[label]] for label in labes], dtype=np.int32)


def normalize(mx):
    """Row-normalize a sparse matrix so each row sums to 1.

    Rows that sum to zero are left as all zeros (their inverse would be
    inf; it is replaced by 0 before scaling).

    Parameters
    ----------
    mx : scipy.sparse matrix (any numeric dtype).

    Returns
    -------
    scipy.sparse matrix: D^-1 @ mx where D = diag(row sums).
    """
    rowsum = np.array(mx.sum(1), dtype=np.float64)
    # Float exponent: np.power(int_array, -1) raises ValueError
    # ("Integers to negative integer powers are not allowed"), so an
    # integer-dtype sparse matrix used to crash here.
    r_inv = np.power(rowsum, -1.0).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # zero-sum rows: leave untouched
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx


def load_data():
    """Load train/test features, labels and weights and convert to tensors.

    Labels are one-hot encoded, then collapsed back to class indices
    (LongTensor) as expected by losses like ``nn.CrossEntropyLoss``.

    Returns
    -------
    (train_feat, train_label, train_wei, train_sim,
     test_feat, test_label, test_wei, test_sim)
    where the *_sim slots are currently ``None`` placeholders.
    """
    print("load dataset ..... ")
    # This function's name shadows the module-level ``import load_data``,
    # so re-import the data module here under a local alias; otherwise
    # ``load_data.load()`` resolves to this function and raises
    # AttributeError.
    import load_data as _data_module
    train_feat, train_label, train_wei, test_feat, test_label, test_wei = _data_module.load()

    train_feat = torch.FloatTensor(train_feat)
    train_label = onehot_encode(train_label)
    # np.where on the one-hot matrix yields (row_idx, col_idx); the column
    # index is the class id for each sample.
    train_label = torch.LongTensor(np.where(train_label)[1])
    train_wei = torch.FloatTensor(train_wei)

    test_feat = torch.FloatTensor(test_feat)
    test_label = onehot_encode(test_label)
    test_label = torch.LongTensor(np.where(test_label)[1])
    test_wei = torch.FloatTensor(test_wei)

    # NOTE(review): the original returned undefined names ``train_sim`` /
    # ``test_sim`` (NameError). ``None`` placeholders keep the 8-tuple
    # interface the __main__ block unpacks — TODO: wire in real similarity
    # data if load() can provide it.
    train_sim = None
    test_sim = None
    return train_feat, train_label, train_wei, train_sim, test_feat, test_label, test_wei, test_sim


def accuracy(output, labels):
    """Fraction of rows in ``output`` whose argmax equals ``labels``.

    ``output`` is (n, n_classes) scores; ``labels`` is (n,) class ids.
    Returns a 0-dim double tensor in [0, 1].
    """
    predictions = torch.argmax(output, dim=1).type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)


def stastic_indicators(output, labels):
    """Confusion-matrix counts for a binary problem (classes 0 / 1).

    ``output`` is (n, 2) scores; ``labels`` is (n,) holding 0 or 1.
    Returns the four counts (TP, TN, FN, FP) as 0-dim tensors.
    """
    preds = output.max(1)[1]
    positive = preds == 1
    negative = preds == 0
    tp = (positive & (labels == 1)).sum()
    tn = (negative & (labels == 0)).sum()
    fn = (negative & (labels == 1)).sum()
    fp = (positive & (labels == 0)).sum()
    return tp, tn, fn, fp


if __name__ == '__main__':
    # Smoke-test entry point: load the full dataset and unpack the 8-tuple
    # returned by the sibling load_data() defined above.
    train_feat, train_label, train_wei, train_sim, test_feat, test_label, test_wei, test_sim = load_data()
