import numpy as np
import pandas as pd
import torch


def gnn_load_data(path='./dataset/cora/', name='cora'):
    """Load a Cora-style citation dataset from ``path``.

    Reads ``<name>.content`` (node id, feature columns..., class label) and
    ``<name>.cites`` (citing id, cited id), both tab-separated with no header.

    Returns:
        features: float ndarray of node features, shape (N, F).
        adj: dense symmetric (N, N) float adjacency matrix (no self-loops);
            edges whose endpoints are unknown ids are skipped.
        labels: LongTensor of class indices derived from one-hot labels.
    """
    print('loading {} from {}'.format(name, path))
    content = pd.read_csv('{}{}.content'.format(path, name), sep='\t', header=None)
    cites = pd.read_csv('{}{}.cites'.format(path, name), sep='\t', header=None)

    # Map string node ids to their row index in the content table.
    node_ids = np.array(content.iloc[:, 0], dtype=str)
    id_to_index = dict(zip(list(node_ids), list(content.index)))

    features = np.array(content.iloc[:, 1:-1], dtype=float)

    # One-hot encode the label column, then recover class indices.
    one_hot = pd.get_dummies(content.iloc[:, -1])
    labels = torch.LongTensor(np.where(one_hot)[1])

    n_nodes = content.shape[0]
    adj = np.zeros((n_nodes, n_nodes), dtype=float)
    for edge in np.array(cites, dtype=str):
        src, dst = edge[0], edge[1]
        if src not in id_to_index or dst not in id_to_index:
            continue
        i, j = id_to_index[src], id_to_index[dst]
        # Treat citations as undirected edges.
        adj[i, j] = 1.
        adj[j, i] = 1.

    return features, adj, labels


def gcn_preprocessing(features, adj, sumnormalize=True):
    """Prepare inputs for a GCN layer.

    Optionally row-normalizes the features, then symmetrically normalizes
    the adjacency with self-loops: D^{-1/2} (A + I) D^{-1/2}.

    Args:
        features: (N, F) array-like of node features.
        adj: (N, N) array-like adjacency matrix.
        sumnormalize: when True, row-normalize features via ``normalize``.

    Returns:
        Tuple of (FloatTensor features, FloatTensor normalized adjacency).
    """
    feats = normalize(features) if sumnormalize is True else features
    feats = torch.FloatTensor(feats)

    # Add self-loops so every node keeps its own signal.
    a_hat = adj + np.identity(adj.shape[0], dtype=float)
    degrees = np.array(a_hat.sum(axis=1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.  # guard isolated nodes (degree 0)
    d_inv_sqrt = np.diag(inv_sqrt)
    a_hat = np.matmul(np.matmul(d_inv_sqrt, a_hat), d_inv_sqrt)
    return feats, torch.FloatTensor(a_hat)


def gin_preprocessing(features, adj, sumnormalize=True):
    """Prepare inputs for a GIN: optionally row-normalize features, leave
    the adjacency untouched.

    Returns:
        Tuple of (FloatTensor features, FloatTensor adjacency).
    """
    feats = normalize(features) if sumnormalize is True else features
    return torch.FloatTensor(feats), torch.FloatTensor(adj)


def gat_preprocessing(features, adj, sumnormalize=True):
    """Prepare inputs for a GAT: optionally row-normalize features; the raw
    adjacency is returned as-is (attention handles edge weighting).

    Returns:
        Tuple of (FloatTensor features, FloatTensor adjacency).
    """
    if sumnormalize is True:
        feats = normalize(features)
    else:
        feats = features

    gat_features = torch.FloatTensor(feats)
    gat_adj = torch.FloatTensor(adj)
    return gat_features, gat_adj

def normalize(mat):
    """Row-normalize a matrix so each row sums to 1 (all-zero rows stay zero).

    Args:
        mat: 2-D array-like of numbers.

    Returns:
        Float ndarray of the same shape with each row divided by its row sum.
    """
    mat = np.asarray(mat, dtype=float)
    rowsum = mat.sum(axis=1)
    # Zero rows produce inf here; suppress the divide-by-zero warning the
    # original emitted, then zero those entries out below.
    with np.errstate(divide='ignore'):
        rowrec = np.power(rowsum, -1).flatten()
    rowrec[np.isinf(rowrec)] = 0.
    # Scale each row by its reciprocal sum via broadcasting instead of
    # building an n x n diagonal matrix and doing a dense matmul.
    return mat * rowrec[:, None]


def get_idx(total, train, val, test):
    """Split indices 0..total-1 into contiguous train/val/test LongTensors.

    Sizes are proportional to the ratio weights ``train``/``val``/``test``
    and truncated to integers, so a few trailing indices may be dropped.

    Returns:
        Tuple of (idx_train, idx_val, idx_test) LongTensors.
    """
    whole = train + val + test
    n_train = int(total * (train / whole))
    n_val = int(total * (val / whole))
    n_test = int(total * (test / whole))

    # Contiguous, non-overlapping ranges in order: train, val, test.
    val_end = n_train + n_val
    idx_train = torch.LongTensor(range(n_train))
    idx_val = torch.LongTensor(range(n_train, val_end))
    idx_test = torch.LongTensor(range(val_end, val_end + n_test))
    return idx_train, idx_val, idx_test


def accuracy(output, labels):
    """Fraction of rows in ``output`` whose argmax matches ``labels``.

    Args:
        output: (N, C) tensor of per-class scores.
        labels: (N,) tensor of true class indices.

    Returns:
        0-dim double tensor in [0, 1].
    """
    predictions = output.argmax(dim=1).type_as(labels)
    n_correct = (predictions == labels).double().sum()
    return n_correct / len(labels)