import pickle as pkl
import random

import numpy as np
import scipy.sparse as sp
import torch


def prepare_data(dataset, sparse=False):
    """Load a dataset and convert it into torch tensors ready for training.

    Args:
        dataset: dataset name forwarded to ``load_data`` (e.g. 'IMDB').
        sparse: if True, return the adjacency matrices as a list of torch
            sparse tensors; otherwise stack them into one dense FloatTensor
            of shape (num_views, 1, n, n).

    Returns:
        Tuple ``(features, labels, adj_tensors, p, idx_train, idx_val,
        idx_test)`` where ``p`` is the number of adjacency matrices
        (one per meta-path view).
    """
    adj_list, features, labels, idx_train, idx_val, idx_test = load_data(dataset)
    features, _ = preprocess_features(features)

    p = len(adj_list)  # number of meta-path views

    processed = []
    for adj in adj_list:
        # Symmetric normalization of A + I (self-loops added exactly once).
        adj = normalize_adj(adj + sp.eye(adj.shape[0]))

        if sparse:
            processed.append(sparse_mx_to_torch_sparse_tensor(adj))
        else:
            # FIX: the dense branch previously added sp.eye a second time,
            # producing a different matrix than the sparse branch.
            processed.append(np.asarray(adj.todense())[np.newaxis])

    if sparse:
        # FIX: torch sparse tensors cannot go through np.array /
        # torch.FloatTensor; keep them as a plain list.
        sp_adj_list = processed
    else:
        sp_adj_list = torch.FloatTensor(np.array(processed))

    features = torch.FloatTensor(features[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return features, labels, sp_adj_list, p, idx_train, idx_val, idx_test


def load_data(dataset):
    """Load raw features, labels, adjacency matrices and index splits.

    Args:
        dataset: dataset name; only 'IMDB' (3-class) is currently supported.

    Returns:
        Tuple ``(adj_list, features, labels, idx_train, idx_val, idx_test)``
        where ``adj_list`` holds one scipy sparse adjacency per meta-path,
        ``features`` is a csr matrix, ``labels`` is one-hot encoded, and the
        index splits are random 800/800/2400 partitions of the 4000 movies.

    Raises:
        ValueError: if ``dataset`` is not supported (previously this fell
            through and returned None, crashing the caller on unpacking).
    """
    if dataset != 'IMDB':
        raise ValueError('Unsupported dataset: {!r}'.format(dataset))

    # SECURITY NOTE(review): pickle.load executes arbitrary code if the
    # file is untrusted — only load data files you control.
    with open('data/IMDB/3-class/movie_feature_vector_6334.pickle', 'rb') as f:
        features = pkl.load(f)
    features = sp.csr_matrix(features, dtype=np.float32)

    # Each line is "<movie_index>,<class_label>,..."; keep the label column.
    labels = []
    with open('data/IMDB/3-class/index_label.txt', 'r') as f:
        lines = f.readlines()
        for line in lines:
            line = line.split(',')
            labels.append(int(line[1]))
    labels = encode_one_hot(labels)

    # One adjacency matrix per meta-path (movie-director-movie, etc.).
    adj_list = []
    for name in ['movie_director_movie', 'movie_actor_movie', 'movie_keyword_movie']:
        with open('data/IMDB/3-class/{}_adj.pickle'.format(name), 'rb') as f:
            adj = pkl.load(f)
        adj_list.append(adj)

    # Random disjoint splits: 800 train, 800 val, remaining 2400 test.
    original = range(4000)
    idx_train = random.sample(original, 800)
    original = list(set(original) ^ set(idx_train))
    idx_val = random.sample(original, 800)
    idx_test = list(set(original) ^ set(idx_val))

    return adj_list, features, labels, idx_train, idx_val, idx_test


def encode_one_hot(labels):
    """One-hot encode a sequence of class labels.

    Args:
        labels: sequence of hashable, orderable class labels (ints here).

    Returns:
        int32 ndarray of shape (len(labels), num_classes).
    """
    # Sorted for a deterministic class -> column mapping; plain set
    # iteration order is not guaranteed stable across runs.
    classes = sorted(set(labels))
    class_index = {c: i for i, c in enumerate(classes)}
    one_hot = np.zeros((len(labels), len(classes)), dtype=np.int32)
    for row, label in enumerate(labels):
        one_hot[row, class_index[label]] = 1
    return one_hot


def preprocess_features(features):
    """Row-normalize a sparse feature matrix so each row sums to 1.

    Args:
        features: scipy sparse matrix of node features.

    Returns:
        Tuple ``(dense_features, (coords, values, shape))`` — the dense
        normalized matrix plus its sparse tuple representation.
    """
    row_sum = np.array(features.sum(1))
    # All-zero rows produce inf here; suppress the expected divide-by-zero
    # warning — the infs are zeroed out on the next line, so such rows
    # simply stay all-zero.
    with np.errstate(divide='ignore'):
        r_inv = np.power(row_sum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    features = r_mat_inv.dot(features)
    return features.todense(), sparse_to_tuple(features)


def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert scipy sparse matrix/matrices to ``(coords, values, shape)``.

    Args:
        sparse_mx: a scipy sparse matrix, or a list of them (a list is
            converted element-wise *in place* and also returned).
        insert_batch: if True, prepend a batch dimension of size 1 to both
            the coordinates and the shape.

    Returns:
        A ``(coords, values, shape)`` tuple, or a list of such tuples.
    """
    def to_tuple(mx):
        mx = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
        if insert_batch:
            batch_col = np.zeros(mx.row.shape[0])
            coords = np.vstack((batch_col, mx.row, mx.col)).transpose()
            shape = (1,) + mx.shape
        else:
            coords = np.vstack((mx.row, mx.col)).transpose()
            shape = mx.shape
        return coords, mx.data, shape

    if isinstance(sparse_mx, list):
        for i, mx in enumerate(sparse_mx):
            sparse_mx[i] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)


def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2.

    Args:
        adj: adjacency matrix (anything ``sp.coo_matrix`` accepts).

    Returns:
        The normalized matrix in COO format; rows with zero degree are
        left as all-zero.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.asarray(adj.sum(axis=1)).flatten()
    inv_sqrt_degree = np.power(degrees, -0.5)
    # Zero-degree rows yield inf; zero them out instead.
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt_degree)
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()


def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Args:
        sparse_mx: any scipy sparse matrix; cast to float32.

    Returns:
        A ``torch.sparse_coo_tensor`` with the same shape and entries.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
    # supported constructor and builds the same tensor.
    return torch.sparse_coo_tensor(indices, values, shape)


if __name__ == '__main__':
    # Smoke-test the full IMDB preparation pipeline.
    (features, labels, adj_tensors,
     num_views, idx_train, idx_val, idx_test) = prepare_data('IMDB')
