import ssl

import numpy as np
import scipy.io as sio

# from torchvision import datasets, transforms
# from sklearn import datasets, manifold, utils
import scipy

# from PIL import Image
import sys
import sklearn
import torch
import pickle as pkl
import scipy.sparse as sp
import networkx as nx
import random

from torch import IntStorage
import joblib
from torch._C import device

# NOTE(review): globally disables HTTPS certificate verification — presumably
# so dataset downloads succeed behind broken/self-signed certs; confirm this
# is intentional, as it affects every HTTPS request in the process.
ssl._create_default_https_context = ssl._create_unverified_context
def sys_normalized_adjacency_numpy(adj):
    """Symmetrically normalize an adjacency matrix with self-loops.

    Computes D^{-1/2} (A + I) D^{-1/2} with scipy sparse ops and returns
    the result in COO format.
    """
    a = sp.coo_matrix(adj)
    a = a + sp.eye(a.shape[0])          # add self-loops
    deg = np.asarray(a.sum(axis=1)).flatten()
    deg = np.where(deg == 0, 1, deg)    # guard isolated rows before the power
    inv_sqrt = np.power(deg, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    return d_half.dot(a).dot(d_half).tocoo()

def sys_normalized_adjacency(adj,tdevice):
    """Symmetrically normalize an adjacency matrix with self-loops (torch).

    Computes D^{-1/2} (A + I) D^{-1/2} as a dense tensor on `tdevice`.
    The identity is added dense-first so a sparse `adj` is promoted to a
    dense result before the row sums and matmuls.
    """
    looped = torch.add(torch.eye(adj.shape[0], device=tdevice), adj)
    degree = looped.sum(dim=1)
    # Guard zero-degree rows before taking the inverse square root.
    degree = torch.where(degree == 0, torch.ones_like(degree), degree)
    inv_sqrt = torch.pow(degree, -0.5).flatten()
    inv_sqrt[torch.isinf(inv_sqrt)] = 0.
    d_half = torch.diag(inv_sqrt)
    return d_half @ looped @ d_half

def sparse_mx_to_torch_sparse_tensor(sparse_mx, data_name):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Parameters
    ----------
    sparse_mx : scipy sparse matrix (any format; converted to COO float32).
    data_name : str
        For 'wiki' the stored edge weights are discarded and every edge gets
        weight 1 (binary adjacency); all other datasets keep their values.

    Returns
    -------
    torch sparse COO tensor with the same shape and float32 values.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    if data_name != 'wiki':
        values = torch.from_numpy(sparse_mx.data)
    else:
        # Binarize wiki edge weights.
        values = torch.ones_like(torch.from_numpy(sparse_mx.data))
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor(...) is deprecated; torch.sparse_coo_tensor is
    # the supported constructor and builds the identical float32 COO tensor.
    return torch.sparse_coo_tensor(indices, values, shape)

def GetData(data_name, device, testBool=False, n_data=None):
    """Load (features, labels, distance matrices, adjacency) for a dataset.

    First tries a cached bundle under ./save; if that load fails for any
    reason, rebuilds everything from the raw files under ./data/graphdata
    (Planetoid pickles for Cora/citeseer/pubmed, a .mat file for wiki),
    caches the result with joblib, and returns it.

    Parameters
    ----------
    data_name : str
        One of "Cora", "citeseer", "pubmed", "wiki".
    device : unused in this function (kept for caller compatibility).
    testBool, n_data : unused in this function (kept for caller compatibility).

    Returns
    -------
    tuple (data_train, label_train, DisE, DisG, adj) where
        data_train  : dense feature tensor (row-normalized features),
        label_train : LongTensor of class indices,
        DisE        : pairwise cosine distances between feature rows,
        DisG        : all-pairs shortest-path distances on the graph whose
                      edge weights are those cosine distances (Dijkstra),
        adj         : torch sparse adjacency tensor.
    """

    try:

        name = str(data_name)
        # Cache layout: [data_train, label_train, DisE, DisG, adj] where adj
        # is stored as a scipy sparse matrix (converted to torch just below).
        data_train, label_train, DisE,DisG, adj = joblib.load(
            "save/datadist{}.pkl".format(name))
        adj = sparse_mx_to_torch_sparse_tensor(adj, data_name)
        print('-----> load data_train, label_train, DisE, DisG, adj from ./save')

    # NOTE(review): bare `except:` treats ANY failure above (even a typo or
    # KeyboardInterrupt) as a cache miss — consider narrowing the exception.
    except:

        if data_name == "Cora":

            def normalize(mx):
                """Row-normalize sparse matrix"""
                rowsum = np.array(mx.sum(1))
                # Guard empty rows before taking the reciprocal.
                rowsum = (rowsum == 0) * 1 + rowsum
                r_inv = np.power(rowsum, -1).flatten()
                r_inv[np.isinf(r_inv)] = 0.0
                r_mat_inv = sp.diags(r_inv)
                mx = r_mat_inv.dot(mx)
                return mx

            def parse_index_file(filename):
                """Parse index file."""
                index = []
                for line in open(filename):
                    index.append(int(line.strip()))
                return index

            # Planetoid-format pickles: x/tx/allx are feature matrices,
            # y/ty/ally the matching one-hot labels, and graph is a dict
            # mapping node id -> neighbor list.
            path_data = "./data/graphdata"
            dataset_str = "cora"
            names = ["x", "y", "tx", "ty", "allx", "ally", "graph"]
            objects = []
            for i in range(len(names)):
                with open(
                    path_data + "/ind.{}.{}".format(dataset_str.lower(), names[i]), "rb"
                ) as f:
                    if sys.version_info > (3, 0):
                        objects.append(pkl.load(f, encoding="latin1"))
                    else:
                        objects.append(pkl.load(f))

            x, y, tx, ty, allx, ally, graph = tuple(objects)
            # Test indices are stored shuffled; sorting gives their natural order.
            test_idx_reorder = parse_index_file(
                path_data + "/ind.{}.test.index".format(dataset_str)
            )
            test_idx_range = np.sort(test_idx_reorder)

            features = sp.vstack((allx, tx)).tolil()
            # Put the shuffled test rows back into canonical positions.
            features[test_idx_reorder, :] = features[test_idx_range, :]
            labels = np.vstack((ally, ty))
            labels[test_idx_reorder, :] = labels[test_idx_range, :]

            features = normalize(features)
            data_train = torch.FloatTensor(np.array(features.todense())).float()
            labels = torch.LongTensor(labels)
            # One-hot rows -> class indices.
            label_train = torch.max(labels, dim=1)[1]

            adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
            # Symmetrize: element-wise max of A and A^T.
            adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
            adj = sp.coo_matrix(adj).tocoo()
            # adj = sys_normalized_adjacency(adj)
            # adj = sparse_mx_to_torch_sparse_tensor(adj)
            # useindex = np.loadtxt("data/stay.txt").astype(np.int)
            # data_train = data_train[useindex]
            # label_train = label_train[useindex]
            # graph_adj = graph_adj[useindex][:, useindex]

        if data_name == "citeseer":

            def normalize(mx):
                """Row-normalize sparse matrix"""
                rowsum = np.array(mx.sum(1))
                # Guard empty rows before taking the reciprocal.
                rowsum = (rowsum == 0) * 1 + rowsum
                r_inv = np.power(rowsum, -1).flatten()
                r_inv[np.isinf(r_inv)] = 0.0
                r_mat_inv = sp.diags(r_inv)
                mx = r_mat_inv.dot(mx)
                return mx

            def parse_index_file(filename):
                """Parse index file."""
                index = []
                for line in open(filename):
                    index.append(int(line.strip()))
                return index

            path_data = "./data/graphdata"
            dataset_str = "citeseer"
            names = ["x", "y", "tx", "ty", "allx", "ally", "graph"]
            objects = []
            for i in range(len(names)):
                with open(
                    path_data + "/ind.{}.{}".format(dataset_str.lower(), names[i]), "rb"
                ) as f:
                    if sys.version_info > (3, 0):
                        objects.append(pkl.load(f, encoding="latin1"))
                    else:
                        objects.append(pkl.load(f))

            x, y, tx, ty, allx, ally, graph = tuple(objects)
            test_idx_reorder = parse_index_file(
                path_data + "/ind.{}.test.index".format(dataset_str)
            )
            test_idx_range = np.sort(test_idx_reorder)

            # citeseer's test-index range has gaps (isolated nodes missing
            # from tx/ty); pad both to the full range with zero rows.
            test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - min(test_idx_range), :] = ty
            ty = ty_extended

            features = sp.vstack((allx, tx)).tolil()
            # Put the shuffled test rows back into canonical positions.
            features[test_idx_reorder, :] = features[test_idx_range, :]
            labels = np.vstack((ally, ty))
            labels[test_idx_reorder, :] = labels[test_idx_range, :]

            features = normalize(features)
            data_train = torch.FloatTensor(np.array(features.todense())).float()
            labels = torch.LongTensor(labels)
            # One-hot rows -> class indices.
            label_train = torch.max(labels, dim=1)[1]

            adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
            # Symmetrize: element-wise max of A and A^T.
            adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
            adj = sp.coo_matrix(adj).tocoo()
            # graph_adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)).toarray()

        if data_name == "pubmed":

            def normalize(mx):
                """Row-normalize sparse matrix"""
                rowsum = np.array(mx.sum(1))
                # Guard empty rows before taking the reciprocal.
                rowsum = (rowsum == 0) * 1 + rowsum
                r_inv = np.power(rowsum, -1).flatten()
                r_inv[np.isinf(r_inv)] = 0.0
                r_mat_inv = sp.diags(r_inv)
                mx = r_mat_inv.dot(mx)
                return mx

            def parse_index_file(filename):
                """Parse index file."""
                index = []
                for line in open(filename):
                    index.append(int(line.strip()))
                return index

            path_data = "./data/graphdata"
            dataset_str = "pubmed"
            names = ["x", "y", "tx", "ty", "allx", "ally", "graph"]
            objects = []
            for i in range(len(names)):
                with open(
                    path_data + "/ind.{}.{}".format(dataset_str.lower(), names[i]), "rb"
                ) as f:
                    if sys.version_info > (3, 0):
                        objects.append(pkl.load(f, encoding="latin1"))
                    else:
                        objects.append(pkl.load(f))

            x, y, tx, ty, allx, ally, graph = tuple(objects)
            test_idx_reorder = parse_index_file(
                path_data + "/ind.{}.test.index".format(dataset_str)
            )
            test_idx_range = np.sort(test_idx_reorder)

            features = sp.vstack((allx, tx)).tolil()
            # Put the shuffled test rows back into canonical positions.
            features[test_idx_reorder, :] = features[test_idx_range, :]
            labels = np.vstack((ally, ty))
            labels[test_idx_reorder, :] = labels[test_idx_range, :]

            features = normalize(features)
            # NOTE(review): unlike Cora/citeseer this keeps the features'
            # original dtype (no .float()) — confirm that is intentional.
            data_train = torch.tensor(features.todense())
            labels = torch.LongTensor(labels)
            # One-hot rows -> class indices.
            label_train = torch.max(labels, dim=1)[1]

            adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
            # Symmetrize: element-wise max of A and A^T.
            adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
            adj = sp.coo_matrix(adj).tocoo()

        if data_name == "wiki":

            def normalize(mx):
                """Row-normalize sparse matrix"""
                rowsum = np.array(mx.sum(1))
                rowsum = (rowsum == 0) * 1 + rowsum
                r_inv = np.power(rowsum, -1).flatten()
                r_inv[np.isinf(r_inv)] = 0.0
                r_mat_inv = sp.diags(r_inv)
                mx = r_mat_inv.dot(mx)
                return mx

            def parse_index_file(filename):
                """Parse index file."""
                index = []
                for line in open(filename):
                    index.append(int(line.strip()))
                return index

            data = sio.loadmat('data/graphdata/wiki.mat')
            feature = data['fea']
            if sp.issparse(feature):
                feature = feature.todense()
            adj = data['W']
            gnd = data['gnd']
            # Labels arrive as a 1-based column vector; make them 0-based 1-D.
            gnd = gnd.T
            gnd = gnd - 1
            gnd = gnd[0, :]
            k = len(np.unique(gnd))  # number of classes (not used below)
            # Build a node -> neighbor-list dict, counting weights > 0.1 as edges.
            adjden = np.array(adj.A)
            graph = {}
            for i in range(adjden.shape[0]):
                # print(i)
                list_i = []
                for j in range(adjden.shape[0]):
                    if adjden[i,j] > 0.1:
                        list_i.append(j)
                graph[i] = list_i



            features = np.asarray(feature)
            data_train = torch.tensor(features)
            label_train = torch.LongTensor(gnd)
            # label_train = torch.max(labels, dim=1)[1]



        # Weight each edge by the cosine distance between its endpoints'
        # features, then take all-pairs shortest paths over those weights.
        DisE = sklearn.metrics.pairwise.cosine_distances(data_train, data_train)
        G = nx.Graph()
        for i in range(len(graph)):
            G.add_node(i)

        for i in range(len(graph)):
            for j in graph[i]:
                G.add_weighted_edges_from([(i, j, DisE[i, j])])
        # NOTE(review): nx.to_scipy_sparse_matrix was removed in networkx 3.0
        # (renamed to_scipy_sparse_array) — confirm the pinned networkx version.
        s = nx.to_scipy_sparse_matrix(G)
        DisG = scipy.sparse.csgraph.dijkstra(s)
        print('finish load data')

        name = str(data_name)
        joblib.dump(
            [data_train, label_train, DisE,DisG, adj],
            "save/datadist{}.pkl".format(name))
        # Convert to torch only AFTER dumping, so the cache stores scipy adj.
        adj = sparse_mx_to_torch_sparse_tensor(adj, data_name)
        print('-----> save data_train, label_train, DisE, DisG, adj to ./save')

        # torch.save(adj, 'save/{}saveadj.pt'.format(data_name))

    return (data_train, label_train, DisE, DisG, adj)


def EdgeSampler_numpy(adj, droprate=0.01, data_name=None):
    """Randomly drop a fraction of edges from a scipy COO adjacency matrix.

    Keeps a uniform random (1 - droprate) subset of the stored entries,
    symmetrically renormalizes the sampled matrix, and converts it to a
    torch sparse tensor.

    Parameters
    ----------
    adj : scipy.sparse.coo_matrix
        Input adjacency (must expose .data / .row / .col).
    droprate : float
        Fraction of stored entries to discard.
    data_name : str or None
        Forwarded to sparse_mx_to_torch_sparse_tensor ('wiki' binarizes
        edge weights). Previously this function read an undefined global
        `data_name` and raised NameError.

    Returns
    -------
    torch sparse tensor holding the sampled, normalized adjacency.
    """
    percent = 1 - droprate
    nnz = adj.nnz

    perm = np.random.permutation(nnz)
    preserve_nnz = int(nnz * percent)
    perm = perm[:preserve_nnz]
    r_adj = sp.coo_matrix((adj.data[perm],
                           (adj.row[perm],
                            adj.col[perm])),
                          shape=adj.shape)
    # Bug fix: the scipy path must use the numpy normalizer — the torch
    # variant requires a tensor plus a device argument and would raise here.
    r_adj = sys_normalized_adjacency_numpy(r_adj)
    r_adj = sparse_mx_to_torch_sparse_tensor(r_adj, data_name)
    return r_adj

def EdgeSampler(adj, droprate=0.01,tdevice='cpu'):
    """Randomly drop a fraction of edges from a torch sparse adjacency.

    A uniform random (1 - droprate) subset of the stored entries is kept,
    and the sampled adjacency is symmetrically renormalized on `tdevice`.
    """
    total = adj._nnz()
    keep_count = int(total * (1 - droprate))
    keep = torch.randperm(total)[:keep_count]

    sampled = torch.sparse.FloatTensor(
        adj._indices()[:, keep],
        adj._values()[keep],
        adj.shape
    )

    # r_adj = sparse_mx_to_torch_sparse_tensor(r_adj)
    return sys_normalized_adjacency(sampled, tdevice)