import ssl

import numpy as np

# from torchvision import datasets, transforms
# from sklearn import datasets, manifold, utils
import scipy

# from PIL import Image
import sys
import sklearn
import torch
import pickle as pkl
import scipy.sparse as sp
import networkx as nx

ssl._create_default_https_context = ssl._create_unverified_context


def _row_normalize(mx):
    """Row-normalize a sparse matrix so each non-empty row sums to 1.

    All-zero rows are left as zeros: their row sum is replaced by 1 before
    inversion, so the division is a no-op instead of producing inf/NaN.
    """
    rowsum = np.array(mx.sum(1))
    # Replace zero row sums with 1 so the reciprocal below stays finite.
    rowsum = (rowsum == 0) * 1 + rowsum
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.0  # belt-and-braces: clear any residual inf
    return sp.diags(r_inv).dot(mx)


def _parse_index_file(filename):
    """Read a Planetoid *.test.index file: one integer node index per line."""
    with open(filename) as f:
        return [int(line.strip()) for line in f]


def _load_planetoid(dataset_str, path_data="./data/graphdata"):
    """Load one Planetoid citation dataset from its pickled files.

    Expects the standard ``ind.<dataset>.{x,y,tx,ty,allx,ally,graph}``
    pickles plus ``ind.<dataset>.test.index`` under ``path_data``.

    Returns
    -------
    data_train : torch.FloatTensor, shape (N, F)
        Row-normalized dense feature matrix.
    label_train : torch.LongTensor, shape (N,)
        Class index per node (argmax of the one-hot label matrix).
    graph : dict
        Adjacency as ``{node_index: [neighbor indices]}``.
    """
    names = ["x", "y", "tx", "ty", "allx", "ally", "graph"]
    objects = []
    for name in names:
        with open("{}/ind.{}.{}".format(path_data, dataset_str, name), "rb") as f:
            # latin1 keeps pickles written by Python 2 loadable under Python 3.
            objects.append(pkl.load(f, encoding="latin1"))
    x, y, tx, ty, allx, ally, graph = objects

    test_idx_reorder = _parse_index_file(
        "{}/ind.{}.test.index".format(path_data, dataset_str)
    )
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == "citeseer":
        # Citeseer has isolated test nodes missing from tx/ty: pad them in
        # as all-zero rows so positions line up with the full index range.
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended

    # Stack train+test parts, then restore the on-disk test ordering.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    features = _row_normalize(features)
    data_train = torch.FloatTensor(np.array(features.todense()))
    label_train = torch.max(torch.LongTensor(labels), dim=1)[1]
    return data_train, label_train, graph


# Maps the caller-facing dataset names (note the capital "Cora", preserved
# for backward compatibility with existing call sites) to file prefixes.
_DATASETS = {"Cora": "cora", "citeseer": "citeseer", "pubmed": "pubmed"}


def GetData(data_name, device, testBool=False):
    """Load a citation-graph dataset for training.

    Parameters
    ----------
    data_name : str
        "Cora", "citeseer" or "pubmed" (exact match, as before).
    device : unused
        Accepted for backward compatibility with existing call sites.
    testBool : bool, optional, unused
        Accepted for backward compatibility with existing call sites.

    Returns
    -------
    tuple
        ``(data_train, label_train, graph)`` — see ``_load_planetoid``.

    Raises
    ------
    ValueError
        If ``data_name`` is not a supported dataset (the original code
        crashed with a ``NameError`` in that case).
    """
    if data_name not in _DATASETS:
        raise ValueError(
            "Unknown data_name {!r}; expected one of {}".format(
                data_name, sorted(_DATASETS)
            )
        )
    data_train, label_train, graph = _load_planetoid(_DATASETS[data_name])

    # NOTE(review): the original also built a distance-weighted networkx
    # graph over `graph` and ran scipy's Dijkstra on it, but the result
    # (DisG) was never used or returned, so that dead O(N^2) computation
    # has been removed.
    return (data_train, label_train, graph)
