import ssl

import numpy as np
import torch
from PIL import Image

# from torchvision import datasets, transforms
from sklearn import datasets, manifold, utils

# from sklearn.samples_generator import make_swiss_roll
# from sklearn.datasets import fetch_openml, make_s_curve, make_swiss_roll
import os
from torch.utils.data import dataloader

ssl._create_default_https_context = ssl._create_unverified_context
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# import FlowCal
import sklearn

def GetData(args, device, testBool=False):
    """Load the dataset selected by ``args["data_name"]`` and return one split.

    Parameters:
        args (dict): must contain "data_name"; most branches also read
            "data_trai_n" (train-set size) and some read "data_test_n".
        device: unused; kept for caller compatibility.
        testBool (bool): when True return the held-out split, otherwise the
            training split.

    Returns:
        (torch.FloatTensor, torch.Tensor): (data, labels) for the split.
    """
    if args["data_name"] == "mnist":
        # NOTE: the bare name `datasets` is torchvision.datasets here (it
        # shadows the sklearn import at the top of the file).
        mnist_set = datasets.MNIST(
            root="./data", train=not testBool, download=True, transform=None
        )
        # Flatten 28x28 images and scale pixels into [0, 1].
        X = mnist_set.data.reshape(-1, 28 * 28).float() / 255
        y = mnist_set.targets

        n1 = args["data_trai_n"]
        n2 = n1 + 1
        data_train, data_test = X[:n1, :], X[n1:n2, :]
        label_train, label_test = y[:n1], y[n1:n2]

    elif args["data_name"] == "Fmnist":
        fmnist_set = datasets.FashionMNIST(
            root="./data", train=not testBool, download=True, transform=None
        )
        X = fmnist_set.data.reshape(-1, 28 * 28).float() / 255
        y = fmnist_set.targets

        n1 = args["data_trai_n"]
        n2 = n1 + 1
        data_train, data_test = X[:n1, :], X[n1:n2, :]
        label_train, label_test = y[:n1], y[n1:n2]

    elif args["data_name"] == "Spheres5500":
        data_train, label_train = create_sphere_dataset5500(seed=0)
        # Train on the first half of the shuffled points, test on the last.
        if not testBool:
            data_train, label_train = data_train[:5500], label_train[:5500]
        else:
            data_train, label_train = data_train[-5500:], label_train[-5500:]

    elif args["data_name"] == "Spheres10000":
        data_train, label_train = create_sphere_dataset10000(seed=0)
        if not testBool:
            data_train, label_train = data_train[:10000], label_train[:10000]
        else:
            data_train, label_train = data_train[10000:], label_train[10000:]

    elif args["data_name"] == "cifa10":
        cifar_set = datasets.CIFAR10(
            root="./data", train=not testBool, download=True, transform=None
        )
        # HWC uint8 -> NCHW float in [0, 1].
        X = torch.tensor(cifar_set.data).float().permute(0, 3, 1, 2) / 255
        y = torch.tensor(cifar_set.targets).long()

        n1 = args["data_trai_n"]
        n2 = n1 + 1
        data_train, data_test = X[:n1, :], X[n1:n2, :]
        label_train, label_test = y[:n1], y[n1:n2]

    elif args["data_name"] == "swishroll":
        # BUG FIX: the top-level import of make_swiss_roll was commented out,
        # so this branch raised NameError; import it locally instead.
        from sklearn.datasets import make_swiss_roll

        args["data_trai_n"] = 800
        X, y = make_swiss_roll(
            n_samples=args["data_trai_n"] * 2, noise=0.0, random_state=1
        )

        # Center each coordinate, then rescale so the largest extent is 15.
        X = X - np.mean(X, axis=0)
        scale = 15 / np.max(np.max(X, axis=0) - np.min(X, axis=0))
        X = X * scale

        # First half of the samples is the train split, second half the test.
        nP = X.shape[0] // 2
        if testBool:
            data_train, label_train = X[nP:], y[nP:]
        else:
            data_train, label_train = X[:nP], y[:nP]

    elif args["data_name"] == "SCurve":
        # BUG FIX: make_s_curve's top-level import was commented out as well.
        from sklearn.datasets import make_s_curve

        args["data_trai_n"] = 800
        X, y = make_s_curve(
            n_samples=args["data_trai_n"] * 2, noise=0.0, random_state=1
        )

        X = X - np.mean(X, axis=0)
        scale = 15 / np.max(np.max(X, axis=0) - np.min(X, axis=0))
        X = X * scale

        nP = X.shape[0] // 2
        if testBool:
            data_train, label_train = X[nP:], y[nP:]
        else:
            data_train, label_train = X[:nP], y[:nP]

    elif args["data_name"] == "severedsphere":
        args["data_trai_n"] = 1200

        # Sample (p, t) angles, then cut away the polar caps of the sphere.
        random_state = utils.check_random_state(0)
        p = random_state.rand(2 * args["data_trai_n"]) * (2 * np.pi - 0.55)
        t = random_state.rand(2 * args["data_trai_n"]) * np.pi

        indices = (t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8)))
        colors = p[indices]
        x_x, x_y, x_z = (
            np.sin(t[indices]) * np.cos(p[indices]),
            np.sin(t[indices]) * np.sin(p[indices]),
            np.cos(t[indices]),
        )

        X = np.concatenate(
            (x_x.reshape(-1, 1), x_y.reshape(-1, 1), x_z.reshape(-1, 1)), axis=1
        )
        y = colors
        nP = X.shape[0] // 2
        if testBool:
            data_train, label_train = X[nP:], y[nP:]
        else:
            data_train, label_train = X[:nP], y[:nP]

    elif args["data_name"] == "coil20":
        # Assumes the directory contains exactly the 1440 processed COIL-20
        # images named like "objK__A.png" -- TODO confirm against the data dir.
        path = "./data/coil-20-proc"
        fig_path = os.listdir(path)
        fig_path.sort()

        label = []
        data = np.zeros((1440, 128 * 128))
        for i in range(1440):
            I = Image.open(path + "/" + fig_path[i])
            data[i] = np.array(I).reshape(1, -1)
            # Object id parsed from the "objK__A" file-name pattern.
            label.append(int(fig_path[i].split("__")[0].split("obj")[1]))

        data = data / 255
        label = np.array(label)
        data_train, data_test = data, data
        label_train, label_test = label, label

    elif args["data_name"] == "coil100grey":
        path = "./data/coil-100"
        fig_path = os.listdir(path)

        label = []
        data = np.zeros((100 * 72, 128 * 128))
        # BUG FIX: rows were previously indexed by the directory-listing
        # position, which desynchronizes data rows and labels when non-image
        # entries (e.g. a README) are present; use a dedicated row counter.
        row = 0
        for path_i in fig_path:
            if "obj" in path_i:
                I = Image.open(path + "/" + path_i)
                # Average the RGB channels to obtain a grey image.
                data[row] = np.array(I).mean(axis=2).reshape(1, -1)
                label.append(int(path_i.split("__")[0].split("obj")[1]))
                row += 1

        data = data / 255
        label = np.array(label)
        data_train, data_test = data, data
        label_train, label_test = label, label

    elif args["data_name"] == "coil100rgb":
        path = "./data/coil-100"
        fig_path = os.listdir(path)

        label = []
        data = np.zeros((100 * 72, 128 * 128 * 3))
        # Same row-counter fix as the grey variant above.
        row = 0
        for path_i in fig_path:
            if "obj" in path_i:
                I = Image.open(path + "/" + path_i)
                data[row] = np.array(I).reshape(1, -1)
                label.append(int(path_i.split("__")[0].split("obj")[1]))
                row += 1

        data = data / 255
        label = np.array(label)
        data_train, data_test = data, data
        label_train, label_test = label, label

    elif args["data_name"] == "flow_cytometry":
        # BUG FIX: FlowCal's top-level import was commented out (NameError),
        # and sklearn.preprocessing was never imported; import both locally so
        # the other datasets work without FlowCal installed.
        import FlowCal
        from sklearn.preprocessing import StandardScaler

        path = "./data/Flow_Cytometry/pbmc_luca.fcs"
        s = np.array(FlowCal.io.FCSData(path))
        scaler = StandardScaler()
        scaler.fit(s)
        data_transformed = scaler.transform(s)
        y = np.array([0] * data_transformed.shape[0])

        # Randomly subsample half of the events.
        subsample_index = np.random.choice(
            range(data_transformed.shape[0]),
            int(data_transformed.shape[0] * 0.5),
            replace=False,
        )
        data_transformed = data_transformed[subsample_index]
        y = y[subsample_index]

        data_train = data_transformed[: args["data_trai_n"]]
        data_test = data_transformed
        label_train, label_test = y[: args["data_trai_n"]], y
        print(data_train.shape)

    elif args["data_name"] == "digits":
        # BUG FIX: the bare name `datasets` is torchvision.datasets (it
        # shadows the sklearn import) and has no load_digits; import the
        # sklearn loader explicitly.
        from sklearn.datasets import load_digits

        digitsr = load_digits(n_class=10)
        # Map pixel values into roughly [-1, 1).
        data = digitsr.data / 255 * 2 - 1
        label = digitsr.target
        n1 = args["data_trai_n"]
        n2 = n1 + args["data_test_n"]
        data_train, data_test = data[:n1, :], data[n1:n2, :]
        label_train, label_test = label[:n1], label[n1:n2]

    elif args["data_name"] == "Samusik01":
        data = np.loadtxt("./data/cell/Samusik01.csv", delimiter=",")
        label = np.loadtxt("./data/cell/Samusik01label.csv", delimiter=",")
        n1 = args["data_trai_n"]
        n2 = n1 + args["data_test_n"]
        data_train, data_test = data[:n1, :], data[n1:n2, :]
        label_train, label_test = label[:n1], label[n1:n2]

    elif args["data_name"] == "cora":
        # Planetoid-style pickled graph data (x/tx/allx etc.).
        import pickle as pkl
        import sys
        import scipy.sparse as sp

        def normalize(mx):
            """Row-normalize a sparse matrix, leaving all-zero rows intact."""
            rowsum = np.array(mx.sum(1))
            # Replace zero row sums with 1 to avoid division by zero.
            rowsum = (rowsum == 0) * 1 + rowsum
            r_inv = np.power(rowsum, -1).flatten()
            r_inv[np.isinf(r_inv)] = 0.0
            r_mat_inv = sp.diags(r_inv)
            return r_mat_inv.dot(mx)

        def parse_index_file(filename):
            """Parse a file containing one integer index per line."""
            index = []
            for line in open(filename):
                index.append(int(line.strip()))
            return index

        path_data = "./data/graphdata"
        dataset_str = "cora"
        names = ["x", "y", "tx", "ty", "allx", "ally", "graph"]
        objects = []
        for name in names:
            with open(
                path_data + "/ind.{}.{}".format(dataset_str.lower(), name), "rb"
            ) as f:
                if sys.version_info > (3, 0):
                    objects.append(pkl.load(f, encoding="latin1"))
                else:
                    objects.append(pkl.load(f))

        x, y, tx, ty, allx, ally, graph = tuple(objects)
        test_idx_reorder = parse_index_file(
            path_data + "/ind.{}.test.index".format(dataset_str)
        )
        test_idx_range = np.sort(test_idx_reorder)

        # Put the shuffled test rows back into their canonical order.
        features = sp.vstack((allx, tx)).tolil()
        features[test_idx_reorder, :] = features[test_idx_range, :]
        labels = np.vstack((ally, ty))
        labels[test_idx_reorder, :] = labels[test_idx_range, :]

        features = normalize(features)
        data_train = torch.FloatTensor(np.array(features.todense())).float()
        labels = torch.LongTensor(labels)
        # One-hot label rows -> class indices.
        label_train = torch.max(labels, dim=1)[1]

    data_train = torch.tensor(data_train)
    label_train = torch.tensor(label_train)
    return data_train.float(), label_train


def GetDataTwo(args, device, a, b, batch_size=128, pca=None):
    """Load a two-class subset of a dataset as train/test tensors.

    Parameters:
        args (dict): reads "data_name", "data_trai_n" and (for "digits")
            "data_test_n".
        device: unused; kept for caller compatibility.
        a, b (int): the two class labels to keep (mnist branch only).
        batch_size (int): unused; kept for caller compatibility.
        pca: when not None, project the mnist data onto 64 PCA components.

    Returns:
        (data_train, data_test, label_train, label_test) as torch tensors.
    """
    if args["data_name"] == "mnist":
        # BUG FIX: fetch_openml's top-level import was commented out, so this
        # branch raised NameError; import it locally instead.
        from sklearn.datasets import fetch_openml

        X, y = fetch_openml(
            "mnist_784", data_home="~/scikit_learn_data", version=1, return_X_y=True
        )
        X = X / 255
        y = y.astype(np.int32)

        # Keep only the two requested classes.
        index = (y == a) + (y == b) > 0
        X = X[index]
        y = y[index]

        n1 = args["data_trai_n"]
        n2 = 60000
        data_train, data_test = X[:n1, :], X[n1:n2, :]
        label_train, label_test = y[:n1], y[n1:n2]

        if pca is not None:
            from sklearn.decomposition import PCA

            # Fit PCA on the training split only, then project both splits.
            clf = PCA(n_components=64)
            clf.fit(data_train)
            data_train = clf.transform(data_train)
            data_test = clf.transform(data_test)

    if args["data_name"] == "digits":
        # BUG FIX: the bare name `datasets` is torchvision.datasets (it
        # shadows the sklearn import) and has no load_digits.
        from sklearn.datasets import load_digits

        digitsr = load_digits(n_class=6)
        # Map pixel values into roughly [-1, 1).
        data = digitsr.data / 255 * 2 - 1
        label = digitsr.target
        n1 = args["data_trai_n"]
        n2 = n1 + args["data_test_n"]
        data_train, data_test = data[:n1, :], data[n1:n2, :]
        label_train, label_test = label[:n1], label[n1:n2]

    data_train = torch.tensor(data_train)
    data_test = torch.tensor(data_test)
    label_train = torch.tensor(label_train)
    label_test = torch.tensor(label_test)
    return data_train, data_test, label_train, label_test


def create_sphere_dataset5500(
    n_samples=1500, d=100, bigR=25, n_spheres=11, r=5, seed=42
):
    """Create ``n_spheres - 1`` small shifted d-spheres plus one big
    surrounding sphere of radius ``bigR``, shuffled together.

    Returns:
        (dataset, labels): dataset has shape (n_spheres * n_samples, d + 1),
        rescaled into roughly [0, 1]; labels[i] is the index of the sphere
        that point i was sampled from.
    """
    # BUG FIX: the seed was hard-coded to 42, silently ignoring the ``seed``
    # argument (callers pass seed=0).
    np.random.seed(seed)

    # Rescaling the shift variance by sqrt(d) keeps the big sphere around
    # the small inner spheres.
    variance = 10 / np.sqrt(d)
    shift_matrix = np.random.normal(0, variance, [n_spheres, d + 1])

    spheres = []
    n_datapoints = 0
    for i in np.arange(n_spheres - 1):
        sphere = dsphere(n=n_samples, d=d, r=r)
        spheres.append(sphere + shift_matrix[i, :])
        n_datapoints += n_samples

    # Additional big surrounding sphere.
    n_samples_big = 1 * n_samples
    big = dsphere(n=n_samples_big, d=d, r=bigR)
    spheres.append(big)
    n_datapoints += n_samples_big

    dataset = np.concatenate(spheres, axis=0)

    # Label every point with the index of its source sphere.
    labels = np.zeros(n_datapoints)
    label_index = 0
    for index, data in enumerate(spheres):
        n_sphere_samples = data.shape[0]
        labels[label_index : label_index + n_sphere_samples] = index
        label_index += n_sphere_samples

    # Shuffle points and labels with the same permutation.
    arr = np.arange(dataset.shape[0])
    np.random.shuffle(arr)
    dataset = dataset[arr]
    labels = labels[arr]

    # Squash coordinates into roughly [0, 1] for downstream models.
    return dataset / 22 + 0.5, labels


def create_sphere_dataset10000(n_samples=1000, d=100, n_spheres=11, r=5, seed=42):
    """Build the 'spheres' benchmark: ``n_spheres - 1`` small shifted
    d-spheres plus one big surrounding sphere with 10x the points and 5x the
    radius, deterministically interleaved and rescaled into roughly [0, 1].

    Returns:
        (dataset, labels): dataset has shape (20 * n_samples, d + 1);
        labels[i] is the index of the sphere point i was sampled from.
    """
    np.random.seed(seed)

    # Scaling the shift std by sqrt(d) keeps the big sphere around the
    # small inner spheres.
    shift_std = 10 / np.sqrt(d)
    shifts = np.random.normal(0, shift_std, [n_spheres, d + 1])

    # The small spheres, each translated by its own random shift.
    clouds = [
        dsphere(n=n_samples, d=d, r=r) + shifts[k, :]
        for k in range(n_spheres - 1)
    ]
    # The big surrounding sphere: 10x the samples, 5x the radius.
    clouds.append(dsphere(n=10 * n_samples, d=d, r=r * 5))

    dataset = np.concatenate(clouds, axis=0)
    total = dataset.shape[0]

    # labels[i] = index of the cloud point i came from.
    labels = np.concatenate(
        [np.full(cloud.shape[0], k, dtype=float) for k, cloud in enumerate(clouds)]
    )

    # Deterministic interleave: stride through 20 evenly spaced anchor
    # positions so consecutive rows cycle across all spheres.
    anchors = np.linspace(0, total, num=20, dtype="int16", endpoint=False)
    order = np.concatenate([anchors + int(step) for step in range(n_samples)])

    dataset = dataset[order]
    labels = labels[order]

    # Squash coordinates into roughly [0, 1].
    return dataset / 22 + 0.5, labels


def dsphere(n=100, d=2, r=1, noise=None, ambient=None):
    """
    Sample `n` points uniformly from the surface of a d-sphere.

    Arguments:
        n {int} -- number of data points in shape
        d {int} -- intrinsic dimension; points live in R^{d+1}
        r {float} -- radius of sphere
        noise {float, default=None} -- std of Gaussian noise added to the samples
        ambient {int, default=None} -- Embed the sphere into a space with ambient
            dimension equal to `ambient`. The sphere is randomly rotated in this
            high dimensional space (norms are preserved).

    Returns:
        np.ndarray of shape (n, d + 1), or (n, ambient) when `ambient` is set.
    """
    data = np.random.randn(n, d + 1)

    # Normalize the Gaussian samples onto the sphere of radius r.
    data = r * data / np.sqrt(np.sum(data ** 2, 1)[:, None])

    if noise:
        data += noise * np.random.randn(*data.shape)

    if ambient:
        assert ambient > d, "Must embed in higher dimensions"
        # BUG FIX: this previously called an undefined `embed` (NameError).
        # Zero-pad to the ambient dimension, then apply a random orthogonal
        # rotation (QR of a Gaussian matrix), which preserves point norms.
        padded = np.zeros((n, ambient))
        padded[:, : d + 1] = data
        rotation, _ = np.linalg.qr(np.random.randn(ambient, ambient))
        data = padded @ rotation

    return data
