from spikingjelly.datasets import cifar10_dvs
import os
import math
import numpy as np
import torch
import time


def build_dataset(is_train, args):
    """Build (and cache) the CIFAR10-DVS train/test datasets.

    :param is_train: if ``True`` return the train split, otherwise the test split
    :param args: namespace providing ``data_path`` (dataset root directory) and
        ``time_steps`` (number of frames each event sample is integrated into)
    :return: a ``torch.utils.data.Subset`` for the requested split
    """
    # Data loading code
    print("Loading data")

    # Cache the loaded splits on the function object, keyed by the loading
    # parameters so a later call with a different data_path/time_steps does
    # not return a stale split (the old cache ignored `args` entirely).
    key = (args.data_path, args.time_steps)
    cache = getattr(build_dataset, "datasets", None)
    if cache is None or cache[0] != key:
        # Load the event data as frames, with `time_steps` frames per sample,
        # splitting each event stream by event count ('number').
        origin_set = cifar10_dvs.CIFAR10DVS(root=args.data_path,
                                            data_type='frame',
                                            frames_number=args.time_steps,
                                            split_by='number')
        # Stratified 90% / 10% train/test split over the 10 classes.
        train_set, test_set = split_to_train_test_set(0.9, origin_set, 10)
        cache = (key, (train_set, test_set))
        build_dataset.datasets = cache

    return cache[1][0] if is_train else cache[1][1]


def split_to_train_test_set(train_ratio: float, origin_dataset: torch.utils.data.Dataset, num_classes: int, random_split: bool = False):
    '''
    Split ``origin_dataset`` into train/test subsets with a per-class ratio,
    so every class contributes the same fraction of its samples to the train set.

    :param train_ratio: split the ratio of the origin dataset as the train set
    :type train_ratio: float
    :param origin_dataset: the origin dataset; items are ``(x, y)`` pairs
    :type origin_dataset: torch.utils.data.Dataset
    :param num_classes: total classes number, e.g., ``10`` for the MNIST dataset
    :type num_classes: int
    :param random_split: If ``False``, the front ratio of samples in each class will
            be included in the train set, while the rest will be included in the test set.
            If ``True``, this function will split samples in each class randomly. The randomness is controlled by
            ``numpy.random.seed``
    :type random_split: bool
    :return: a tuple ``(train_set, test_set)``
    :rtype: tuple
    '''
    # Group sample indices by class label.
    label_idx = [[] for _ in range(num_classes)]
    for i, item in enumerate(origin_dataset):
        y = item[1]
        # Labels may come back as 0-d arrays/tensors; convert to a plain int.
        if isinstance(y, (np.ndarray, torch.Tensor)):
            y = y.item()
        label_idx[y].append(i)

    if random_split:
        for idx in label_idx:
            np.random.shuffle(idx)

    train_idx = []
    test_idx = []
    for idx in label_idx:
        # ceil keeps at least one training sample for tiny, non-empty classes.
        pos = math.ceil(len(idx) * train_ratio)
        train_idx.extend(idx[:pos])
        test_idx.extend(idx[pos:])

    return torch.utils.data.Subset(origin_dataset, train_idx), torch.utils.data.Subset(origin_dataset, test_idx)

