# coding=utf8

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_blobs
from sklearn.model_selection import StratifiedShuffleSplit


class Preliminary:
    """Demos of sklearn's built-in dataset-splitting utilities."""

    @staticmethod
    def split_tarin_set():
        """Split a tiny toy dataset with train_test_split and print the result.

        NOTE(review): the method name keeps the original (misspelled)
        public spelling so any existing callers continue to work.
        """
        X, y = np.arange(10).reshape((5, 2)), range(5)
        # Hold out 33% of the samples; random_state=42 fixes the shuffle.
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=0.33, random_state=42)
        # BUGFIX: the original printed only the literal ">>> X, y" and
        # discarded the computed split — print the actual data instead.
        print(">>> X, y")
        print(X, list(y))
        print(">>> X_train, y_train")
        print(X_train, y_train)
        print(">>> X_test, y_test")
        print(X_test, y_test)

    @staticmethod
    def stratified_shuffle_split():
        """Generate a small blob dataset and print StratifiedShuffleSplit folds."""
        rand_seed = 1000
        sample_num = 10
        dataset = make_blobs(sample_num, 2)
        print(dataset)
        # One (train, test) index pair per split; 50% of samples held out each time.
        sss = StratifiedShuffleSplit(n_splits=sample_num, test_size=0.5,
                                     random_state=rand_seed)
        for train_index, test_index in sss.split(dataset[0], dataset[1]):
            print(train_index, test_index)


class UserSplit:
    """Hand-rolled dataset splitting utilities built only on NumPy."""

    @staticmethod
    def split1(dataset, labels=None, test_size=None, shuffle=False, random_stat=None):
        """Split a dataset into a test set and a train set.

        Parameters
        ----------
        dataset : ndarray
            1-D or n-D array of samples; the first axis indexes samples.
        labels : ndarray, optional
            1-D label array aligned with ``dataset``.
        test_size : float or int
            Fraction in (0, 1) of samples to place in the test set, or an
            absolute number of test samples.
        shuffle : bool
            Whether to randomly permute the samples before splitting.
        random_stat : int, optional
            Random seed for reproducible shuffling.

        Returns
        -------
        tuple
            ``(test_set, train_set, test_labels, train_labels)``; the label
            parts are None when ``labels`` is None.
        """
        if random_stat is not None:
            np.random.seed(random_stat)
        if shuffle:
            new_permutation = np.random.permutation(len(dataset))
            dataset = dataset[new_permutation]
            # BUGFIX: only permute labels when they were actually supplied;
            # the original crashed on labels=None with shuffle=True.
            if labels is not None:
                labels = labels[new_permutation]
        # A whole number means an absolute count, a fraction means a ratio.
        # BUGFIX: is_integer() also treats e.g. 2.0 as a count, and the
        # count is always coerced to int so slicing works.
        if float(test_size).is_integer():
            test_size_count = int(test_size)
        else:
            test_size_count = int(test_size * len(dataset))
        # Split the sample dataset.
        test_set = dataset[:test_size_count]
        train_set = dataset[test_size_count:]
        test_labels, train_labels = None, None
        if labels is not None:      # split the label dataset the same way
            test_labels = labels[:test_size_count]
            train_labels = labels[test_size_count:]
        return test_set, train_set, test_labels, train_labels

    @staticmethod
    def split(dataset, labels=None, sample_size=None, shuffle=True, random_seed=None):
        """Split a dataset into train, validation and test subsets.

        Parameters
        ----------
        dataset : ndarray
            1-D or n-D array of samples; the first axis indexes samples.
        labels : ndarray, optional
            1-D label array aligned with ``dataset``.
        sample_size : sequence of three floats
            Proportions ``[train, validation, test]`` of the dataset.
        shuffle : bool
            Whether to randomly permute the samples before splitting.
        random_seed : int, optional
            Random seed for reproducible shuffling.

        Returns
        -------
        tuple
            ``(train_set, train_labels, validate_set, validate_labels,
            test_set, test_labels)``; label parts are None when ``labels``
            is None.
        """
        # Randomly reorder the samples (and labels, if any).
        if shuffle:
            if random_seed is not None:
                np.random.seed(random_seed)
            new_permutation = np.random.permutation(len(dataset))
            dataset = dataset[new_permutation]
            # BUGFIX: the original unconditionally indexed labels and
            # crashed when labels was None.
            if labels is not None:
                labels = labels[new_permutation]

        data_num = len(dataset)
        train_num = int(data_num * sample_size[0])
        verify_num = int(data_num * sample_size[1])

        # Split the sample dataset; the test set takes whatever remains
        # after truncating the train/validation counts to integers.
        train_set = dataset[:train_num]
        validate_set = dataset[train_num:train_num + verify_num]
        test_set = dataset[train_num + verify_num:]

        # Split the label dataset with the same boundaries.
        train_labels, validate_labels, test_labels = None, None, None
        if labels is not None:
            train_labels = labels[:train_num]
            validate_labels = labels[train_num:train_num + verify_num]
            test_labels = labels[train_num + verify_num:]
        return train_set, train_labels, validate_set, validate_labels, test_set, test_labels

    @staticmethod
    def test_split():
        """Demo: three-way split of a 6-sample, 2-class dataset."""
        X = np.array([[1, 2], [3, 4], [5, 6],           # dataset with 6 samples
                      [7, 8], [9, 10], [11, 12]])
        y = np.array([0, 0, 0, 1, 1, 1])                # labels with 2 classes

        split_data = UserSplit.split(X, y, sample_size=[0.5, 0.3, 0.2])
        print(split_data)
        print(
            '# dataset\n'
            '>>> X\n'
            f'{X}\n'
            f'# data labels:\n'
            f'>>> y\n'
            f'{y}\n'
            f'>>> split_data = UserSplit.split(X, y, sample_size=[0.5, 0.3, 0.2])\n'
            '# train_data:\n'
            f'{split_data[0]}\n'
            f'{split_data[1]}\n'
            '# validate_data:\n'
            f'{split_data[2]}\n'
            f'{split_data[3]}\n'
            '# test_data:\n'
            f'{split_data[4]}\n'
            f'{split_data[5]}\n'
        )


def task():
    """Generate a 10-sample two-class Gaussian-blob dataset and split it
    60% / 20% / 20% into train / validation / test via two successive
    calls to sklearn's train_test_split.
    """
    seed = 1000
    sample_num = 10
    # BUGFIX: the docstring promises a two-class dataset, but make_blobs
    # defaults to 3 centers — request 2 explicitly.
    X, y = make_blobs(n_samples=sample_num, n_features=5, centers=2,
                      random_state=seed)
    # First cut: 60% train, 40% held out for further splitting.
    X_train, X_rest, y_train, y_rest = \
        train_test_split(X, y, test_size=0.40, random_state=seed)
    # Second cut: halve the held-out 40% -> 20% validation, 20% test.
    X_validate, X_test, y_validate, y_test = \
        train_test_split(X_rest, y_rest, test_size=0.50, random_state=seed)
    # Print every subset.
    print("原数据集：\n", X, '\n', y)
    print("训练集：\n", X_train, '\n', y_train)
    print("验证集：\n", X_validate, '\n', y_validate)
    print("测试集：\n", X_test, '\n', y_test)


def training1():
    """Generate a 30-sample blob dataset and split it 60% / 20% / 20% into
    train / validation / test using two StratifiedShuffleSplit splitters.

    NOTE: StratifiedShuffleSplit rejects splits that would leave a class
    with fewer samples than it can stratify over, so the dataset must be
    large enough (hence 30 samples here).
    """
    seed = 100
    sample_num = 30
    X, y = make_blobs(n_samples=sample_num, n_features=3, random_state=seed)
    # First split: 60% train / 40% held out, stratified by class.
    sss1 = StratifiedShuffleSplit(1, test_size=0.4, random_state=seed)
    train_index, other_index = next(sss1.split(X, y))
    # Second split: halve the held-out part into validation and test.
    sss2 = StratifiedShuffleSplit(1, test_size=0.5, random_state=seed)
    val_local, test_local = next(sss2.split(X[other_index], y[other_index]))
    # BUGFIX: the second split yields positions *within* other_index, not
    # positions in X — the original indexed X/y with them directly, picking
    # wrong rows (possibly overlapping the training set). Map them back
    # through other_index first.
    validate_index = other_index[val_local]
    test_index = other_index[test_local]
    X_train = X[train_index]
    y_train = y[train_index]
    X_validate = X[validate_index]
    y_validate = y[validate_index]
    X_test = X[test_index]
    y_test = y[test_index]
    # Print every subset.
    print("原数据集：\n", X, '\n', y)
    print("训练集：\n", X_train, '\n', y_train)
    print("验证集：\n", X_validate, '\n', y_validate)
    print("测试集：\n", X_test, '\n', y_test)


def training2():
    """Generate a 30-sample blob dataset and split it 60% / 20% / 20% into
    train / validation / test with the hand-rolled UserSplit.split.
    """
    seed = 100
    sample_num = 30
    X, y = make_blobs(n_samples=sample_num, n_features=3, random_state=seed)
    # Shuffled three-way split with a fixed seed for reproducibility.
    split_result = UserSplit.split(
        X, y, sample_size=[0.6, 0.2, 0.2], random_seed=seed)
    X_train, y_train, X_validate, y_validate, X_test, y_test = split_result
    # Print every subset.
    print("原数据集：\n", X, '\n', y)
    print("训练集：\n", X_train, '\n', y_train)
    print("验证集：\n", X_validate, '\n', y_validate)
    print("测试集：\n", X_test, '\n', y_test)


if __name__ == "__main__":
    # Preliminary.stratified_shuffle_split()
    # Preliminary.
    # UserSplit.test_split()
    # task()
    # training1()
    training2()
