import numpy as np
import copy


def randomSampling0(D, ratio_samples=1.0, ratio_features=1.0):
    """
    Bootstrap-sample a dataset D = (X, Y).

    X is a numpy array of shape (n, num_features); Y holds the n labels.
    Rows are drawn WITH replacement (bootstrap); feature columns are drawn
    WITHOUT replacement.

    Parameters
    ----------
    D : tuple (X, Y)
        The dataset to sample from.
    ratio_samples : float
        Fraction of rows to draw (with replacement).
    ratio_features : float
        Fraction of columns to keep (without replacement).

    Returns
    -------
    X_train : ndarray
        Sampled rows restricted to the selected columns.
    Y_train : ndarray
        Labels aligned with X_train's rows.
    features_selected : ndarray
        Indices of the selected columns. np.random.choice returns them in
        random order, so even ratio_features=1.0 shuffles the column order.
    """
    X, Y = D
    n = len(X)      # original dataset size
    m = X.shape[1]  # number of feature columns

    # Guard against degenerate empty draws when a ratio is very small:
    # always keep at least one row and one column.
    n_samples = max(1, int(n * ratio_samples))
    num_features = max(1, round(m * ratio_features))

    # Draw n_samples row indices with replacement (bootstrap).
    indexes_sampling = np.random.choice(np.arange(0, n), size=n_samples, replace=True)
    # Draw feature columns without replacement.
    features_selected = np.random.choice(np.arange(0, m), size=num_features, replace=False)

    X_train = X[indexes_sampling]            # pick the sampled rows first
    X_train = X_train[:, features_selected]  # then restrict to the chosen columns
    Y_train = Y[indexes_sampling]
    return X_train, Y_train, features_selected


class BaggingClassifierByUser:
    """Simple bagging ensemble: trains `num_models` independent deep copies of a
    base estimator on bootstrap samples and predicts by per-row majority vote.

    Labels are assumed to be integers 0..K-1 (num_labels is derived from
    max(Y_train) + 1).
    """

    def __init__(self, model_learning, num_models=2, max_samples=1.0, max_features=1.0):
        # Deep-copy each base model: `[model_learning for _ in range(...)]`
        # would put the SAME object in every slot, so all "members" of the
        # ensemble would share one set of fitted parameters.
        self.modelsList = [copy.deepcopy(model_learning) for _ in range(num_models)]
        self.T = num_models
        self.ratio_samples = max_samples
        # NOTE: even max_features=1.0 shuffles the column order, because the
        # sampler returns the feature indices in random order.
        self.ratio_features = max_features

        self.num_labels = 0  # number of possible labels, set in fit()
        # Column indices chosen for each model, so predict() can feed each
        # model only the columns it was trained on.
        self.list2features_selected = []

    def fit(self, X_train, Y_train):
        """Fit each ensemble member on its own bootstrap sample of (X_train, Y_train)."""
        self.num_labels = int(max(Y_train)) + 1
        # Reset the per-model feature bookkeeping: without this, a second
        # fit() call would append to the old list and predict() would use
        # stale column selections (bug fix).
        self.list2features_selected = []
        for i in range(self.T):
            X, Y, features_selected = randomSampling0(
                (X_train, Y_train), self.ratio_samples, self.ratio_features)
            self.modelsList[i].fit(X, Y)
            self.list2features_selected.append(features_selected)

    def score(self, X, Y):
        """Return the mean accuracy of predict(X) against the true labels Y."""
        labels = self.predict(X)
        n = float(len(X))
        acc = (Y == labels).sum() / n
        return acc

    def predict(self, X):
        """Predict labels for X by majority vote over the T fitted models."""
        count_labels = np.zeros(shape=(len(X), self.num_labels))
        i_indexes = np.arange(len(X))
        for i in range(self.T):
            features_selected = self.list2features_selected[i]
            # Each model only sees the columns it was trained on.
            XInput = X[:, features_selected]
            label = self.modelsList[i].predict(XInput)
            # Per-row vote: pair each row index with that row's predicted
            # label (`count_labels[:, label] += 1` would tally wrongly).
            count_labels[i_indexes, label] += 1
        labels = count_labels.argmax(axis=1)
        return labels


if __name__ == "__main__":
    from loadDatas import loadDatas
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.ensemble import BaggingClassifier

    np.random.seed(0)

    X_train, X_test, y_train, y_test = loadDatas('wine', test_size=0.3)
    num_models = 3

    # Hand-rolled bagging ensemble over KNN base learners.
    print("袋装模型(用户定义)：复合{}个 KNN".format(num_models))
    bagging = BaggingClassifierByUser(
        KNeighborsClassifier(), num_models=num_models,
        max_samples=0.8, max_features=0.5)
    bagging.fit(X_train, y_train)
    acc_train = bagging.score(X_train, y_train)
    acc_test = bagging.score(X_test, y_test)
    print(f'\ttrainScore:{acc_train}\n\ttestScore:{acc_test}')

    # Single KNN baseline for comparison.
    print("单个模型 KNN")
    knn = KNeighborsClassifier()
    knn.fit(X_train, y_train)
    acc_train = knn.score(X_train, y_train)
    acc_test = knn.score(X_test, y_test)
    print(f'\ttrainScore:{acc_train}\n\ttestScore:{acc_test}')

    # sklearn's reference bagging implementation with matching settings.
    print("袋装模型(sklearn接口)")
    sk_bagging = BaggingClassifier(n_estimators=num_models, max_samples=0.8, max_features=0.5)
    sk_bagging.fit(X_train, y_train)
    acc_train = sk_bagging.score(X_train, y_train)
    acc_test = sk_bagging.score(X_test, y_test)
    print(f'\ttrainScore:{acc_train}\n\ttestScore:{acc_test}')


