import argparse
import timeit
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split


def get_arguments():
    """Parse command-line arguments for the AdaBoost experiment.

    Returns:
        argparse.Namespace with:
            test_size (float): proportion of samples held out for testing.
            random_state (int): seed for the dataset shuffle.
            classes (list[int]): the two wine-dataset labels to classify.
    """
    parser = argparse.ArgumentParser(description='AdaBoost')
    parser.add_argument('--test_size', type=float, default=0.33,
                        help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42,
                        help='the random seed of dataset split')
    # BUG FIX: `type=list` split the CLI value into single characters
    # (e.g. "--classes 12" -> ['1', '2'] as strings, which never match the
    # integer labels). `nargs='+'` with `type=int` parses "--classes 1 2"
    # into [1, 2] as intended. Help text also fixed: the script loads the
    # wine dataset, not iris.
    parser.add_argument('--classes', type=int, nargs='+', default=[1, 2],
                        help='the classes of wine datasets for classify, '
                             'element of parameter should be 0, 1, 2')

    args = parser.parse_args()
    return args


class MyPreprocessing:
    """Load the sklearn wine dataset, select two classes, and split it."""

    def __init__(self, parser):
        # `parser` is the argparse.Namespace produced by get_arguments().
        self.random_state = parser.random_state
        self.test_size = parser.test_size
        self.classes = parser.classes

    def load_dataset(self):
        """Return the wine dataset as a DataFrame with a trailing 'label' column."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            dataset = load_wine()
            datas = pd.DataFrame(dataset.data, columns=dataset.feature_names)
            # Append the target as the last column so one frame carries both.
            datas.insert(loc=len(datas.columns), column='label',
                         value=dataset.target)
            return datas

    def choose_dataset(self, df):
        """Keep only rows whose label is in self.classes.

        Returns:
            (features, labels) as numpy arrays.
        """
        df = df[df.label.isin(self.classes)]  # select the requested labels
        data = df.iloc[:, :-1]
        target = df.iloc[:, -1]
        return data.values, target.values

    def split_dataset(self, datas, targets):
        """Map labels to {-1, +1}, shuffle, and split into train/test sets.

        The smaller label value becomes -1, the other +1 (AdaBoost's
        sign convention). Returns (X_train, y_train, X_test, y_test).
        """
        m = np.min(targets)
        labels = np.where(targets == m, -1, 1)
        # BUG FIX: validation previously used `assert`, which is stripped
        # under `python -O`; raise an explicit error instead.
        if not 0 < self.test_size < 1:
            raise ValueError("Please choose right test size between 0 and 1")
        test_num = int(self.test_size * len(labels))
        labels = labels.reshape(len(labels), 1)  # lift labels to 2-D
        # Concatenate features and labels so one shuffle keeps them aligned.
        data_target = np.concatenate([datas, labels], axis=1)
        np.random.seed(self.random_state)
        np.random.shuffle(data_target)
        X_test = data_target[:test_num, :-1]
        y_test = data_target[:test_num, -1]
        X_train = data_target[test_num:, :-1]
        y_train = data_target[test_num:, -1]
        return X_train, y_train, X_test, y_test


class DecisionTreeClassifier:
    """A one-level decision tree (decision stump), the AdaBoost base learner.

    The stump predicts +1 when ``feature * sign >= threshold`` and -1
    otherwise, for a single selected feature.
    """

    def __init__(self):
        # Smallest weighted misclassification rate found (weights sum to 1).
        self.min_error = 1
        # Index of the best feature.
        self.best_feature = None
        # Best split threshold for that feature.
        self.best_threshold = None
        # Best comparison sign (+1 or -1).
        self.best_sign = None

    def fit(self, X, y, sample_weight):
        """Exhaustively search (feature, threshold, sign) minimizing weighted error.

        Args:
            X: (n_samples, n_features) array.
            y: labels in {-1, +1}.
            sample_weight: per-sample weights (expected to sum to 1).

        Returns:
            (min_error, best_feature, best_threshold, best_sign).
        """
        # BUG FIX: coerce both to ndarrays; the original compared a Python
        # list against y with `!=`, which silently yields a single scalar
        # (and a wrong index) whenever y is a plain list.
        sample_weight = np.asarray(sample_weight)
        y = np.asarray(y)
        for i in range(np.shape(X)[1]):
            features = X[:, i]
            # Candidate thresholds: midpoints between consecutive distinct
            # values. np.unique both sorts and de-duplicates.
            distinct = np.unique(features)
            if len(distinct) == 1:
                # Constant feature: no split possible.
                continue
            thresholds = (distinct[1:] + distinct[:-1]) / 2
            for threshold in thresholds:
                for sign in [1, -1]:
                    # Weighted error = total weight of misclassified samples.
                    predict = np.where(features * sign >= threshold, 1, -1)
                    error = np.sum(sample_weight[predict != y])
                    # `<=` keeps the last of equally good candidates,
                    # matching the original search order.
                    if error <= self.min_error:
                        self.min_error = error
                        self.best_sign = sign
                        self.best_feature = i
                        self.best_threshold = threshold
        return self.min_error, self.best_feature, self.best_threshold, self.best_sign

    def predict(self, X):
        """Predict labels in {-1, +1} for each row of X using the fitted stump."""
        column = X[:, self.best_feature]
        return np.where(column * self.best_sign >= self.best_threshold, 1, -1)


class AdaBoostClassifier:
    """AdaBoost with decision stumps (DecisionTreeClassifier) as base learners."""

    def __init__(self, n_estimators=50):
        # Number of base estimators to train.
        self.base_num = n_estimators
        # Fitted base estimators.
        self.base = []
        # Weight (alpha) of each base estimator.
        self.base_weight = []
        # Current per-sample weights.
        self.sample_weight = []

    def fit(self, X, y):
        """Train n_estimators stumps, reweighting samples after each round.

        Args:
            X: (n_samples, n_features) array.
            y: labels in {-1, +1}.

        Returns:
            The list of fitted base estimators.
        """
        y = np.asarray(y)
        # Start from uniform sample weights.
        self.sample_weight = np.full(len(y), 1 / len(y))
        for _ in range(self.base_num):
            # Train one stump on the current weighting and keep it.
            stump = DecisionTreeClassifier()
            min_error, _, _, _ = stump.fit(X, y, self.sample_weight)
            self.base.append(stump)
            # BUG FIX: a perfect stump (min_error == 0) made alpha infinite,
            # which turned the next round's sample weights into NaN.
            # Clamp the error away from zero before taking the log.
            min_error = max(min_error, 1e-10)
            alpha = 0.5 * np.log((1 - min_error) / min_error)
            self.base_weight.append(alpha)
            # Boost the weight of misclassified samples, then renormalize.
            predict = stump.predict(X)
            sample_weight = self.sample_weight * np.exp(-1.0 * alpha * predict * y)
            self.sample_weight = sample_weight / np.sum(sample_weight)
        return self.base

    def predict(self, X):
        """Predict labels in {-1, +1} by the sign of the weighted stump vote."""
        final_pre = np.zeros(np.shape(X)[0])
        for alpha, stump in zip(self.base_weight, self.base):
            final_pre += alpha * stump.predict(X)
        # Ties (vote exactly 0) go to +1, as in the original.
        return [1 if v >= 0 else -1 for v in final_pre]

    def score(self, X, y):
        """Return classification accuracy on (X, y)."""
        y_pred = self.predict(X)
        return np.sum(y_pred == y) / len(y)

    def staged_score(self, X, y):
        """Return the cumulative ensemble accuracy after each boosting stage.

        BUG FIX: the original returned each stump's *individual* accuracy
        (while its comment claimed an error probability), which is not
        comparable to sklearn's staged_score — the accuracy of the weighted
        ensemble truncated after each stage. Compute the cumulative score.
        """
        staged_scores = []
        weighted_vote = np.zeros(np.shape(X)[0])
        for alpha, stump in zip(self.base_weight, self.base):
            weighted_vote += alpha * stump.predict(X)
            y_pred = np.where(weighted_vote >= 0, 1, -1)
            staged_scores.append(np.sum(y_pred == y) / len(y))
        return staged_scores


class SklearnAdaBoost:
    """Baseline that fits sklearn's AdaBoostClassifier for comparison."""

    def adaboost(self, X_train, y_train, X_test, y_test):
        """Fit sklearn's AdaBoost and return its staged train/test accuracies.

        BUG FIX: the original instantiated `AdaBoostClassifier()`, which
        resolved to the *locally defined* class — sklearn's estimator was
        never imported — so the "comparison" just ran the custom model a
        second time. Import the real sklearn estimator here.

        Returns:
            (train_staged_scores, test_staged_scores): lists with one
            accuracy per boosting stage.
        """
        # Local import keeps the sklearn dependency at its point of use.
        from sklearn.ensemble import AdaBoostClassifier as SkAdaBoost
        clf = SkAdaBoost(n_estimators=50)
        clf.fit(X_train, y_train)
        print("The train scores of Adaboost is {}.".format(clf.score(X_train, y_train)))
        print("The test scores of Adaboost is {}.".format(clf.score(X_test, y_test)))
        # sklearn's staged_score is a generator; materialize it for plotting.
        train_staged_scores = list(clf.staged_score(X_train, y_train))
        test_staged_scores = list(clf.staged_score(X_test, y_test))
        return train_staged_scores, test_staged_scores



if __name__ == "__main__":
    args = get_arguments()
    # 1. Load the wine dataset.
    # FIX: the original rebound the class names (MyPreprocessing,
    # SklearnAdaBoost) to their instances, shadowing the classes;
    # use distinct instance names instead.
    preprocessing = MyPreprocessing(args)
    df = preprocessing.load_dataset()
    # Keep only the two requested classes.
    datas, target = preprocessing.choose_dataset(df)
    # 2. Shuffle and split the dataset.
    X_train, y_train, X_test, y_test = preprocessing.split_dataset(datas, target)

    # 3. Instantiate the custom AdaBoost.
    ABC_clf = AdaBoostClassifier()
    # 4. Train it.
    ABC_clf.fit(X_train, y_train)
    # 5. Predict and score.
    train_score = ABC_clf.score(X_train, y_train)
    print("My Adaboost train score is {}".format(train_score))
    test_score = ABC_clf.score(X_test, y_test)
    print("My Adaboost test score is {}".format(test_score))

    # Compare staged accuracies against the sklearn baseline.
    baseline = SklearnAdaBoost()
    sklearn_train_staged_scores, sklearn_test_staged_scores = baseline.adaboost(
        X_train, y_train, X_test, y_test)
    my_train_staged_scores = ABC_clf.staged_score(X_train, y_train)
    my_test_staged_scores = ABC_clf.staged_score(X_test, y_test)
    plt.plot(my_train_staged_scores, label='my_train')
    plt.plot(my_test_staged_scores, label='my_test')
    plt.plot(sklearn_train_staged_scores, label='sklearn_train')
    plt.plot(sklearn_test_staged_scores, label='sklearn_test')
    plt.legend()
    plt.show()


