# -*- coding: utf-8 -*-
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.utils import check_random_state
import numpy as np
from utils import load_txt_regex
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score


class AdaBoostClassifier(BaseEstimator, ClassifierMixin):
    """AdaBoost ensemble for binary classification with labels in {-1, +1}.

    Each boosting round draws a weighted bootstrap sample, fits a fresh
    clone of ``base_estimator`` on it, scores the clone on the *full*
    training set, and re-weights the samples so that misclassified ones
    are drawn more often in the next round.  After fitting, only the
    fraction of estimators with the lowest training error is retained
    (see :meth:`_select_sort`).

    :param base_estimator: estimator implementing ``fit``/``predict``/
        ``get_params``; a new clone is created each boosting round.
    :param n_estimators: number of boosting rounds.
    :param random_state: seed (``None``, int, or ``RandomState``) that
        drives the weighted resampling.
    """

    def __init__(self, base_estimator=None, n_estimators=50, random_state=None):
        super().__init__()
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.random_state = random_state
        self.estimators_ = []
        self.estimator_weights_ = []
        self.estimator_errors_ = []

    def fit(self, X, y, sample_weight=None):
        """Fit ``n_estimators`` boosted copies of the base estimator.

        :param X: training samples, shape (n_samples, n_features).
        :param y: training labels in {-1, +1}, shape (n_samples,).
        :param sample_weight: optional initial weights; uniform if omitted.
        :return: ``self``.
        """
        X = np.asarray(X)
        y = np.asarray(y).flatten()
        if sample_weight is None:
            sample_weight = np.full(len(X), 1.0 / len(X))
        else:
            sample_weight = np.asarray(sample_weight, dtype=float).flatten()
        # BUGFIX: reset fitted state so repeated fit() calls do not
        # accumulate estimators from previous fits.
        self.estimators_ = []
        self.estimator_weights_ = []
        self.estimator_errors_ = []
        # BUGFIX: build the random state once per fit.  The original built
        # it inside sampling(), so with an integer seed every boosting
        # round drew the exact same bootstrap sample.
        self._rng = check_random_state(self.random_state)
        for iboost in range(self.n_estimators):
            sample_weight, estimator_weight, estimator_error, estimator = \
                self._boost(iboost, X, y, sample_weight)
            self.estimator_errors_.append(estimator_error)
            self.estimator_weights_.append(estimator_weight)
            self.estimators_.append(estimator)
        self._select_sort()
        return self

    def predict(self, X):
        """Predict {-1, +1} labels via the weighted vote of the ensemble.

        :param X: samples to classify, shape (n_samples, n_features).
        :return: ndarray of predicted labels in {-1, +1}.
        """
        predictions = np.array([est.predict(X) for est in self.estimators_])
        scores = np.asarray(self.estimator_weights_).dot(predictions)
        # A zero (tied) score maps to -1, matching the original behavior.
        return np.where(scores > 0, 1, -1)

    def _select_estimators(self):
        """Prune the fitted ensemble after training (currently unused).

        Keeps only the estimators whose training error is below the
        ensemble's mean error, which also drops negative-weight
        (worse-than-chance) estimators in practice.
        """
        mean_error = np.average(self.estimator_errors_)
        keep = [i for i in range(len(self.estimator_weights_))
                if self.estimator_errors_[i] < mean_error]
        self.estimators_ = [self.estimators_[i] for i in keep]
        self.estimator_errors_ = [self.estimator_errors_[i] for i in keep]
        self.estimator_weights_ = [self.estimator_weights_[i] for i in keep]

    def _select_sort(self, percent=0.7):
        """Keep only the ``percent`` fraction of estimators with the
        smallest training error.

        :param percent: fraction of estimators to retain (by lowest error).
        """
        order = np.argsort(self.estimator_errors_)
        size = int(len(self.estimators_) * percent)
        keep = order[:size]
        self.estimators_ = [self.estimators_[i] for i in keep]
        self.estimator_errors_ = [self.estimator_errors_[i] for i in keep]
        self.estimator_weights_ = [self.estimator_weights_[i] for i in keep]

    def _boost(self, iboost, X, y, sample_weight):
        """Run one boosting round.

        :return: (updated sample_weight, estimator weight alpha,
                  estimator training error, fitted estimator).
        """
        sample_X, sample_y = self.sampling(X, y, None, sample_weight)
        estimator = self._make_estimator()
        estimator.fit(sample_X, sample_y)
        predictions = estimator.predict(X)
        estimator_error = 1. - accuracy_score(y, predictions)
        # BUGFIX: clamp the error away from 0 and 1 so the log-odds below
        # never divides by zero (perfect round) or takes log(0).
        clipped_error = np.clip(estimator_error, 1e-10, 1 - 1e-10)
        estimator_weight = 0.5 * np.log((1 - clipped_error) / clipped_error)
        # Correct samples get factor exp(-alpha); wrong ones exp(+alpha).
        correct = predictions == y
        sample_weight = sample_weight * np.exp(estimator_weight * (1 - 2 * correct))
        sample_weight /= sample_weight.sum()
        return sample_weight, estimator_weight, estimator_error, estimator

    def _make_estimator(self):
        """Build a fresh clone of the base estimator from its parameters.

        The base estimator must implement ``get_params``.

        :return: a new, unfitted estimator of the same class/configuration.
        """
        clazz = getattr(self.base_estimator, '__class__')
        params = self.base_estimator.get_params()
        return clazz(**params)

    def sampling(self, X, y, size=None, sample_weight=None):
        """Draw ``size`` samples from (X, y) with replacement,
        proportionally to ``sample_weight`` (inverse-CDF sampling).

        :param X: samples, indexable along axis 0.
        :param y: labels aligned with ``X``.
        :param size: number of draws; defaults to ``len(X)``.
        :param sample_weight: per-sample weights; uniform if omitted.
        :return: (sampled X, sampled y) as ndarrays.
        """
        if sample_weight is None:
            sample_weight = np.full(len(X), 1.0 / len(X))
        else:
            sample_weight = np.asarray(sample_weight, dtype=float)
        if size is None:
            size = len(X)
        # Reuse the per-fit RandomState when present so successive rounds
        # draw different samples; fall back for standalone calls.
        state = getattr(self, '_rng', None)
        if state is None:
            state = check_random_state(self.random_state)
        cumsum_weight = sample_weight.cumsum()
        # PERF: vectorized searchsorted replaces the original
        # O(size * n) nested linear scan; side='left' selects the first
        # index whose cumulative weight reaches the draw, exactly as the
        # original inner loop did.
        draws = state.rand(size) * cumsum_weight[-1]
        idx = np.searchsorted(cumsum_weight, draws, side='left')
        return np.asarray(X)[idx], np.asarray(y)[idx]


if __name__ == '__main__':
    # Load the breast-cancer split via the project-local regex loader.
    path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'
    train_data = load_txt_regex(path % ('train', 'data', 1))
    train_label = load_txt_regex(path % ('train', 'labels', 1)).flatten()
    test_data = load_txt_regex(path % ('test', 'data', 1))
    test_label = load_txt_regex(path % ('test', 'labels', 1)).flatten()

    accuracy_rates = []
    cycles = 1000
    n_estimators = 10

    # Benchmark the ensemble classifier over `cycles` successful runs.
    # BUGFIX: the original decremented the for-loop variable inside
    # `except`, which is a no-op (the next iteration reassigns it), so
    # failed runs were silently dropped.  A while loop with an attempt
    # cap actually retries, and the cap prevents an infinite loop if a
    # run can never succeed.
    run = 0
    attempts = 0
    max_attempts = cycles * 2
    while run < cycles and attempts < max_attempts:
        attempts += 1
        try:
            print('run %s times...' % run)
            svm = SVC()
            ada = AdaBoostClassifier(svm, n_estimators=n_estimators)
            ada.fit(train_data, train_label)
            predictions = ada.predict(test_data)
            accuracy_rates.append(accuracy_score(test_label, predictions))
            run += 1
        except ValueError as e:
            # Report rather than swallow, then retry this run.
            print('run %s failed (%s), retrying...' % (run, e))

    # Guard against an empty result list (np.max/np.min would raise).
    if accuracy_rates:
        print('运行 %s 次最大，最小，平均准确率为：%s, %s, %s'
              % (cycles, np.max(accuracy_rates), np.min(accuracy_rates), np.average(accuracy_rates)))






