# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from utils import load_txt_regex


class DisturbedTriTrainingClassifier(BaseEstimator, ClassifierMixin):
    """Semi-supervised tri-training with random-label disturbance.

    Three copies of ``base_estimator`` teach one another: unlabeled samples
    on which two of the classifiers agree are pseudo-labeled and used to
    refit the third, and a small fraction (``noise_rate``) of samples drawn
    from the *disagreement* region, given uniformly random labels, is mixed
    in as a disturbance.

    Labels are assumed binary and encoded as -1 / +1 (required both by the
    majority vote in :meth:`predict` and by the random noise labels).

    Parameters
    ----------
    base_estimator : estimator
        Prototype classifier; three fresh copies are built from its class
        and parameters.
    unlabeled_data : array-like of shape (n_unlabeled, n_features)
        Pool of unlabeled samples used for co-labeling.
    noise_rate : float, default 0.1
        Fraction of the pseudo-labeled set added as randomly labeled
        disturbance samples.
    random_state : int, RandomState instance or None, default None
        Controls the random noise labels.
    """

    def __init__(self, base_estimator, unlabeled_data, noise_rate=0.1, random_state=None):
        super().__init__()
        self.base_estimator = base_estimator
        # Snapshot the pool as an ndarray so boolean masking works below.
        self.unlabeled_data = np.array(unlabeled_data)
        self.noise_rate = noise_rate
        self.random_state = random_state
        self.estimators_ = []

    def fit(self, X, y):
        """Fit the three-classifier ensemble on labeled data ``(X, y)``.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        y : array-like of shape (n_samples,), labels in {-1, +1}

        Returns
        -------
        self
        """
        X = np.array(X)
        y = np.array(y).flatten()
        if len(self.estimators_) != 3:
            self._init_estimators(X, y)
        self._update_estimator(X, y)
        # Bug fix: fit() must return the estimator (scikit-learn contract);
        # the original returned None, breaking chaining and clone-based tools.
        return self

    def predict(self, X):
        """Predict labels for ``X`` by majority vote of the three estimators.

        With three voters and labels in {-1, +1} the vote sum is always odd,
        so a tie cannot occur.
        """
        predictions = np.array([estimator.predict(X) for estimator in self.estimators_])
        vote_predictions = np.sum(predictions, axis=0)
        return np.where(vote_predictions > 0, 1, -1)

    def _update_estimator(self, X, y):
        """Run disturbed tri-training rounds until no estimator is refit.

        For each estimator h_i the other two (h_j, h_k) estimate its error
        on the labeled data; when the error has dropped, unlabeled samples
        on which h_j and h_k agree are pseudo-labeled for h_i and, together
        with randomly labeled noise drawn from the disagreement region,
        appended to the labeled set for refitting.
        """
        # Bug fix: create the RNG once per update pass. The original built a
        # new RandomState inside the inner loop, so an integer seed produced
        # identical "random" noise labels on every single refit.
        random_state = check_random_state(self.random_state)
        l_pre = [0] * 3      # size of the accepted pseudo-labeled set, per estimator
        e_pre = [0.5] * 3    # error estimate from the previous accepted round
        flags = [True] * 3   # whether estimator i gets refit this round
        cycles = 0
        while np.sum(flags) > 0:
            flags = [False] * 3
            for inx in range(3):
                L_X = []
                L_y = []
                # Estimate the error e of the other two classifiers (h2 & h3)
                # on the labeled data: fraction where they agree yet are wrong.
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(X)
                h3 = self.estimators_[index[1]].predict(X)
                correct = (h2 == h3) & (h2 == y)
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # Pseudo-label the unlabeled samples on which the two
                    # other classifiers give the same prediction.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_agree = h2_predict == h3_predict
                    L_X = np.array(self.unlabeled_data[inx_agree])
                    L_y = np.array(h2_predict[inx_agree])
                    if l_pre[inx] == 0:
                        # First acceptance: seed l_pre with the smallest set
                        # size for which the error-reduction bound can hold.
                        l_pre[inx] = np.floor((e + 0.5 * self.noise_rate) / (e_pre[inx] - e) + 1)
                    if l_pre[inx] < len(L_X):
                        if (e + 0.5 * self.noise_rate) * len(L_X) < (e_pre[inx] + 0.5 * self.noise_rate) * l_pre[inx]\
                                and self.noise_rate * len(L_X) < len(X):
                            flags[inx] = True
                        elif l_pre[inx] > ((e + 0.5 * self.noise_rate) / (e_pre[inx] - e)):
                            # L is too large for the bound: subsample it down
                            # to a size for which the condition is satisfied.
                            size = np.ceil((e_pre[inx] + 0.5 * self.noise_rate) / (e + 0.5 * self.noise_rate) * l_pre[inx] - 1)
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            if self.noise_rate * len(L_X) < len(X):
                                flags[inx] = True
                if flags[inx]:
                    cycles += 1
                    # Disturbance: draw noise samples from the region where
                    # the two classifiers DISagree and label them at random.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_disagree = h2_predict != h3_predict
                    disagree_data = self.unlabeled_data[inx_disagree]
                    if len(disagree_data) < int(self.noise_rate * len(L_X)):
                        # Not enough disagreement samples: take them all.
                        noise_size = len(disagree_data)
                        noise_data = np.array(disagree_data)
                    else:
                        noise_size = int(self.noise_rate * len(L_X))
                        noise_data = self._bootstrap_sampling(disagree_data, size=noise_size)
                    # Uniformly random -1/+1 labels for the noise samples.
                    noise_label = np.where(random_state.rand(noise_size) > 0.5, 1, -1)
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    if len(noise_data) > 0:
                        L_X = np.concatenate((X, L_X, noise_data), axis=0)
                        L_y = np.concatenate((y, L_y, noise_label), axis=0)
                    else:
                        L_X = np.concatenate((X, L_X), axis=0)
                        L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X, L_y)
        print("训练经过%s times" % cycles)

    def _init_estimators(self, X, y):
        """Create three fresh copies of ``base_estimator`` and fit each on
        an independent bootstrap sample of the labeled data.

        NOTE(review): a bootstrap sample may happen to contain a single
        class, in which case the underlying estimator's fit can raise
        ValueError — callers retry on that.
        """
        self.estimators_ = []
        clazz = type(self.base_estimator)
        params = self.base_estimator.get_params()
        for _ in range(3):
            estimator = clazz(**params)
            samples, labels = self._bootstrap_sampling(X, y)
            estimator.fit(samples, labels)
            self.estimators_.append(estimator)

    @staticmethod
    def _bootstrap_sampling(X, y=None, size=None, random_state=None):
        """Draw a bootstrap sample (with replacement).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples without labels.
        y : array-like of shape (n_samples,) or None
            Labels; when None, only the sampled data is returned.
        size : int or None
            Sample size; defaults to ``len(X)``.
        random_state : int, RandomState instance or None
            Seed for the draw.

        Returns
        -------
        ``(samples, labels)`` when ``y`` is given, otherwise ``samples``.
        """
        random_state = check_random_state(random_state)
        if size is None:
            size = len(X)
        # Same uniform-index distribution as the original per-element
        # int(rand() * len(X)) loop, done in one vectorized draw.
        indices = (random_state.rand(size) * len(X)).astype(int)
        samples = np.asarray(X)[indices]
        if y is not None:
            return samples, np.asarray(y)[indices]
        return samples


if __name__ == '__main__':
    # Load the first split of the breast-cancer benchmark.
    path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'
    train_data = load_txt_regex(path % ('train', 'data', 1))
    train_label = load_txt_regex(path % ('train', 'labels', 1)).flatten()
    # Keep 40% as labeled data; treat the remaining 60% as the unlabeled pool.
    labeled_data, unlabeled_data, labeled_label, unlabeled_label = \
        train_test_split(train_data, train_label, test_size=0.6, random_state=919)
    test_data = load_txt_regex(path % ('test', 'data', 1))
    test_label = load_txt_regex(path % ('test', 'labels', 1)).flatten()

    # Baselines: a single SVC on all labels vs. on the labeled subset only.
    estimator = SVC()
    estimator.fit(train_data, train_label)
    estimator_ar = estimator.score(test_data, test_label)
    print("单个分类器在整个标签数据上准确率为%s" % estimator_ar)
    estimator.fit(labeled_data, labeled_label)
    estimator_ar = estimator.score(test_data, test_label)
    print("单个分类器在部分标签数据上准确率为%s" % estimator_ar)

    cycles = 1000

    # Sweep noise_rate and plot the average tri-training accuracy per rate.
    ave_ars = []
    ranges = [x * 0.1 for x in range(1, 100)]
    for noise_rate in ranges:
        tri_ars = []
        print("run for noise rate %s" % noise_rate)
        # Bug fix: the original used `for i in range(cycles)` with `i -= 1`
        # on failure — reassigning the loop variable does not repeat an
        # iteration, so failed runs silently shrank the sample. Collect
        # `cycles` successful runs instead, retrying on ValueError (raised
        # when a degenerate bootstrap sample breaks SVC.fit), with a cap on
        # total attempts to avoid an endless retry loop.
        attempts = 0
        while len(tri_ars) < cycles and attempts < cycles * 10:
            attempts += 1
            try:
                tri = DisturbedTriTrainingClassifier(estimator, unlabeled_data, noise_rate)
                tri.fit(labeled_data, labeled_label)
                tri_ars.append(tri.score(test_data, test_label))
            except ValueError:
                continue
        ave_ars.append(np.average(tri_ars))
    plt.figure()
    plt.plot(ranges, ave_ars, '.k-')
    plt.xlabel("Noise Rate")
    plt.ylabel("Average Accuracy Rate")
    plt.show()