# -*- coding: utf-8 -*-
"""
使用三种分类器
"""
import numpy as np
import matplotlib.pyplot as plt
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from utils import load_txt_regex
from utils import gen_data
from sklearn.metrics import hamming_loss


class TriTrainingClassifier(SemiSupervisedClassifier):
    """Tri-training ensemble (after Zhou & Li, 2005).

    Three base estimators are bootstrapped from the labeled data; during
    refinement, each pair of estimators pseudo-labels unlabeled samples
    for the remaining one, subject to tri-training's error conditions.

    NOTE(review): depends on members supplied by SemiSupervisedClassifier
    (n_estimator, base_estimator, _bootstrap_sampling, unlabeled_data,
    unlabeled_label, train_scores, score, predict) — not visible in this
    file; confirm their contracts against the base class.
    """

    def init_estimators(self, X, y):
        """Create and fit the initial ensemble.

        Each estimator is a fresh, unfitted clone of the matching base
        estimator (same class, same hyper-parameters), trained on an
        independent bootstrap resample of the labeled data (X, y).
        """
        self.estimators_ = []
        for i in range(self.n_estimator):
            # Manual clone: rebuild the estimator from its class and
            # hyper-parameters so no fitted state is carried over.
            clazz = getattr(self.base_estimator[i], '__class__')
            params = self.base_estimator[i].get_params()
            estimator = clazz(**params)
            samples, labels = self._bootstrap_sampling(X, y)
            estimator.fit(samples, labels)
            self.estimators_.append(estimator)

    def _update_estimator(self, X, y):
        """Iteratively refine the three estimators with pseudo-labels.

        X, y are the labeled training data. Loops until a full cycle
        passes in which no estimator was retrained.
        """
        l_pre = [0] * 3     # |L_i| at the previous accepted update (l' in the paper)
        e_pre = [0.5] * 3   # previous-round error estimate per estimator (e')
        flags = [True] * 3  # True -> estimator i was retrained this cycle
        cycles = 0
        while np.sum(flags) > 0:
            # Record ensemble accuracy on the unlabeled pool once per cycle
            # (labels of the "unlabeled" pool are retained for evaluation).
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            cycles += 1
            flags = [False] * 3
            for inx in range(self.n_estimator):
                L_X = []
                L_y = []
                # Estimate the error e of the other two estimators (h2, h3)
                # on the labeled data.
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(X)
                h3 = self.estimators_[index[1]].predict(X)
                correct = (h2 == h3) & (h2 == y)
                # NOTE(review): samples where h2 and h3 disagree count as
                # errors here; the original tri-training algorithm measures
                # error only over agreed samples — confirm this is intended.
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # Pseudo-label: keep the unlabeled samples on which h2
                    # and h3 agree, labeled with their shared prediction.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_agree = h2_predict == h3_predict
                    new_data = self.unlabeled_data[inx_agree]
                    new_label = h2_predict[inx_agree]
                    L_X = np.array(new_data)
                    L_y = np.array(new_label)
                    if l_pre[inx] == 0:
                        # First acceptance: initialize l' to the minimum
                        # pseudo-label count, floor(e / (e' - e) + 1).
                        # (e < e_pre here, so the denominator is positive.)
                        l_pre[inx] = np.floor(e/(e_pre[inx]-e)+1)
                    if l_pre[inx] < len(L_X):
                        if e*len(L_X) < e_pre[inx] * l_pre[inx]:
                            # Error mass shrank (e*|L| < e'*l'): accept all.
                            flags[inx] = True
                        elif l_pre[inx] > (e/(e_pre[inx]-e)):
                            # Too many pseudo-labels: subsample down to
                            # ceil(e'*l'/e - 1) so the condition holds.
                            size = np.ceil((e_pre[inx]*l_pre[inx])/e-1)
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            flags[inx] = True
                if flags[inx]:
                    # Accepted: remember e' and l' (before augmenting), then
                    # retrain on labeled data plus the pseudo-labeled set.
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    L_X = np.concatenate((X, L_X), axis=0)
                    L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X, L_y)
        # print("training took %s cycles" % cycles)


if __name__ == '__main__':
    # Evaluate tri-training on the banana dataset: average the symmetric
    # relative improvement of the test Hamming loss over 50 runs.
    path = r'../data/csv/'
    name = 'banana.csv'
    (train_data, train_label, test_data, test_label,
     labeled_data, labeled_label, unlabeled_data, unlabeled_label) = \
        gen_data(path+name, unlabeled_rate=0.8, random_state=919)

    estimator = SVC()
    scores = []

    for cycle in range(50):
        model = TriTrainingClassifier(estimator, unlabeled_data, unlabeled_label)
        model.init_estimators(labeled_data, labeled_label)
        # Loss of the freshly bootstrapped ensemble, before co-training.
        loss_before = hamming_loss(test_label, model.predict(test_data))
        model.fit(labeled_data, labeled_label)
        loss_after = hamming_loss(test_label, model.predict(test_data))
        # Symmetric relative improvement: positive when fit() reduced loss.
        scores.append(2*(loss_before - loss_after)/(loss_before + loss_after))

    print(np.average(scores))
