# -*- coding: utf-8 -*-
import numpy as np
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.utils import check_random_state
from sklearn.metrics import accuracy_score
from utils import gen_data
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC


class CoForestClassifier(SemiSupervisedClassifier):
    """Co-Forest (Li & Zhou, 2007) semi-supervised ensemble classifier.

    Each base estimator is fit on a bootstrap sample restricted to a random
    feature subspace.  During co-training, unlabeled samples that the
    concomitant ensemble (all estimators except the current one) labels with
    high confidence are added to that estimator's training set.

    Labels are assumed to be binary and encoded as {-1, +1} (see ``predict``).
    """

    def __init__(self, base_estimator, unlabeled_data, unlabeled_label, n_estimator=10, random_state=None):
        """
        Parameters
        ----------
        base_estimator : estimator
            Prototype classifier; one fresh clone per ensemble member is built
            from its class and ``get_params()``.
        unlabeled_data, unlabeled_label : array-like
            Unlabeled pool handed to the parent class; only ``unlabeled_data``
            is used for pseudo-labeling here.
        n_estimator : int, default=10
            Number of ensemble members.
        random_state : int, RandomState instance or None
            Seed for the feature-subspace selection (and, presumably, the
            parent's bootstrap sampling — parent class not visible here).
        """
        super().__init__(base_estimator, unlabeled_data, unlabeled_label, random_state)
        self.n_estimator = n_estimator
        self.feat_inx = []  # per-estimator feature-subspace index arrays

    def predict(self, X):
        """Predict by majority vote over the ensemble.

        Votes are summed assuming {-1, +1} labels; a non-positive sum
        (including an exact tie) yields -1.
        """
        data_selected = self._select_feature(X)
        predictions = np.array([estimator.predict(data_selected[i])
                                for i, estimator in enumerate(self.estimators_)])
        vote_predictions = np.sum(predictions, axis=0)
        return np.where(vote_predictions > 0, 1, -1)

    def _init_estimators(self, X, y):
        """Build the initial ensemble: one clone of ``base_estimator`` per
        member, each fit on a bootstrap sample projected onto its own random
        feature subspace."""
        self.estimators_ = []
        clazz = type(self.base_estimator)
        params = self.base_estimator.get_params()
        self._select_feature_index(X)
        for i in range(self.n_estimator):
            estimator = clazz(**params)
            samples, labels = self._bootstrap_sampling(X, y)
            estimator.fit(samples[:, self.feat_inx[i]], labels)
            self.estimators_.append(estimator)

    def _update_estimator(self, X, y):
        """Co-training loop: repeatedly refit each estimator with confidently
        pseudo-labeled samples from the unlabeled pool, until no estimator
        can be improved (all flags stay False for a full cycle).

        ``e`` / ``w`` follow the Co-Forest paper: concomitant-ensemble error
        and total weight of confident pseudo-labels; refitting requires
        ``e * w < e_pre * w_pre`` (directly or after subsampling).
        """
        w_pre = [0] * self.n_estimator      # W_{i,t-1}: previous confident-sample weight
        e_pre = [0.5] * self.n_estimator    # e_{i,t-1}: previous concomitant error
        flags = [True] * self.n_estimator
        cycles = 0
        labeled_data = self._select_feature(X)
        unlabeled_data = self._select_feature(self.unlabeled_data)
        while np.sum(flags) > 0:
            cycles += 1
            flags = [False] * self.n_estimator
            for inx in range(self.n_estimator):
                # Concomitant-ensemble predictions on the labeled data
                # (every estimator except the current one votes).
                pred_labeled = np.array([self.estimators_[i].predict(labeled_data[i])
                                         for i in range(self.n_estimator) if i != inx])
                vote_labeled = np.where(np.sum(pred_labeled, axis=0) > 0, 1, -1)
                # Concomitant-ensemble error on the labeled data.
                e = 1 - accuracy_score(y, vote_labeled)
                # Concomitant-ensemble predictions on the unlabeled pool.
                pred_unlabeled = np.array([self.estimators_[i].predict(unlabeled_data[i])
                                           for i in range(self.n_estimator) if i != inx])
                vote_unlabeled = np.where(np.sum(pred_unlabeled, axis=0) > 0, 1, -1)
                # Confidence = agreement ratio among the N-1 voters.  The
                # original divided by N, which caps confidence at (N-1)/N and
                # skews the 0.7 threshold.
                confidence = np.sum(pred_unlabeled == vote_unlabeled, axis=0) / len(pred_unlabeled)
                cofident_inx = confidence > 0.7
                w = np.sum(cofident_inx)
                # High-confidence pseudo-labeled samples.
                L_X = self.unlabeled_data[cofident_inx]
                L_y = vote_unlabeled[cofident_inx]
                if e < e_pre[inx]:
                    if w_pre[inx] == 0:
                        # Bootstrap W_{i,t-1} so the e*w < e_pre*w_pre test is meaningful.
                        w_pre[inx] = np.floor(e / (e_pre[inx] - e) + 1)
                    if w_pre[inx] < w:
                        if e * w < e_pre[inx] * w_pre[inx]:
                            flags[inx] = True
                        elif w_pre[inx] > (e / (e_pre[inx] - e)):
                            # Subsample the confident set so e*|L| stays
                            # below e_pre * w_pre.
                            size = np.ceil((e_pre[inx] * w_pre[inx]) / e - 1)
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            flags[inx] = True
                if flags[inx]:
                    e_pre[inx] = e
                    w_pre[inx] = w
                    # Refit on labeled data plus accepted pseudo-labeled data,
                    # projected onto this estimator's feature subspace.
                    L_X = np.concatenate((X, L_X), axis=0)
                    L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X[:, self.feat_inx[inx]], L_y)
        print("训练经过%stimes" % cycles)

    def _select_feature_index(self, X, size=None):
        """Draw one random feature subspace per estimator (without
        replacement, so a subspace never repeats a column) and cache the
        index arrays in ``self.feat_inx``.

        ``size`` defaults to floor(sqrt(n_features)), the usual
        random-forest heuristic.
        """
        _, dim = X.shape
        if size is None:
            size = int(np.sqrt(dim))
        size = min(size, dim)
        # Seed from the instance so runs are reproducible; the original code
        # passed None here and silently ignored ``random_state``.
        random_state = check_random_state(getattr(self, 'random_state', None))
        self.feat_inx = [random_state.choice(dim, size, replace=False)
                         for _ in range(self.n_estimator)]
        return self.feat_inx

    def _select_feature(self, X):
        """Project X onto each estimator's feature subspace.

        Returns a list of arrays, one per estimator (lazily drawing the
        subspaces from X if they have not been chosen yet).
        """
        if len(self.feat_inx) <= 0:
            self._select_feature_index(X)
        return [X[:, inx] for inx in self.feat_inx]


if __name__ == '__main__':
    # Smoke test: Co-Forest on the banana dataset with 80% of labels hidden.
    csv_path = r'../data/csv/' + 'banana.csv'
    splits = gen_data(csv_path, unlabeled_rate=0.8, random_state=919)
    (train_data, train_label, test_data, test_label,
     labeled_data, labeled_label, unlabeled_data, unlabeled_label) = splits

    cofo = CoForestClassifier(SVC(), unlabeled_data, unlabeled_label, n_estimator=10)
    cofo.fit(labeled_data, labeled_label)
    print(accuracy_score(test_label, cofo.predict(test_data)))
