# -*- coding: utf-8 -*-
"""
聚类+三个降维方法
使用聚类扩大有标签数据再筛选高信度数据
尝试在初始化分类器时使用三个随机视角
或者三个特别的降维方法
"""
import numpy as np
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.svm import SVC
from utils import gen_data
from sklearn.cluster import KMeans
from sklearn.utils import check_random_state
from sklearn.metrics import hamming_loss
from sklearn.decomposition import PCA, TruncatedSVD, FactorAnalysis, KernelPCA, SparsePCA


class TriTrainingClassifier(SemiSupervisedClassifier):
    """Tri-training with three dimensionality-reduction views.

    Each of the three base estimators works on its own reduced view of the
    data (PCA / FactorAnalysis / TruncatedSVD).  Pseudo-labels produced for
    the unlabeled pool are cleaned by a KMeans-based cluster-majority filter
    (``correct_pred``) before being fed back to the estimators.  Labels are
    assumed to be binary +1/-1 throughout.
    """

    def correct_pred(self, X, y, n_clusters=5):
        """Filter pseudo-labels by cluster-level majority vote.

        Runs KMeans on ``X`` and keeps only the indices of samples whose
        label in ``y`` agrees with the majority label of their cluster.

        :param X: samples to cluster.
        :param y: predicted +1/-1 labels for ``X``.
        :param n_clusters: number of KMeans clusters.
        :return: list of indices into ``X`` considered reliable.
        """
        # NOTE(review): KMeans gets no random_state here, so this filter is
        # not reproducible even when the classifier itself is seeded.
        km = KMeans(n_clusters=n_clusters)
        cluster_pred = km.fit_predict(X)

        clusters = []       # clusters[c]: indices of the samples assigned to cluster c
        clu_pred = []       # clu_pred[c]: labels (from y) of the samples assigned to cluster c
        inx = []
        for i in range(n_clusters):
            clusters.append([])
            clu_pred.append([])
        for i in range(len(cluster_pred)):
            clusters[cluster_pred[i]].append(i)
            clu_pred[cluster_pred[i]].append(y[i])
        labels = [1 if np.sum(i) > 0 else -1 for i in clu_pred]        # per-cluster majority label: sign of the sum of +1/-1 labels (ties and empty clusters fall to -1)
        for i in range(n_clusters):
            # keep only samples whose label matches their cluster's majority label
            tmp = [clusters[i][j] for j in range(len(clu_pred[i]))if clu_pred[i][j] == labels[i]]
            inx += tmp
        return inx

    def __init__(self, base_estimator, unlabeled_data, unlabeled_label,random_state=None):
        """
        :param base_estimator: classifier to clone for each of the three views.
        :param unlabeled_data: pool of unlabeled samples used for co-labeling.
        :param unlabeled_label: true labels of the pool (used here to monitor
            scores during training, not for fitting).
        :param random_state: forwarded to the parent class.
        """
        super().__init__(base_estimator, unlabeled_data, unlabeled_label, random_state)
        # the three "views": each estimator sees the data through one of these
        self.decomp_algs = [PCA(), FactorAnalysis(), TruncatedSVD()]

    def predict(self, X):
        """Majority-vote prediction over the per-view estimators.

        Each estimator predicts on its own reduced view of ``X``; the summed
        +1/-1 votes decide the output (positive sum -> +1, otherwise -1).
        """
        predictions = [self.estimators_[i].predict(self.decomp_algs[i].transform(X))
                       for i in range(len(self.estimators_))]
        predictions = np.array(predictions)
        vote_predictions = np.sum(predictions, axis=0)
        return np.where(vote_predictions > 0, 1, -1)

    def init_estimators(self, X, y):
        """Create and fit one estimator per dimensionality-reduction view.

        Fits each decomposition algorithm on ``X``, then trains a fresh clone
        of ``base_estimator`` on a bootstrap sample transformed by its view.
        """
        self.estimators_ = []
        # clone the base estimator: same class, same hyper-parameters
        clazz = getattr(self.base_estimator, '__class__')
        params = self.base_estimator.get_params()
        for alg in self.decomp_algs:
            alg.fit(X)
        # self.n_estimator comes from the parent class; presumably 3, one
        # estimator per decomposition algorithm — TODO confirm
        for i in range(self.n_estimator):
            estimator = clazz(**params)
            samples, labels = self._bootstrap_sampling(X, y)
            samples_selected = self.decomp_algs[i].transform(samples)
            estimator.fit(samples_selected, labels)
            self.estimators_.append(estimator)

    def _update_estimator(self, X, y):
        """Tri-training update on labeled data (X, y) plus the unlabeled pool.

        Tri-training-style scheme (cf. Zhou & Li, 2005): for each estimator
        h1, the other two (h2, h3) pseudo-label the unlabeled data; h1 is
        refit on the labeled data plus the filtered pseudo-labels as long as
        the estimated joint error of h2 & h3 keeps decreasing.
        """
        l_pre = [0] * 3     # l_pre[i]: pseudo-labeled set size used by estimator i in the previous round
        e_pre = [0.5] * 3   # e_pre[i]: previous-round error estimate (0.5 = worst case)
        flags = [True] * 3  # flags[i]: estimator i was updated this round
        cycles = 0
        # mahala_score = self.mahala_dist(X, y)
        while np.sum(flags) > 0:
            # track score on the unlabeled pool after every round
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            cycles += 1
            flags = [False] * 3
            for inx in range(self.n_estimator):
                L_X = []
                L_y = []
                # estimate the joint error e of the other two classifiers (h2 & h3)
                # on the labeled data: fraction of samples they do NOT both get right
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(self.decomp_algs[index[0]].transform(X))
                h3 = self.estimators_[index[1]].predict(self.decomp_algs[index[1]].transform(X))
                correct = (h2 == h3) & (h2 == y)
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # pseudo-label the unlabeled pool with the other two classifiers
                    h2_predict = self.estimators_[index[0]].\
                        predict(self.decomp_algs[index[0]].transform(self.unlabeled_data))
                    h3_predict = self.estimators_[index[1]].\
                        predict(self.decomp_algs[index[1]].transform(self.unlabeled_data))
                    # inx_agree = h2_predict == h3_predict
                    # new_data = self.unlabeled_data[inx_agree]
                    # new_label = h2_predict[inx_agree]
                    # L_X = np.array(new_data)
                    # L_y = np.array(new_label)
                    # enlarge the labeled set via clustering: where h2 and h3
                    # agree take their label, otherwise assign a random +/-1
                    # NOTE(review): np.random is unseeded here, so runs are not
                    # reproducible regardless of random_state.
                    pred = np.zeros(len(self.unlabeled_data))
                    for i in range(len(self.unlabeled_data)):
                        if h2_predict[i] == h3_predict[i]:
                            pred[i] = h2_predict[i]
                        else:
                            pred[i] = 1 if np.random.rand() > 0.5 else -1
                    correct_inx = self.correct_pred(self.unlabeled_data, pred)
                    L_X = self.unlabeled_data[correct_inx]
                    L_y = pred[correct_inx]
                    # (alternative, disabled) use clustering to filter high-confidence data
                    # correct_inx = self.correct_pred(L_X, L_y)
                    # L_X = self.unlabeled_data[correct_inx]
                    # L_y = L_y[correct_inx]
                    if l_pre[inx] == 0:
                        # first round for this estimator: initialise l_pre per the tri-training bound
                        l_pre[inx] = np.floor(e/(e_pre[inx]-e)+1)
                    if l_pre[inx] < len(L_X):
                        # acceptance condition: e*|L| < e_pre*l_pre keeps the bound shrinking
                        if e*len(L_X) < e_pre[inx] * l_pre[inx]:
                            flags[inx] = True
                        elif l_pre[inx] > (e/(e_pre[inx]-e)):
                            # |L| too large: subsample it down so the bound still holds
                            size = np.ceil((e_pre[inx]*l_pre[inx])/e-1)
                            # score = mahala_score[correct_inx]
                            # score_sorted = np.argsort(score)
                            # score_inx = score_sorted[-1:int(-1-size):-1]
                            # L_X = L_X[score_inx]
                            # L_y = L_y[score_inx]
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            flags[inx] = True
                if flags[inx]:
                    # refit estimator inx on labeled data + accepted pseudo-labels,
                    # transformed through its own view
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    L_X = np.concatenate((X, L_X), axis=0)
                    L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(self.decomp_algs[inx].transform(L_X), L_y)
        # print("training took %s cycles" % cycles)


if __name__ == '__main__':
    # Experiment: average relative improvement of the hamming loss on the
    # german dataset after tri-training, over 50 independent runs.
    data_dir = r'../data/csv/'
    dataset = 'german.csv'
    (train_data, train_label, test_data, test_label,
     labeled_data, labeled_label, unlabeled_data, unlabeled_label) = \
        gen_data(data_dir + dataset, unlabeled_rate=0.8, random_state=919)

    base_svc = SVC()
    improvements = []

    for _ in range(50):
        clf = TriTrainingClassifier(base_svc, unlabeled_data, unlabeled_label)
        clf.init_estimators(labeled_data, labeled_label)
        # loss of the freshly initialised ensemble, before tri-training
        loss_before = hamming_loss(test_label, clf.predict(test_data))
        clf.fit(labeled_data, labeled_label)
        # train_loss = hamming_loss(train_label, rstri.predict(train_data))
        loss_after = hamming_loss(test_label, clf.predict(test_data))
        # scores.append(2*(train_loss - test_loss)/(train_loss + test_loss))
        # symmetric relative improvement of the test loss
        improvements.append(2*(loss_before - loss_after)/(loss_before + loss_after))

    print(np.average(improvements))
