# -*- coding: utf-8 -*-
"""
聚类
尝试引入聚类算法辅助标记无标签数据
"""
import numpy as np
import matplotlib.pyplot as plt
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from utils import load_txt_regex
from sklearn.cluster import KMeans


class TriTrainingClassifier(SemiSupervisedClassifier):
    """Tri-training semi-supervised classifier with cluster-based filtering.

    On top of standard tri-training, the pseudo-labels produced by pairs of
    base estimators are additionally filtered by K-Means clustering: only
    unlabeled samples whose pseudo-label agrees with the majority label of
    the cluster they fall into are accepted as new training data.
    """

    def correct_pred(self, X, y, n_clusters=5):
        """Return indices of samples whose label matches their cluster's vote.

        X is clustered with K-Means; within each cluster a majority vote over
        the (pseudo-)labels ``y`` decides the cluster label, and only samples
        agreeing with that vote are kept.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to cluster (typically the unlabeled pool).
        y : array-like of shape (n_samples,)
            (Pseudo-)labels for X. Assumed binary +1/-1: the majority vote
            below relies on the sign of the label sum, and ties resolve to -1.
        n_clusters : int, default=5
            Number of K-Means clusters.

        Returns
        -------
        list of int
            Indices into X considered consistently labeled.
        """
        km = KMeans(n_clusters=n_clusters)
        cluster_pred = km.fit_predict(X)

        # Group sample indices and their labels by cluster id.
        clusters = [[] for _ in range(n_clusters)]   # sample indices per cluster
        clu_pred = [[] for _ in range(n_clusters)]   # labels per cluster
        for i, c in enumerate(cluster_pred):
            clusters[c].append(i)
            clu_pred[c].append(y[i])
        # Majority vote per cluster: positive label sum -> +1, otherwise -1.
        labels = [1 if np.sum(p) > 0 else -1 for p in clu_pred]
        # Keep only the samples whose label matches their cluster's vote.
        inx = []
        for i in range(n_clusters):
            inx += [idx for idx, lab in zip(clusters[i], clu_pred[i])
                    if lab == labels[i]]
        return inx

    def _update_estimator(self, X, y):
        """Run the tri-training update loop over the three base estimators.

        Parameters
        ----------
        X, y : array-like
            The labeled training data.

        For each estimator h1, the other two (h2, h3) pseudo-label the
        unlabeled pool; the pseudo-labels are then filtered by cluster
        consistency (``correct_pred``) before being added to h1's training
        set, subject to the tri-training acceptance bound e*L < e'*L'.
        Iterates until no estimator was updated in a round.
        """
        l_pre = [0] * 3      # L': size of the pseudo-labeled set used last round
        e_pre = [0.5] * 3    # e': joint error of (h2, h3) measured last round
        flags = [True] * 3   # whether each estimator gets refit this round
        cycles = 0
        while np.sum(flags) > 0:
            # Track accuracy on the unlabeled pool (its true labels are kept
            # around for evaluation/plotting only).
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            cycles += 1
            flags = [False] * 3
            # NOTE(review): the loop bound is self.n_estimator but the pairing
            # below hard-codes 3 estimators — tri-training assumes exactly 3;
            # confirm n_estimator is always 3 in the base class.
            for inx in range(self.n_estimator):
                L_X = []
                L_y = []
                # Estimate the joint error e of the other two estimators
                # (h2, h3) on the labeled data: e = 1 - fraction of samples
                # where h2 and h3 agree AND are correct.
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(X)
                h3 = self.estimators_[index[1]].predict(X)
                correct = (h2 == h3) & (h2 == y)
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # Pseudo-label the unlabeled pool: keep agreed predictions,
                    # break disagreements with a random +1/-1 label.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    pred = np.zeros(len(self.unlabeled_data))
                    for i in range(len(self.unlabeled_data)):
                        if h2_predict[i] == h3_predict[i]:
                            pred[i] = h2_predict[i]
                        else:
                            pred[i] = 1 if np.random.rand() > 0.5 else -1
                    # Further filter the high-confidence pseudo-labels by
                    # cluster consistency.
                    correct_inx = self.correct_pred(self.unlabeled_data, pred)
                    L_X = self.unlabeled_data[correct_inx]
                    L_y = pred[correct_inx]
                    if l_pre[inx] == 0:
                        # First usable round: initialize L' from the bound.
                        l_pre[inx] = np.floor(e / (e_pre[inx] - e) + 1)
                    if l_pre[inx] < len(L_X):
                        if e * len(L_X) < e_pre[inx] * l_pre[inx]:
                            flags[inx] = True
                        elif l_pre[inx] > (e / (e_pre[inx] - e)):
                            # Subsample so the bound e*L < e'*L' still holds.
                            size = np.ceil((e_pre[inx] * l_pre[inx]) / e - 1)
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            flags[inx] = True
                if flags[inx]:
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    # Refit on labeled data plus the accepted pseudo-labels.
                    L_X = np.concatenate((X, L_X), axis=0)
                    L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X, L_y)
        # print("训练经过%stimes" % cycles)


if __name__ == '__main__':
    # Load split 1 of the breast-cancer benchmark.
    path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'
    train_data = load_txt_regex(path % ('train', 'data', 1))
    train_label = load_txt_regex(path % ('train', 'labels', 1)).flatten()
    test_data = load_txt_regex(path % ('test', 'data', 1))
    test_label = load_txt_regex(path % ('test', 'labels', 1)).flatten()
    # Keep 20% of the training data as labeled; treat the rest as unlabeled.
    labeled_data, unlabeled_data, labeled_label, unlabeled_label = \
        train_test_split(train_data, train_label, test_size=0.8, random_state=919)

    # Baseline 1: a single SVM trained only on the labeled subset.
    estimator = SVC()
    estimator.fit(labeled_data, labeled_label)
    print("单个分类器在有标签数据上准确率为%s" % estimator.score(test_data, test_label))

    # Baseline 2: bagged SVMs trained on all training data.
    bag = BaggingClassifier(estimator)
    bag.fit(train_data, train_label)
    print("Bag集成在所有训练数据上准确率为%s" % bag.score(test_data, test_label))

    # Tri-training with cluster-based pseudo-label filtering.
    tri = TriTrainingClassifier(estimator, unlabeled_data, unlabeled_label)
    tri.fit(labeled_data, labeled_label)
    print("tri在有标签数据上准确率为%s" % tri.score(test_data, test_label))

    # Plot accuracy on the unlabeled pool per training round.
    plt.figure()
    plt.plot(tri.train_scores, '.k-')
    plt.xlabel('Round')
    plt.ylabel('Accuracy Rate')
    plt.show()
