# -*- coding: utf-8 -*-
"""
尝试在选择高置信度数据时引入马氏距离
"""
import numpy as np
import matplotlib.pyplot as plt
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from utils import load_txt_regex
from numpy.linalg import pinv


class DisturbedTriTrainingClassifier(SemiSupervisedClassifier):
    """Tri-training-style semi-supervised classifier with injected label noise.

    Each of three base estimators is retrained on the labeled set plus
    pseudo-labeled samples on which the other two estimators agree; the
    pseudo-labeled pool is ranked by a Mahalanobis-distance confidence score
    (see ``mahala_dist``), and a small fraction of "disturbance" samples with
    uniformly random labels is mixed in.

    NOTE(review): depends on the parent class for ``self.estimators_`` (three
    estimators, indexed 0..2), ``self.train_scores``, ``self.unlabeled_data``,
    ``self.unlabeled_label``, ``self.score`` and ``self._bootstrap_sampling``
    -- confirm against SemiSupervisedClassifier. Labels are assumed binary,
    encoded as +1 / -1 (the random noise labels and the class split in
    ``mahala_dist`` hard-code these two values).
    """

    def __init__(self, base_estimator, unlabeled_data, unlabeled_label, noise_rate=0.1, random_state=None):
        """Store the noise rate; everything else is forwarded to the parent.

        :param base_estimator: classifier implementing fit/predict
        :param unlabeled_data: pool of unlabeled samples used for co-training
        :param unlabeled_label: true labels of the pool (used here only to
            monitor accuracy each round; presumably not used for training --
            verify against the parent class)
        :param noise_rate: fraction of the pseudo-labeled set to add as
            randomly labeled noise samples (default 0.1)
        :param random_state: seed / RandomState forwarded to the parent
        """
        super().__init__(base_estimator, unlabeled_data, unlabeled_label, random_state)
        self.noise_rate = noise_rate

    def _update_estimator(self, X, y):
        """Run the iterative tri-training update on labeled data ``(X, y)``.

        Repeats until no estimator qualifies for retraining in a full round.
        The per-estimator bookkeeping mirrors classical tri-training:
        ``e_pre``/``l_pre`` hold the previous round's error estimate and
        pseudo-labeled set size, and an estimator is retrained only when the
        (noise-adjusted) error-weighted set size shrinks.
        """
        l_pre = [0] * 3    # |L| of the previous round per estimator (0 = uninitialized)
        e_pre = [0.5] * 3  # previous-round error estimate per estimator
        flags = [True] * 3  # whether estimator inx gets retrained this round
        cycles = 0
        # Confidence score of every unlabeled sample, computed once up front
        # from the labeled data.
        mahala_score = self.mahala_dist(X, y)
        while np.sum(flags) > 0:
            # Record accuracy on the unlabeled pool each round (monitoring only).
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            flags = [False] * 3
            for inx in range(3):
                L_X = []
                L_y = []
                # Estimate the error e of the other two estimators (h2 & h3)
                # on the labeled data. NOTE(review): a sample counts as correct
                # only when h2 and h3 agree AND match the true label, and e is
                # normalized by |y| -- this differs from classical tri-training,
                # which normalizes by the number of agreements. Confirm intended.
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(X)
                h3 = self.estimators_[index[1]].predict(X)
                correct = (h2 == h3) & (h2 == y)
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # Pseudo-label the unlabeled samples on which the other
                    # two estimators agree.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_agree = h2_predict == h3_predict
                    new_data = self.unlabeled_data[inx_agree]
                    new_label = h2_predict[inx_agree]
                    # L_X = np.concatenate((X, new_data), axis=0)
                    # L_y = np.concatenate((y, new_label), axis=0)
                    L_X = np.array(new_data)
                    L_y = np.array(new_label)
                    if l_pre[inx] == 0:
                        # First qualifying round: seed l_pre with the smallest
                        # set size for which the update condition can hold
                        # (tri-training's floor(e/(e_pre-e)+1), shifted here by
                        # the 0.5*noise_rate terms).
                        l_pre[inx] = np.floor((e+0.5*self.noise_rate)/(e_pre[inx]-e)+1)
                    if l_pre[inx] < len(L_X):
                        if (e+0.5*self.noise_rate)*len(L_X) < (e_pre[inx]+0.5*self.noise_rate)*l_pre[inx]\
                                and self.noise_rate*len(L_X) < len(X):
                            # Error-weighted set size shrank: retrain on the
                            # whole agreed set.
                            flags[inx] = True
                        elif l_pre[inx] > ((e+0.5*self.noise_rate)/(e_pre[inx]-e)):
                            # Agreed set too large: keep only the `size`
                            # samples with the highest Mahalanobis confidence
                            # (slice walks argsort from the top downward).
                            size = np.ceil((e_pre[inx]+0.5*self.noise_rate)/(e+0.5*self.noise_rate)*l_pre[inx]-1)
                            score = mahala_score[inx_agree]
                            score_sorted = np.argsort(score)
                            score_inx = score_sorted[-1:int(-1-size):-1]
                            L_X = L_X[score_inx]
                            L_y = L_y[score_inx]
                            # L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            if self.noise_rate*len(L_X) < len(X):
                                flags[inx] = True
                if flags[inx]:
                    cycles += 1
                    # NOTE(review): check_random_state is re-created from the
                    # same seed on every iteration, so with a fixed
                    # random_state the noise labels repeat each round --
                    # confirm this is intended.
                    random_state = check_random_state(self.random_state)
                    # "Disturbance" samples: take up to noise_rate*|L| samples
                    # on which the other two estimators disagree (resampled via
                    # the parent's _bootstrap_sampling when enough exist) and
                    # give them uniformly random +1/-1 labels.
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_disagree = h2_predict != h3_predict
                    disagree_data = self.unlabeled_data[inx_disagree]
                    if len(disagree_data) < int(self.noise_rate*len(L_X)):
                        noise_size = len(disagree_data)
                        noise_data = np.array(disagree_data)
                    else:
                        noise_size = int(self.noise_rate*len(L_X))
                        noise_data = self._bootstrap_sampling(disagree_data, size=noise_size)
                    noise_label = []
                    for i in range(noise_size):
                        tmp = random_state.rand()
                        if tmp > 0.5:
                            noise_label.append(1)
                        else:
                            noise_label.append(-1)
                    noise_label = np.array(noise_label)
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    # Retrain estimator inx on labeled + pseudo-labeled (+ noise) data.
                    if len(noise_data) > 0:
                        L_X = np.concatenate((X, L_X, noise_data), axis=0)
                        L_y = np.concatenate((y, L_y, noise_label), axis=0)
                    else:
                        L_X = np.concatenate((X, L_X), axis=0)
                        L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X, L_y)
        # print("training took %s cycles" % cycles)

    def mahala_dist(self, X, y):
        """Score every unlabeled sample by Mahalanobis-distance separation.

        For each sample in ``self.unlabeled_data`` the score is
        ``|d_M(x, mu_pos) - d_M(x, mu_neg)|``, where mu_pos / mu_neg are the
        means of the labeled samples with ``y == 1`` / ``y == -1`` and d_M
        uses the pooled covariance of ``X`` (pseudo-inverse, so a singular
        covariance matrix is tolerated). A larger score means the sample lies
        much closer to one class mean than the other, i.e. can be
        pseudo-labeled with more confidence.

        :param X: labeled feature matrix, shape (n_samples, n_features)
        :param y: labels in {1, -1}
        :return: 1-D numpy array of scores, one per unlabeled sample
        """
        X_pos = X[y == 1, :]
        X_neg = X[y == -1, :]
        pos_ave = np.average(X_pos, axis=0)
        neg_ave = np.average(X_neg, axis=0)
        cov_mat = np.cov(X.T)
        cov_inv = pinv(cov_mat)
        score = []
        for line in self.unlabeled_data:
            pos_tmp = np.sqrt(np.dot((line-pos_ave).dot(cov_inv), (line-pos_ave).T))
            neg_tmp = np.sqrt(np.dot((line-neg_ave).dot(cov_inv), (line-neg_ave).T))
            score.append(np.abs(pos_tmp-neg_tmp))
        return np.array(score)


if __name__ == '__main__':
    # Load split #1 of the breast-cancer dataset; hold back 80% of the
    # training portion as the unlabeled pool.
    path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'
    train_data = load_txt_regex(path % ('train', 'data', 1))
    train_label = load_txt_regex(path % ('train', 'labels', 1)).flatten()
    labeled_data, unlabeled_data, labeled_label, unlabeled_label = \
        train_test_split(train_data, train_label, test_size=0.8, random_state=919)
    test_data = load_txt_regex(path % ('test', 'data', 1))
    test_label = load_txt_regex(path % ('test', 'labels', 1)).flatten()

    # Experiment configuration.
    estimator = SVC()
    cycles = 1000
    noise_rate = 0.1

    # Fit the disturbed tri-training classifier and report test accuracy
    # together with the per-round accuracy trace.
    tri = DisturbedTriTrainingClassifier(estimator, unlabeled_data, unlabeled_label, noise_rate=noise_rate)
    tri.fit(labeled_data, labeled_label)
    tri_ar = tri.score(test_data, test_label)
    print("tri在有标签数据上准确率为%s" % tri_ar)
    print(tri.train_scores)


