# -*- coding: utf-8 -*-
"""
尝试引入聚类算法辅助标记无标签数据
尝试在选择高置信度数据时引入马氏距离
"""
import numpy as np
import matplotlib.pyplot as plt
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from sklearn.svm import SVC
from numpy.linalg import pinv
from sklearn.cluster import MiniBatchKMeans, KMeans, Birch
from utils import gen_data


class DisturbedTriTrainingClassifier(SemiSupervisedClassifier):
    """Tri-training with cluster-assisted pseudo-labeling and injected label noise.

    Experimental variant of tri-training (Zhou & Li style bookkeeping):
    * a Birch clustering step corrects the pseudo-labels proposed by the two
      peer classifiers before samples are added to a member's training set;
    * a small fraction (``noise_rate``) of randomly labeled disagreement
      samples is mixed in to "disturb" each retraining round.

    Labels are assumed to be binary +1/-1 throughout.
    """

    def __init__(self, base_estimator, unlabeled_data, unlabeled_label, noise_rate=0.1, random_state=None):
        """
        :param base_estimator: estimator used for the three tri-training members
            (member setup is handled by ``SemiSupervisedClassifier`` — presumably
            cloned there; TODO confirm against the base class)
        :param unlabeled_data: feature matrix of the unlabeled pool
        :param unlabeled_label: ground-truth labels of the pool (used only for
            progress scoring in ``_update_estimator``)
        :param noise_rate: fraction of randomly labeled noise samples injected
            into each refit
        :param random_state: seed for the random noise-label generator
        """
        super().__init__(base_estimator, unlabeled_data, unlabeled_label, random_state)
        self.noise_rate = noise_rate

    def correct_pred(self, X, y, n_clusters=10):
        """Correct predicted labels by majority vote within Birch clusters.

        Every sample in a cluster receives the label carried by the majority of
        that cluster's predictions (+1/-1 labels assumed; empty clusters and
        ties resolve to -1).

        :param X: samples to cluster
        :param y: predicted labels for ``X``
        :param n_clusters: number of Birch clusters
        :return: ndarray of cluster-corrected labels, aligned with ``X``
        """
        brc = Birch(n_clusters=n_clusters)
        cluster_pred = brc.fit_predict(X)

        clusters = [[] for _ in range(brc.n_clusters)]   # sample indices per cluster
        clu_pred = [[] for _ in range(brc.n_clusters)]   # predicted labels per cluster
        corrected = [1] * len(y)
        for i, c in enumerate(cluster_pred):
            clusters[c].append(i)
            clu_pred[c].append(y[i])
        # majority vote over the +/-1 labels of each cluster (sum > 0 => +1)
        labels = [1 if np.sum(p) > 0 else -1 for p in clu_pred]
        for c in range(brc.n_clusters):
            for i in clusters[c]:
                corrected[i] = labels[c]    # whole cluster shares one label
        return np.array(corrected)

    def _update_estimator(self, X, y):
        """Run tri-training rounds until no member classifier is refit.

        For member ``inx`` the other two members (peers) pseudo-label the
        unlabeled pool; the newly labeled set is accepted only while the
        estimated joint peer error keeps shrinking (standard tri-training
        acceptance criterion, with ``noise_rate`` folded into the bound).
        Additionally, randomly labeled disagreement samples are appended to
        disturb each refit.

        :param X: labeled training features
        :param y: labeled training targets (+1/-1)
        """
        l_pre = [0] * 3      # |L| of the previously accepted round, per member
        e_pre = [0.5] * 3    # peer-error estimate of the previous round, per member
        flags = [True] * 3   # whether member inx gets refit this round
        while np.sum(flags) > 0:
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            flags = [False] * 3
            for inx in range(3):
                L_X = []
                L_y = []
                # estimate the joint error e of the two peers (h2 & h3) on the labeled set
                index = list(range(3))
                index.remove(inx)
                h2 = self.estimators_[index[0]].predict(X)
                h3 = self.estimators_[index[1]].predict(X)
                correct = (h2 == h3) & (h2 == y)
                e = 1 - np.sum(correct) / len(y)
                if e < e_pre[inx]:
                    # select unlabeled samples that both peers label identically
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_agree = h2_predict == h3_predict
                    cluster_inx = self.correct_pred(self.unlabeled_data, h2_predict)
                    # BUG FIX: the original `cluster_inx & inx_agree` bitwise-ANDed the
                    # +/-1 cluster labels with a boolean mask, producing an int 0/1
                    # array; indexing with it fancy-selected rows 0 and 1 repeatedly
                    # instead of masking.  Require the cluster-corrected label to
                    # confirm the peers' prediction and use a true boolean mask.
                    correct_inx = (cluster_inx == h2_predict) & inx_agree
                    L_X = self.unlabeled_data[correct_inx]
                    L_y = h2_predict[correct_inx]
                    if l_pre[inx] == 0:
                        # first acceptance: lower bound on |L| per the tri-training criterion
                        l_pre[inx] = np.floor((e+0.5*self.noise_rate)/(e_pre[inx]-e)+1)
                    if l_pre[inx] < len(L_X):
                        if (e+0.5*self.noise_rate)*len(L_X) < (e_pre[inx]+0.5*self.noise_rate)*l_pre[inx]\
                                and self.noise_rate*len(L_X) < len(X):
                            flags[inx] = True
                        elif l_pre[inx] > ((e+0.5*self.noise_rate)/(e_pre[inx]-e)):
                            # L grew too fast: subsample so the error bound still holds
                            size = np.ceil((e_pre[inx]+0.5*self.noise_rate)/(e+0.5*self.noise_rate)*l_pre[inx]-1)
                            # NOTE(review): selecting the `size` samples with the largest
                            # Mahalanobis margin (see self.mahala_dist) was tried here
                            # before settling on plain bootstrap sampling.
                            L_X, L_y = self._bootstrap_sampling(L_X, L_y, size=int(size))
                            if self.noise_rate*len(L_X) < len(X):
                                flags[inx] = True
                if flags[inx]:
                    random_state = check_random_state(self.random_state)
                    # disturb the refit with randomly labeled samples the peers disagree on
                    h2_predict = self.estimators_[index[0]].predict(self.unlabeled_data)
                    h3_predict = self.estimators_[index[1]].predict(self.unlabeled_data)
                    inx_disagree = h2_predict != h3_predict
                    disagree_data = self.unlabeled_data[inx_disagree]
                    if len(disagree_data) < int(self.noise_rate*len(L_X)):
                        noise_size = len(disagree_data)
                        noise_data = np.array(disagree_data)
                    else:
                        noise_size = int(self.noise_rate*len(L_X))
                        noise_data = self._bootstrap_sampling(disagree_data, size=noise_size)
                    # coin-flip +/-1 labels for the noise samples
                    noise_label = []
                    for i in range(noise_size):
                        tmp = random_state.rand()
                        if tmp > 0.5:
                            noise_label.append(1)
                        else:
                            noise_label.append(-1)
                    noise_label = np.array(noise_label)
                    e_pre[inx] = e
                    l_pre[inx] = len(L_X)
                    if len(noise_data) > 0:
                        L_X = np.concatenate((X, L_X, noise_data), axis=0)
                        L_y = np.concatenate((y, L_y, noise_label), axis=0)
                    else:
                        L_X = np.concatenate((X, L_X), axis=0)
                        L_y = np.concatenate((y, L_y), axis=0)
                    self.estimators_[inx].fit(L_X, L_y)

    def mahala_dist(self, X, y):
        """Score the unlabeled pool by its Mahalanobis-distance margin.

        For each unlabeled sample, compute the Mahalanobis distances to the
        positive and negative class centroids (shared covariance estimated
        from ``X``); the score is the absolute gap between the two distances —
        a larger gap means the sample sits more clearly on one side.

        :param X: labeled features
        :param y: labeled targets (+1/-1)
        :return: ndarray of scores aligned with ``self.unlabeled_data``
        """
        X_pos = X[y == 1, :]
        X_neg = X[y == -1, :]
        pos_ave = np.average(X_pos, axis=0)
        neg_ave = np.average(X_neg, axis=0)
        cov_mat = np.cov(X.T)
        cov_inv = pinv(cov_mat)     # pseudo-inverse guards against singular covariance
        score = []
        for line in self.unlabeled_data:
            pos_tmp = np.sqrt(np.dot((line-pos_ave).dot(cov_inv), (line-pos_ave).T))
            neg_tmp = np.sqrt(np.dot((line-neg_ave).dot(cov_inv), (line-neg_ave).T))
            score.append(np.abs(pos_tmp-neg_tmp))
        return np.array(score)


if __name__ == '__main__':
    # Benchmark the disturbed tri-training classifier on one dataset:
    # average test accuracy over 50 independent fits.
    data_dir = r'../data/csv/'
    dataset = 'thyroid.csv'
    (train_data, train_label, test_data, test_label,
     labeled_data, labeled_label, unlabeled_data, unlabeled_label) = gen_data(
        data_dir + dataset, unlabeled_rate=0.8, random_state=919)

    base_clf = SVC()
    accuracies = []

    for _ in range(50):
        model = DisturbedTriTrainingClassifier(base_clf, unlabeled_data, unlabeled_label, noise_rate=0.1)
        model.fit(labeled_data, labeled_label)
        accuracies.append(model.score(test_data, test_label))

    print(np.average(accuracies))

