# -*- coding: utf-8 -*-
"""
简单的Co-Training的实现
两个视图由属性随机分配而成
每轮循环选择的无标签数据数量由百分比决定
"""
import numpy as np
from utils.SemiSupervisedClassifier import SemiSupervisedClassifier
from sklearn.model_selection import train_test_split
from utils import load_txt_regex
from sklearn.svm import SVC


class SimpleCoTrainingClassifier(SemiSupervisedClassifier):
    """Simple co-training semi-supervised classifier.

    The feature set is randomly split into two equally sized views; one
    fresh copy of ``base_estimator`` is trained per view, and over
    ``cycles`` rounds each estimator is refit on the labeled data plus
    the whole unlabeled pool pseudo-labeled by its peer.  Labels are
    assumed to be -1/+1 (see ``predict``).
    """

    def __init__(self, base_estimator, unlabeled_data, unlabeled_label, cycles=10, random_state=None):
        """
        Params
        -------
            base_estimator: estimator exposing fit/predict/score/get_params
            unlabeled_data: 2-D matrix of unlabeled samples
            unlabeled_label: true labels of the unlabeled pool (used only to
                monitor accuracy during training, never for fitting)
            cycles: number of co-training rounds
            random_state: seed forwarded to the random view split so the
                split is reproducible
        """
        super().__init__(base_estimator, unlabeled_data, unlabeled_label, random_state)
        self.n_estimator = 2
        self.cycles = cycles

    def init_estimators(self, X, y):
        """Split features into two views and fit one fresh copy of the base
        estimator per view on the labeled data (X, y)."""
        # BUG FIX: the view split previously ignored self.random_state,
        # making runs irreproducible even with a fixed seed.
        feat_index = self.choose_views(X, random_state=self.random_state)
        self.feat_index = feat_index
        views = self.split_views(X, feat_index)
        self.estimators_ = []
        clazz = type(self.base_estimator)  # fresh instances with the same params
        params = self.base_estimator.get_params()
        for i in range(2):
            estimator = clazz(**params)
            estimator.fit(views[i], y)
            self.estimators_.append(estimator)

    def _update_estimator(self, X, y):
        """Run the co-training rounds: each estimator is refit on the labeled
        data plus the entire unlabeled pool pseudo-labeled by its peer."""
        unlabeled_views = self.split_views(self.unlabeled_data, self.feat_index)
        for _ in range(self.cycles):
            # Accuracy on the unlabeled pool at the start of each round.
            self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))
            for j in range(2):
                k = (j + 1) % 2  # index of the peer estimator
                h_predictions = self.estimators_[k].predict(unlabeled_views[k])
                # Labeled samples plus the peer's pseudo-labeled pool.
                h_X = np.concatenate((X, self.unlabeled_data), axis=0)
                h_y = np.concatenate((y, h_predictions), axis=0)
                views_X = self.split_views(h_X, self.feat_index)
                self.estimators_[j].fit(views_X[j], h_y)
        # BUG FIX: also record the score after the final round; previously the
        # effect of the last update was never measured.
        self.train_scores.append(self.score(self.unlabeled_data, self.unlabeled_label))

    def predict(self, X):
        """Predict -1/+1 labels by summing the estimators' votes per sample;
        ties (vote sum == 0) resolve to -1."""
        views = self.split_views(X, self.feat_index)
        predictions = np.array(
            [est.predict(view) for est, view in zip(self.estimators_, views)])
        vote_predictions = np.sum(predictions, axis=0)
        return np.where(vote_predictions > 0, 1, -1)

    @staticmethod
    def choose_views(X, random_state=None):
        """
        Randomly split the feature indices into two equally sized sets.
        Params
        -------
            X: 2-D data matrix
            random_state: optional seed for a reproducible split
        Return
        -------
            list of two sorted feature-index lists
        """
        dim = X.shape[1]
        feat_index = train_test_split(range(dim), test_size=0.5, random_state=random_state)
        return [sorted(x) for x in feat_index]

    @staticmethod
    def split_views(X, index):
        """
        Return the view data selected by each feature-index set.
        Params
        -------
            X: 2-D data matrix
            index: iterable of feature-index lists
        Return
        -------
            list of 2-D views, one per index set
        """
        return [X[:, feats] for feats in index]


if __name__ == '__main__':
    # Load the breast-cancer dataset (training and test splits).
    path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'

    def load(split, part):
        # split is 'train' or 'test'; part is 'data' or 'labels'.
        return load_txt_regex(path % (split, part, 1))

    train_data = load('train', 'data')
    train_label = load('train', 'labels').flatten()
    test_data = load('test', 'data')
    test_label = load('test', 'labels').flatten()

    # Hold out 80% of the training set as the "unlabeled" pool.
    labeled_data, unlabeled_data, labeled_label, unlabeled_label = train_test_split(
        train_data, train_label, test_size=0.8, random_state=919)

    # Baseline: a single SVM trained on all labels, then on the labeled subset only.
    svm = SVC()
    svm.fit(train_data, train_label)
    print("单个分类器在整个标签数据上准确率为%s" % svm.score(test_data, test_label))
    svm.fit(labeled_data, labeled_label)
    print("单个分类器在部分标签数据上准确率为%s" % svm.score(test_data, test_label))

    # Co-training with the SVM as base estimator.
    co = SimpleCoTrainingClassifier(svm, unlabeled_data, unlabeled_label)
    co.fit(labeled_data, labeled_label)
    prediction = co.predict(test_data)
    print(co.score(test_data, test_label))
    print(co.train_scores[-1] - co.train_scores[0])
    print(co.train_scores)