# -*- coding: utf-8 -*-
"""
测试不同算法对集成效果的提升
"""
from utils import load_txt_regex, bootstrap_sampling
from TriTraining.DisturbedTriTrainingClassifier2 import DisturbedTriTrainingClassifier
from TriTraining.TriTrainingClassifier2 import TriTrainingClassifier
from CoTraining.SimpleCoTraining import SimpleCoTrainingClassifier
from SelfTraining.SelfTrainingClassifier1 import SelfTrainingClassifier1
from SelfTraining.SelfTrainingClassifier2 import SelfTrainingClassifier2
from sklearn.ensemble import BaggingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
import matplotlib.pyplot as plt
from copy import deepcopy
from collections import defaultdict
import numpy as np
import os


# Load a data set and split it into train/test and labeled/unlabeled parts.
def gen_data(path, unlabeled_rate, random_state=919):
    """Read a comma-separated data set (label in column 0, values +1/-1),
    split each class into train/test parts, then split the training part
    into a labeled and an unlabeled portion.

    Returns:
        (train_data, train_label, test_data, test_label,
         labeled_data, labeled_label, unlabeled_data, unlabeled_label)
    """
    raw = np.loadtxt(path, delimiter=',')

    def split_class(subset):
        # Train/test split of one class: (train_x, test_x, train_y, test_y).
        return train_test_split(subset[:, 1:], subset[:, 0], random_state=random_state)

    pos_tr_x, pos_te_x, pos_tr_y, pos_te_y = split_class(raw[raw[:, 0] == 1.0])
    neg_tr_x, neg_te_x, neg_tr_y, neg_te_y = split_class(raw[raw[:, 0] == -1.0])

    # Recombine the per-class parts into full train/test sets.
    train_data = np.concatenate((pos_tr_x, neg_tr_x), axis=0)
    train_label = np.concatenate((pos_tr_y, neg_tr_y), axis=0)
    test_data = np.concatenate((pos_te_x, neg_te_x), axis=0)
    test_label = np.concatenate((pos_te_y, neg_te_y), axis=0)

    # Carve the training set into labeled / unlabeled portions.
    labeled_data, unlabeled_data, labeled_label, unlabeled_label = train_test_split(
        train_data, train_label, test_size=unlabeled_rate, random_state=random_state)
    return (train_data, train_label, test_data, test_label,
            labeled_data, labeled_label, unlabeled_data, unlabeled_label)


# Run the comparison experiment on one data set.
def test_noise_rate(path, unlabeled_rate=0.8, estimator=None, noise_rate=0.1, random_state=919):
    """Repeatedly compare semi-supervised classifiers (tri-training,
    disturbed tri-training, co-training, two self-training variants)
    against Bagging and a single supervised baseline on one data set.

    Prints, per algorithm, the mean +/- std accuracy on the full training
    set and on the test set over all cycles.

    Args:
        path: CSV file path (label in column 0, values +1/-1).
        unlabeled_rate: fraction of the training data treated as unlabeled.
        estimator: prototype base classifier, cloned via ``get_params``.
            Defaults to a fresh ``SVC()`` (avoids a shared mutable default).
        noise_rate: noise rate for the disturbed tri-training variant.
        random_state: seed for the data splits.
    """
    if estimator is None:
        # Create the default lazily so calls never share one mutated instance.
        estimator = SVC()
    train_data, train_label, test_data, test_label, labeled_data, labeled_label, unlabeled_data, unlabeled_label = \
        gen_data(path, unlabeled_rate, random_state=random_state)
    bag = BaggingClassifier(estimator)
    semi_als = {
        'tri': TriTrainingClassifier(estimator, unlabeled_data, unlabeled_label),
        'dtri': DisturbedTriTrainingClassifier(estimator, unlabeled_data, unlabeled_label, noise_rate=noise_rate),
        'co': SimpleCoTrainingClassifier(estimator, unlabeled_data, unlabeled_label),
        'st1': SelfTrainingClassifier1(estimator, unlabeled_data, unlabeled_label),
        'st2': SelfTrainingClassifier2(estimator, unlabeled_data, unlabeled_label),
    }
    # Accuracy histories on the full training set and on the test set.
    train_scores = defaultdict(list)
    test_scores = defaultdict(list)
    cycles = 20
    # Class and constructor params are loop-invariant; hoist them.
    clazz, params = type(estimator), estimator.get_params()
    for cycle in range(cycles):
        # Build 3 bootstrap-trained base classifiers; the tri-/self-training
        # algorithms all start from identical copies of them.
        estimators = []
        for _ in range(3):
            clone = clazz(**params)
            samples, labels = bootstrap_sampling(labeled_data, labeled_label)
            clone.fit(samples, labels)
            estimators.append(clone)
        for name, alg in semi_als.items():
            if name != 'co':  # co-training builds its own estimators
                alg.estimators_ = deepcopy(estimators)

        # Supervised baselines trained on the full training set. Use a fresh
        # clone instead of rebinding `estimator` (the original shadowed the
        # parameter and refit the last bootstrap clone).
        single = clazz(**params)
        single.fit(train_data, train_label)
        bag.fit(train_data, train_label)
        # Semi-supervised algorithms see only the labeled subset (plus the
        # unlabeled pool passed at construction time).
        for alg in semi_als.values():
            alg.fit(labeled_data, labeled_label)

        # Accuracy on the test set and on the complete training set.
        test_score = {name: alg.score(test_data, test_label) for name, alg in semi_als.items()}
        test_score['bag'] = bag.score(test_data, test_label)
        test_score['single'] = single.score(test_data, test_label)
        train_score = {name: alg.score(train_data, train_label) for name, alg in semi_als.items()}
        train_score['bag'] = bag.score(train_data, train_label)
        train_score['single'] = single.score(train_data, train_label)
        for name in train_score:
            train_scores[name].append(train_score[name])
            test_scores[name].append(test_score[name])

    for name in train_scores:
        print('%s: %.6f +/- %0.6f, %.6f +/- %0.6f' %
              (name, np.average(train_scores[name]), np.std(train_scores[name]),
               np.average(test_scores[name]), np.std(test_scores[name])))


if __name__ == '__main__':
    # Experiment configuration.
    data_dir = r'../data/csv/'      # directory holding the CSV data sets
    data_name = 'twonorm.csv'       # data set file name
    base_clf = SVC()                # prototype base classifier
    unlabeled_rate = 0.8            # fraction of training data left unlabeled
    noise_rate = 0.1                # noise rate for disturbed tri-training

    print('data set: %s, unlabeled rate: %s, noise rate: %s' % (data_name, unlabeled_rate, noise_rate))
    test_noise_rate(data_dir + data_name, unlabeled_rate, base_clf, noise_rate)

    # 读取数据集
    # path = r'../data/breast-cancer.data/breast-cancer_%s_%s_%s.asc'
    # train_data = load_txt_regex(path % ('train', 'data', 1))
    # train_label = load_txt_regex(path % ('train', 'labels', 1)).flatten()
    # labeled_data, unlabeled_data, labeled_label, unlabeled_label = \
    #     train_test_split(train_data, train_label, test_size=0.8, random_state=919)
    # test_data = load_txt_regex(path % ('test', 'data', 1))
    # test_label = load_txt_regex(path % ('test', 'labels', 1)).flatten()
    # test_data = np.concatenate((test_data, load_txt_regex(path % ('test', 'data', 2))), axis=0)
    # test_label = np.concatenate((load_txt_regex(path % ('test', 'labels', 1)),
    #                              load_txt_regex(path % ('test', 'labels', 2))), axis=0).flatten()

    # 读取CSV数据集
    # path = r'../data/csv/%s.csv'    # 数据集目录
    # name = 'banana'                 # 数据集名称
    # unlabeled_rate = 0.8            # 无标签比例
    # estimator = SVC()               # 选择基分类器
    # noise_rate = 0.2                # 噪声比例
    # data = np.loadtxt(path % name, delimiter=',')
    # data_pos = data[data[:, 0] == 1.0]
    # data_neg = data[data[:, 0] == -1.0]
    # train_pos_data, test_pos_data, train_pos_label, test_pos_label = \
    #     train_test_split(data_pos[:, 1:], data_pos[:, 0], random_state=919)
    # train_neg_data, test_neg_data, train_neg_label, test_neg_label = \
    #     train_test_split(data_neg[:, 1:], data_neg[:, 0], random_state=919)
    # train_data = np.concatenate((train_pos_data, train_neg_data), axis=0)
    # train_label = np.concatenate((train_pos_label, train_neg_label), axis=0)
    # test_data = np.concatenate((test_pos_data, test_neg_data), axis=0)
    # test_label = np.concatenate((test_pos_label, test_neg_label), axis=0)
    # labeled_data, unlabeled_data, labeled_label, unlabeled_label = \
    #     train_test_split(train_data, train_label, test_size=unlabeled_rate, random_state=919)
    #
    # # 初始化算法
    # bag = BaggingClassifier(estimator)
    # tri = TriTrainingClassifier(estimator, unlabeled_data, unlabeled_label)
    # dtri = DisturbedTriTrainingClassifier(estimator, unlabeled_data, unlabeled_label, noise_rate=noise_rate)
    # co = SimpleCoTrainingClassifier(estimator, unlabeled_data, unlabeled_label)
    # st1 = SelfTrainingClassifier1(estimator, unlabeled_data, unlabeled_label)
    # st2 = SelfTrainingClassifier2(estimator, unlabeled_data, unlabeled_label)
    # semi_als = {
    #     'tri': tri,
    #     'dtri': dtri,
    #     'co': co,
    #     'st1': st1,
    #     'st2': st2,
    # }
    #
    # # 记录在完整训练集和训练集上的准确率
    # train_scores = defaultdict(list)
    # test_scores = defaultdict(list)
    # cycles = 50
    #
    # for cycle in range(cycles):
    #     print('%s times' % cycle)
    #     # 生成3个分类器，并且这些半监督算法使用相同的初始分类器
    #     estimators = []
    #     clazz = getattr(estimator, '__class__')
    #     params = estimator.get_params()
    #     for i in range(3):
    #         estimator = clazz(**params)
    #         samples, labels = bootstrap_sampling(labeled_data, labeled_label)
    #         estimator.fit(samples, labels)
    #         estimators.append(estimator)
    #     for k, v in semi_als.items():
    #         if k != 'co':
    #             v.estimators_ = deepcopy(estimators)
    #
    #     # 训练
    #     estimator.fit(train_data, train_label)
    #     bag.fit(train_data, train_label)
    #     [x.fit(labeled_data, labeled_label) for x in semi_als.values()]
    #
    #     # 测试在测试集和完整训练集上的准确度
    #     test_score = {x: semi_als[x].score(test_data, test_label) for x in semi_als.keys()}
    #     test_score['bag'] = bag.score(test_data, test_label)
    #     test_score['single'] = estimator.score(test_data, test_label)
    #     train_score = {x: semi_als[x].score(train_data, train_label) for x in semi_als.keys()}
    #     train_score['bag'] = bag.score(train_data, train_label)
    #     train_score['single'] = estimator.score(train_data, train_label)
    #     # print(train_score)
    #     # print(test_score)
    #     for k in train_score.keys():
    #         train_scores[k].append(train_score[k])
    #         test_scores[k].append(test_score[k])
    #
    # for k in train_scores.keys():
    #     print('%s: %.6f +/- %0.6f, %.6f +/- %0.6f' %
    #           (k, np.average(train_scores[k]), np.std(train_scores[k]), np.average(test_scores[k]), np.std(test_scores[k])))

    # 绘图
    # symbols = {
    #     'tri': 'pb-',
    #     'dtri': '+r-',
    #     'co': '^k-',
    #     'st1': 'ok-',
    #     'st2': 'sk-',
    # }
    # fig = plt.figure()
    # for k, v in train_scores.items():
    #     plt.plot(range(len(v)), v, symbols[k], label=k)
    # plt.legend(loc='upper right')
    # plt.xlim(0, 10, 1)
    # plt.show()
