"""
# k-均值特征生成器
"""

import numpy as np
from scipy import sparse
from sklearn.cluster import KMeans

from scipy.spatial import Voronoi, voronoi_plot_2d
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

from sklearn.metrics import roc_curve


class KMeansFeaturizer:
    """Transform data points into the ID of their closest k-means cluster.

    The transformer runs k-means on the input data and converts each data
    point into the ID of the closest cluster. If a target variable is
    present, it is scaled and included as input to k-means in order to
    derive clusters that obey the classification boundary as well as group
    similar points together.

    Parameters
    ----------
    k : int
        Number of clusters.
    target_scale : float
        Weight applied to the target column when it is appended to the
        features; 0 effectively disables the target hint.
    random_state : int or None
        Seed forwarded to the underlying KMeans models for reproducibility.
    """

    def __init__(self, k=100, target_scale=5.0, random_state=None):
        self.k = k
        self.target_scale = target_scale
        self.random_state = random_state

    def fit(self, X, y=None):
        """Run k-means on the input data and find the centroids.

        When ``y`` is given, a pre-training pass clusters the augmented
        matrix ``[X | y * target_scale]`` so the centroids respect the class
        boundary; a second, single-iteration pass then re-fits the centroids
        in the original feature space (without the target column) so that
        ``transform`` can later be applied to unlabeled data.
        """
        if y is None:
            # No target variable: run plain k-means on the features.
            km_model = KMeans(n_clusters=self.k,
                              n_init=20,
                              random_state=self.random_state)
            km_model.fit(X)

            self.km_model = km_model
            self.cluster_centers_ = km_model.cluster_centers_
            return self

        # Target information is available: scale it and append it as an
        # extra input column for the pre-training k-means pass.
        # np.asarray lets y be a plain list as well as an ndarray.
        y = np.asarray(y)
        data_with_target = np.hstack((X, y[:, np.newaxis] * self.target_scale))

        # Pre-train a k-means model on the data plus the target column.
        km_model_pretrain = KMeans(n_clusters=self.k,
                                   n_init=20,
                                   random_state=self.random_state)
        km_model_pretrain.fit(data_with_target)

        # Second k-means run to obtain clusters in the original feature
        # space (without the target column), initialized from the
        # pre-trained centroids with the target column dropped.
        # Slicing to X.shape[1] (instead of the previous hard-coded ``:2``)
        # works for any feature dimensionality, not just 2-D inputs.
        # A single iteration re-assigns points and recomputes the centroids.
        km_model = KMeans(n_clusters=self.k,
                          init=km_model_pretrain.cluster_centers_[:, :X.shape[1]],
                          n_init=1,
                          max_iter=1,
                          random_state=self.random_state)
        km_model.fit(X)

        self.km_model = km_model
        self.cluster_centers_ = km_model.cluster_centers_
        return self

    def transform(self, X, y=None):
        """Return the closest cluster ID for each input point, shape (n, 1)."""
        cluster = self.km_model.predict(X)
        return cluster[:, np.newaxis]

    def fit_transform(self, X, y=None):
        """Convenience wrapper: fit on (X, y), then transform X."""
        self.fit(X, y)
        return self.transform(X, y)


# k-means featurization with and without the target hint
# (target_scale=0 zeroes out the appended target column, so the hint has no effect).
training_data, training_labels = make_moons(n_samples=2000, noise=0.2)
kmf_hint = KMeansFeaturizer(k=100, target_scale=10).fit(training_data, training_labels)
kmf_no_hint = KMeansFeaturizer(k=100, target_scale=0).fit(training_data, training_labels)

# Optional Voronoi-cell visualization of both cluster sets.
# NOTE(review): requires the third-party SciencePlots styles ('science',
# 'scatter'); left disabled.
# with plt.style.context(['science', 'scatter']):
#     fig, ax = plt.subplots(2, 1, figsize=(10, 5))
#     axes = ax.flatten()
#
#     axes[0].scatter(training_data[:, 0], training_data[:, 1], c=training_labels, cmap='Set1', alpha=0.2)
#     axes[0].set_title('k-means with target variable')
#     vor = Voronoi(kmf_hint.cluster_centers_)
#     voronoi_plot_2d(vor, ax=axes[0], show_points=False, alpha=0.5)
#
#     axes[1].scatter(training_data[:, 0], training_data[:, 1], c=training_labels, cmap='Set1', alpha=0.2)
#     axes[1].set_title('k-means without target variable')
#     vor = Voronoi(kmf_no_hint.cluster_centers_)
#     voronoi_plot_2d(vor, ax=axes[1], show_points=False, alpha=0.5)
#
#     fig.savefig('可视化/k-均值特征生成器.png')
#     fig.show()

# Generate held-out test data from the same distribution as the training
# data (slightly noisier).
test_data, test_labels = make_moons(n_samples=2000, noise=0.3)

# Derive the cluster-ID feature for both splits with the target-hinted featurizer.
training_cluster_features = kmf_hint.transform(training_data)
test_cluster_features = kmf_hint.transform(test_data)

# Augment the raw coordinates with the cluster-ID column.
training_with_cluster = np.column_stack((training_data, training_cluster_features))
test_with_cluster = np.column_stack((test_data, test_cluster_features))

# Candidate classifiers, kept as parallel (name, estimator) pairs and then
# split into the two lists the plotting code expects.
_named_models = [
    ('LR', LogisticRegression(random_state=kmf_hint.random_state)),
    ('kNN', KNeighborsClassifier(5)),
    ('RBF SVM', SVC(gamma=2, C=1)),
    ('Random Forest', RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)),
    ('Boosted Trees', GradientBoostingClassifier(n_estimators=10, learning_rate=1.0, max_depth=5)),
]
clsr_names = [name for name, _ in _named_models]
clsr = [estimator for _, estimator in _named_models]

# Compare all classifiers on the raw features against a logistic regression
# that additionally receives the k-means cluster-ID feature.
# Fix: training_with_cluster/test_with_cluster were previously computed but
# never used, so the "k-means + logistic regression" curve promised by the
# output filename was never plotted.
with plt.style.context(['science', 'grid']):
    fig, ax = plt.subplots()

    # Logistic regression on [raw features | cluster ID] — the model the
    # cluster features above were built for.
    lr_cluster = LogisticRegression(random_state=kmf_hint.random_state)
    lr_cluster.fit(training_with_cluster, training_labels)
    fpr, tpr, _ = roc_curve(test_labels,
                            lr_cluster.decision_function(test_with_cluster))
    ax.plot(fpr, tpr, label='LR with k-means')

    for i, model in enumerate(clsr):
        model.fit(training_data, training_labels)
        # Score for ROC: prefer the decision function, fall back to the
        # positive-class probability for models without one.
        if hasattr(model, 'decision_function'):
            predictions = model.decision_function(test_data)
        else:
            predictions = model.predict_proba(test_data)[:, 1]
        fpr, tpr, _ = roc_curve(test_labels, predictions)
        ax.plot(fpr, tpr, label=clsr_names[i])

    # Chance-level diagonal for reference.
    ax.plot([0, 1], [0, 1], 'k--')
    ax.legend()
    fig.savefig('可视化/在人造双月数据集上k-means+逻辑回归与其他非线性分类器及普通逻辑回归的ROC曲线对比.png')
    fig.show()