import unittest
import ssl
import numpy as np
from matplotlib import pyplot as plot
import mglearn

from sklearn.model_selection import train_test_split

from sklearn.svm import SVC
from sklearn.decomposition import PCA, NMF
from sklearn.neighbors import KNeighborsClassifier as KNC
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans, AgglomerativeClustering as Agg, DBSCAN

from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler, StandardScaler

from sklearn.metrics.cluster import adjusted_rand_score


# Disable SSL certificate verification for HTTPS downloads so that
# datasets.fetch_lfw_people() can fetch the LFW data on machines where the
# certificate chain cannot be validated. NOTE(review): this weakens security
# process-wide; acceptable for a local test script only.
ssl._create_default_https_context = ssl._create_unverified_context


class Base(unittest.TestCase):
    """Shared fixture that exposes the datasets used by all test classes.

    Datasets are cached at class level so the expensive loads -- in
    particular the networked LFW faces download -- happen once per process
    instead of once per test instance (unittest constructs a new instance
    for every test method).
    """

    _datasets = None  # lazily populated class-level cache

    def __init__(self, *args, **kwargs):
        super(Base, self).__init__(*args, **kwargs)
        if Base._datasets is None:
            Base._datasets = {
                'cancer': datasets.load_breast_cancer(),
                'blob': datasets.make_blobs(n_samples=50, centers=5, random_state=4, cluster_std=2),
                'faces': datasets.fetch_lfw_people(min_faces_per_person=20, resize=0.7),
                'signals': mglearn.datasets.make_signals(),
                'digits': datasets.load_digits(),
                'moons': datasets.make_moons(n_samples=200, noise=0.05, random_state=0),
            }
        cache = Base._datasets
        self.cancer = cache['cancer']
        self.blob = cache['blob']
        self.faces = cache['faces']
        self.signals = cache['signals']
        self.digits = cache['digits']
        self.moons = cache['moons']

    def get_train_test_50_faces(self):
        """Return (data, target) keeping at most 50 images per person.

        Capping the per-person image count counteracts the strong class
        imbalance in LFW; dividing by 255 scales pixel values into [0, 1].
        """
        mask = np.zeros(self.faces.target.shape, dtype=np.bool_)
        for target in np.unique(self.faces.target):
            # keep only the first 50 occurrences of each identity
            mask[np.where(self.faces.target == target)[0][:50]] = True
        return self.faces.data[mask] / 255, self.faces.target[mask]


class TestTransform(Base):
    """MinMaxScaler preprocessing: correct usage and its effect on SVC accuracy."""

    def test_scale_cancer_minmax(self):
        """Fit MinMaxScaler on the training split only and report feature ranges."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=1)
        print(f'origin cancer data and target shape: {self.cancer.data.shape}, {self.cancer.target.shape}')
        scaler = MinMaxScaler().fit(xtr)
        xtr_scaled, xte_scaled = scaler.transform(xtr), scaler.transform(xte)
        print(f'transformed shape: {xtr_scaled.shape}')
        print(f'feature min:{xtr.min(axis=0)}\nmax:{xtr.max(axis=0)},\nfeature scaled min:{xtr_scaled.min(axis=0)}\nscaled max:{xtr_scaled.max(axis=0)}')

    def test_scale_blob_minmax(self):
        """Plot correctly vs incorrectly scaled test data side by side.

        The third panel deliberately fits a second scaler on the test set
        alone to demonstrate why the training-set statistics must be reused.
        """
        xtr, xte, _, _ = train_test_split(*self.blob, random_state=5, test_size=.1)
        fig, axes = plot.subplots(1, 3, figsize=(13, 4))
        axes[0].scatter(xtr[:, 0], xtr[:, 1], c=mglearn.cm2(0), label='train set', s=60)
        # FIX: test points previously reused cm2(0), so train and test sets
        # were drawn in the same color and the legend was misleading; the
        # test set now uses cm2(1) in every panel.
        axes[0].scatter(xte[:, 0], xte[:, 1], c=mglearn.cm2(1), marker='^', label='test set', s=60)
        axes[0].legend(loc='upper left')
        axes[0].set_title('origin data')

        scaler = MinMaxScaler().fit(xtr)    # data = Scaler().fit_transform(input)
        xtr_scaled, xte_scaled = scaler.transform(xtr), scaler.transform(xte)
        axes[1].scatter(xtr_scaled[:, 0], xtr_scaled[:, 1], c=mglearn.cm2(0), label='train set', s=60)
        axes[1].scatter(xte_scaled[:, 0], xte_scaled[:, 1], c=mglearn.cm2(1), label='test set', s=60, marker='^')
        axes[1].set_title('scaled data')

        # improper: scaler fitted on the test set instead of the training set
        te_scaler = MinMaxScaler().fit(xte)
        xte_scaled_bad = te_scaler.transform(xte)
        axes[2].scatter(xtr_scaled[:, 0], xtr_scaled[:, 1], c=mglearn.cm2(0), label='train set', s=60)
        axes[2].scatter(xte_scaled_bad[:, 0], xte_scaled_bad[:, 1], c=mglearn.cm2(1), label='test set', s=60, marker='^')
        axes[2].set_title('improperly scaled test data')

        for ax in axes:
            ax.set_xlabel('feature 0')
            ax.set_ylabel('feature 1')
        plot.show()

    def test_predict_cancer_scaled(self):
        """Compare SVC accuracy on raw vs MinMax-scaled features."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        svm = SVC(C=100).fit(xtr, ytr)
        print(f'SVC accuracy: {svm.score(xte, yte)}')

        scaler = MinMaxScaler().fit(xtr)
        xtr_scaled, xte_scaled = scaler.transform(xtr), scaler.transform(xte)
        svm.fit(xtr_scaled, ytr)
        print(f'SVC scaled accuracy: {svm.score(xte_scaled, yte)}')


class TestPCA(Base):
    """PCA for visualization, whitening before k-NN, and component inspection."""

    def test_PCA_sample(self):
        """Render mglearn's canonical PCA illustration."""
        mglearn.plots.plot_pca_illustration()
        plot.tight_layout()
        plot.show()

    def test_plot_cancer_hist_(self):
        """Per-feature histograms comparing malignant and benign samples."""
        fig, axes = plot.subplots(6, 5, figsize=(20, 10))
        malignant, benign = self.cancer.data[self.cancer.target==0], self.cancer.data[self.cancer.target==1]
        ax = axes.ravel()
        for i in range(30):
            # shared bin edges so the two class histograms are comparable
            _, bins = np.histogram(self.cancer.data[:, i], bins=50)
            ax[i].hist(malignant[:, i], bins=bins, color=mglearn.cm3(0), alpha=.5)
            ax[i].hist(benign[:, i], bins=bins, color=mglearn.cm3(2), alpha=.5)
            ax[i].set_title(self.cancer.feature_names[i])
        # FIX: legend previously read 'magnitude' for the first series; the
        # series plotted above are malignant then benign.
        ax[0].legend(['malignant', 'benign'], loc='best')
        fig.tight_layout()
        plot.show()

    def test_pca_cancer(self):
        """Project standardized cancer data onto its first two principal components."""
        scaler = StandardScaler().fit(self.cancer.data)
        x_scaled = scaler.transform(self.cancer.data)
        pca = PCA(n_components=2).fit(x_scaled)   # keep 2 pca in 30 features
        x_pca = pca.transform(x_scaled)
        print(f'scaled x shape:{x_scaled.shape}, pca x shape:{x_pca.shape}')
        plot.figure(figsize=(8, 8))
        mglearn.discrete_scatter(x_pca[:, 0], x_pca[:, 1], self.cancer.target)
        plot.legend(self.cancer.target_names, loc='best')
        plot.gca().set_aspect('equal')
        plot.xlabel('first principal component')
        plot.ylabel('second principal component')
        plot.show()

    def test_show_faces_sample(self):
        """Show the first ten LFW face images with their identity labels."""
        fix, axes = plot.subplots(2, 5, figsize=(15, 8), subplot_kw={'xticks': (), 'yticks': ()})
        for target, image, ax in zip(self.faces.target, self.faces.images, axes.ravel()):
            ax.imshow(image)
            ax.set_title(self.faces.target_names[target])
        print(f'faces shape: {self.faces.images.shape}, number of classes: {len(self.faces.target_names)}, faces target shape: {self.faces.target.shape}')
        plot.show()

    def test_predict_faces(self):
        """Compare 1-NN accuracy on raw pixels vs 100 whitened PCA components."""
        x_faces, y_faces = self.get_train_test_50_faces()
        xtr, xte, ytr, yte = train_test_split(x_faces, y_faces, stratify=y_faces, random_state=0)
        knn = KNC(n_neighbors=1).fit(xtr, ytr)
        print(f'test 1-nn score: {knn.score(xte, yte)}')
        pca = PCA(n_components=100, whiten=True, random_state=0).fit(xtr)
        xtr_pca, xte_pca = pca.transform(xtr), pca.transform(xte)
        knn_pca = KNC(n_neighbors=1).fit(xtr_pca, ytr)
        print(f'test 1-nn pca score: {knn_pca.score(xte_pca, yte)}')

    def test_plot_pca_component(self):
        """Visualize the first 15 PCA components as face-shaped images."""
        x_faces, y_faces = self.get_train_test_50_faces()
        xtr, xte, ytr, yte = train_test_split(x_faces, y_faces, stratify=y_faces, random_state=0)
        pca = PCA(n_components=100, whiten=True, random_state=0).fit(xtr)
        fix, axes = plot.subplots(3, 5, figsize=(15, 12), subplot_kw={'xticks': (), 'yticks': ()})
        for i, (component, ax) in enumerate(zip(pca.components_, axes.ravel())):
            # 'viridis' renders the signed component values on a perceptually
            # uniform green-yellow scale
            ax.imshow(component.reshape(self.faces.images[0].shape), cmap='viridis')
            ax.set_title(f'{i + 1} component')
        plot.show()


class TestNMFSNE(Base):
    """NMF component extraction, blind source separation, and t-SNE embedding."""

    def test_plot_extract_15_components(self):
        """Show the 15 NMF components learned from the face training set."""
        x_faces, y_faces = self.get_train_test_50_faces()
        xtr, xte, ytr, yte = train_test_split(x_faces, y_faces, stratify=y_faces, random_state=0)
        nmf = NMF(n_components=15, random_state=0).fit(xtr)
        xtr_nmf, xte_nmf = nmf.transform(xtr), nmf.transform(xte)
        fix, axes = plot.subplots(3, 5, figsize=(15, 12), subplot_kw={'xticks':(), 'yticks':()})
        for i, (component, ax) in enumerate(zip(nmf.components_, axes.ravel())):
            ax.imshow(component.reshape(self.faces.images[0].shape))
            # FIX: numbering was 0-based here but 1-based in the PCA
            # counterpart; use 1-based consistently.
            ax.set_title(f'{i + 1} component')
        plot.show()

    def test_plot_2_main_nmf_components(self):
        """Show the faces that activate NMF components 4 and 7 most strongly."""
        x_faces, y_faces = self.get_train_test_50_faces()
        xtr, xte, ytr, yte = train_test_split(x_faces, y_faces, stratify=y_faces, random_state=0)
        nmf = NMF(n_components=15, random_state=0).fit(xtr)
        xtr_nmf, xte_nmf = nmf.transform(xtr), nmf.transform(xte)
        print(f'xtr_nmf shape: {xtr_nmf.shape}')
        for comp in (4, 7):
            fig, axes = plot.subplots(2, 5, figsize=(15, 8), subplot_kw={'xticks':(),'yticks':()})
            # training faces sorted by descending activation of this component
            inds = np.argsort(xtr_nmf[:, comp])[::-1]
            for ind, ax in zip(inds, axes.ravel()):
                ax.imshow(xtr[ind].reshape(self.faces.images[0].shape))
        plot.show()

    def test_plot_signal(self):
        """Recover mixed signal sources with NMF and PCA and plot the results."""
        fig, axes = plot.subplots(5, figsize=(8, 4), gridspec_kw={'hspace': .5}, subplot_kw={'xticks':(), 'yticks':()})
        axes[0].plot(self.signals, '-')
        axes[0].set_title('origin signal data')
        # random mixing matrix producing 100 observed measurements
        A = np.random.RandomState(0).uniform(size=(100, 3))
        X = np.dot(self.signals, A.T)   # matrix multiply 2000-3 x 3-100 = 2000-100
        print(f'shape of measurements: {X.shape}')
        nmf = NMF(n_components=3, random_state=42)
        S_ = nmf.fit_transform(X)
        print(f'recovered signal shape: {S_.shape}')
        H = PCA(n_components=3).fit_transform(X)
        models = [X, self.signals, S_, H]
        names = ["Observations(first 3 measurements)","True sources", "NMF recovered", "PCA recovered"]
        for model, name, ax in zip(models, names, axes[1:]):
            ax.set_title(name)
            ax.plot(model[:, :3], '-')
        plot.show()

    def test_plot_tSNE_and_pca(self):
        """Compare 2-D PCA projection with t-SNE embedding of the digits data."""
        fig, axes = plot.subplots(2, 5, figsize=(10, 5), subplot_kw={'xticks':(),'yticks':()})
        for ax, img in zip(axes.ravel(), self.digits.images):
            ax.imshow(img)
        print(f'digits data shape: {self.digits.data.shape}')
        pca = PCA(n_components=2).fit(self.digits.data)
        digits_pca = pca.transform(self.digits.data)
        # one distinct color per digit class 0-9
        colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
                  "#A83683", "#4E655E", "#853541", "#3A3120", "#535D8E"]
        plot.figure(figsize=(10, 10))
        plot.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max())
        plot.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max())
        for i in range(len(self.digits.data)):
            plot.text(digits_pca[i, 0], digits_pca[i, 1], str(self.digits.target[i]), color=colors[self.digits.target[i]], fontdict={'weight': 'bold', 'size': 9})
        plot.xlabel('first principal component')
        plot.ylabel('second principal component')

        digits_sne = TSNE(random_state=42).fit_transform(self.digits.data)
        plot.figure(figsize=(10, 10))
        plot.xlim(digits_sne[:, 0].min(), digits_sne[:, 0].max())
        plot.ylim(digits_sne[:, 1].min(), digits_sne[:, 1].max())
        for i in range(len(self.digits.data)):
            plot.text(digits_sne[i, 0], digits_sne[i, 1], str(self.digits.target[i]), color=colors[self.digits.target[i]], fontdict={'weight': 'bold', 'size': 9})
        plot.xlabel('t-SNE feature 0')
        plot.ylabel('t-SNE feature 1')
        plot.show()


class TestCluster(Base):
    """k-means, agglomerative clustering, and DBSCAN on blobs, faces, and moons."""

    def test_plot_normal_cluster(self):
        """Cluster the blob data into 3 groups and plot points plus centers."""
        points = self.blob[0]
        model = KMeans(n_clusters=3).fit(points)  # cluster label can be found by model.labels_
        mglearn.discrete_scatter(points[:, 0], points[:, 1], model.labels_, markers='o')
        centers = model.cluster_centers_
        mglearn.discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2], markers='^', markeredgewidth=2)
        plot.show()

    def test_plot_abnormal_cluster(self):
        """Show k-means with too few (2) and too many (5) clusters side by side."""
        points = self.blob[0]
        fig, axes = plot.subplots(1, 2, figsize=(10, 5))
        for ax, n_clusters in zip(axes, (2, 5)):
            labels = KMeans(n_clusters=n_clusters).fit(points).labels_
            mglearn.discrete_scatter(points[:, 0], points[:, 1], labels, ax=ax)
        plot.show()

    def test_plot_faces_pca_nmf_kmean(self):
        """Compare component images and reconstructions from k-means, PCA, and NMF."""
        x_faces, y_faces = self.get_train_test_50_faces()
        xtr, xte, ytr, yte = train_test_split(x_faces, y_faces, stratify=y_faces, random_state=0)
        nmf = NMF(n_components=100, random_state=0).fit(xtr)
        pca = PCA(n_components=100, random_state=0).fit(xtr)
        kmean = KMeans(n_clusters=100, random_state=0).fit(xtr)
        # reconstruct the test faces from each model's learned representation
        x_nmf = np.dot(nmf.transform(xte), nmf.components_)
        x_pca = pca.inverse_transform(pca.transform(xte))
        x_kmean = kmean.cluster_centers_[kmean.predict(xte)]

        image_shape = self.faces.images[0].shape
        fig, axes = plot.subplots(3, 5, figsize=(8, 8), subplot_kw={'xticks':(), 'yticks':()})
        fig.suptitle('extracted components')
        for col, center, pc, basis in zip(axes.T, kmean.cluster_centers_, pca.components_, nmf.components_):
            col[0].imshow(center.reshape(image_shape))
            col[1].imshow(pc.reshape(image_shape), cmap='viridis')
            col[2].imshow(basis.reshape(image_shape))
        for row_ax, label in zip(axes[:, 0], ('kmean', 'pca', 'nmf')):
            row_ax.set_ylabel(label)

        fig, axes = plot.subplots(4, 5, subplot_kw={'xticks':(), 'yticks':()}, figsize=(8, 8))
        fig.suptitle('reconstructions')
        for col, face, rec_k, rec_p, rec_n in zip(axes.T, xte, x_kmean, x_pca, x_nmf):
            col[0].imshow(face.reshape(image_shape))
            col[1].imshow(rec_k.reshape(image_shape))
            col[2].imshow(rec_p.reshape(image_shape))
            col[3].imshow(rec_n.reshape(image_shape))
        for row_ax, label in zip(axes[:, 0], ('origin', 'kmean', 'pca', 'nmf')):
            row_ax.set_ylabel(label)
        plot.show()

    def test_predict_accuracy(self):
        """Compare ARI of random labels vs KMeans/Agglomerative/DBSCAN on moons."""
        x_scale = StandardScaler().fit_transform(self.moons[0])
        fig, axes = plot.subplots(1, 4, figsize=(15, 3), subplot_kw={'xticks':(),'yticks':()})
        rng = np.random.RandomState(seed=0)
        random_clusters = rng.randint(low=0, high=2, size=len(self.moons[0]))
        axes[0].scatter(x_scale[:, 0], x_scale[:, 1], c=random_clusters, cmap=mglearn.cm3, s=60)
        axes[0].set_title(f'random assignment ARI: {adjusted_rand_score(self.moons[1], random_clusters):.2f}')
        for ax, algorithm in zip(axes[1:], [KMeans(n_clusters=2), Agg(n_clusters=2), DBSCAN()]):
            clusters = algorithm.fit_predict(x_scale)
            ax.scatter(x_scale[:, 0], x_scale[:, 1], c=clusters, cmap=mglearn.cm3, s=60)
            ax.set_title(f'{algorithm.__class__.__name__} - ARI: {adjusted_rand_score(self.moons[1], clusters):.2f}')
        plot.tight_layout()
        plot.show()
