from sklearn import datasets
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plot
from mpl_toolkits.mplot3d import Axes3D, axes3d
import mglearn
import numpy as np
import graphviz
import unittest

from sklearn.neighbors import KNeighborsClassifier as KNC, KNeighborsRegressor as KNR
from sklearn.linear_model import LinearRegression as LinearReg
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression as LogisticReg
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.neural_network import MLPClassifier as MLP



class DataBase(unittest.TestCase):
    """Shared fixture base: loads every dataset used by the model test
    classes and offers a helper for plotting feature importances on the
    breast-cancer data."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Toy and real datasets from mglearn and scikit-learn.
        self.forge = mglearn.datasets.make_forge()
        self.wave = mglearn.datasets.make_wave(n_samples=40)
        self.cancer = datasets.load_breast_cancer()
        self.boston = mglearn.datasets.load_extended_boston()
        self.blob = datasets.make_blobs(random_state=8, centers=4)
        self.moon = datasets.make_moons(n_samples=100, noise=0.25, random_state=3)
        self.handcraft = mglearn.tools.make_handcrafted_dataset()

    def plot_cancer_feature_importance(self, model):
        """Draw a horizontal bar chart of a fitted model's
        ``feature_importances_`` labeled with the cancer feature names."""
        feature_count = self.cancer.data.shape[1]
        positions = np.arange(feature_count)
        plot.barh(positions, model.feature_importances_, align='center')
        plot.yticks(positions, self.cancer.feature_names)
        plot.xlabel('feature importance')
        plot.ylabel('feature')
        plot.tight_layout()
        plot.show()


class TestKnn(DataBase):
    """k-nearest-neighbors examples: classification on the forge and
    cancer datasets, regression on the wave dataset."""

    def test_plot_classifier_data(self):
        """Scatter the two-class forge dataset."""
        forge_x, forge_y = self.forge
        mglearn.discrete_scatter(forge_x[:, 0], forge_x[:, 1], forge_y)
        plot.legend(['class 0', 'class 1'], loc=4)
        plot.xlabel('first feature')
        plot.ylabel('second feature')
        plot.show()

    def test_plot_regression_data(self):
        """Scatter the 1-D wave regression dataset."""
        plot.plot(*self.wave, 'o')
        plot.ylim(-3, 3)
        plot.xlabel('feature')
        plot.ylabel('target')
        plot.show()

    def test_plot_classification_3_neighbor(self):
        """Show mglearn's canned 3-NN classification illustration."""
        mglearn.plots.plot_knn_classification(n_neighbors=3)
        plot.show()

    def test_predict_classification_accuracy(self):
        """Report 3-NN test accuracy on the forge dataset."""
        n_neighbors = 3
        xtr, xte, ytr, yte = train_test_split(*self.forge, random_state=0)
        knc = KNC(n_neighbors).fit(xtr, ytr)
        print(f'forge knn precision with {n_neighbors} neighbors: {knc.score(xte, yte)}')

    def test_plot_classification_decision_boundary(self):
        """Plot decision boundaries for 1/3/9 neighbors side by side."""
        forge_x, forge_y = self.forge
        fig, axes = plot.subplots(1, 3, figsize=(10, 3))
        for n_neighbors, ax in zip([1, 3, 9], axes):
            knc = KNC(n_neighbors).fit(*self.forge)
            mglearn.plots.plot_2d_separator(knc, forge_x, fill=True, eps=.5, ax=ax, alpha=.4)
            mglearn.discrete_scatter(forge_x[:, 0], forge_x[:, 1], forge_y, ax=ax)
            ax.set_title(f'{n_neighbors} neighbors')
            ax.set_xlabel('feature 0')
            ax.set_ylabel('feature 1')
        axes[0].legend(loc=3)
        plot.show()

    # explore relation between accuracy and n_neighbors
    def test_plot_classification_neighbors_relation(self):
        """Plot train/test accuracy on cancer as a function of n_neighbors."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target,
                                              stratify=self.cancer.target, random_state=66)
        train_accuracy, test_accuracy = [], []
        neighbors_settings = range(1, 11)
        for n_neighbors in neighbors_settings:
            knc = KNC(n_neighbors=n_neighbors)
            knc.fit(xtr, ytr)
            # BUG FIX: the original appended the test score to train_accuracy
            # and the train score to test_accuracy.
            train_accuracy.append(knc.score(xtr, ytr))
            test_accuracy.append(knc.score(xte, yte))
        plot.plot(neighbors_settings, train_accuracy, label='train accuracy')
        plot.plot(neighbors_settings, test_accuracy, label='test accuracy')
        # BUG FIX: axis labels were swapped; x is n_neighbors, y is accuracy.
        plot.xlabel('n_neighbors')
        plot.ylabel('accuracy')
        plot.legend()
        plot.show()

    def test_plot_regression_1_neighbors(self):
        """Show mglearn's canned 1-NN regression illustration."""
        mglearn.plots.plot_knn_regression(n_neighbors=1)
        plot.show()

    def test_plot_regression_3_neighbors(self):
        """Show mglearn's canned 3-NN regression illustration."""
        mglearn.plots.plot_knn_regression(n_neighbors=3)
        plot.show()

    def test_predict_regression_accuracy(self):
        """Report 3-NN regression R^2 on the wave test split."""
        xtr, xte, ytr, yte = train_test_split(*self.wave, random_state=0)
        knr = KNR(n_neighbors=3)
        knr.fit(xtr, ytr)
        print(f'test regression accuracy: {knr.score(xte, yte)}')

    def test_predict_regression_boundary(self):
        """Plot KNN regression predictions for 1/3/9 neighbors on a dense line."""
        xtr, xte, ytr, yte = train_test_split(*self.wave, random_state=0)
        fig, axes = plot.subplots(1, 3, figsize=(15, 4))
        line = np.linspace(-3, 3, 1000).reshape(-1, 1)
        for n_neighbors, ax in zip([1, 3, 9], axes):
            knr = KNR(n_neighbors).fit(xtr, ytr)
            ax.plot(line, knr.predict(line))
            ax.plot(xtr, ytr, '^', c=mglearn.cm2(0), markersize=8)
            # BUG FIX: test points used cm2(0) — the same color as the training
            # points — making the two series indistinguishable; use cm2(1).
            ax.plot(xte, yte, 'v', c=mglearn.cm2(1), markersize=8)
            ax.set_title(f'{n_neighbors} neighbors\ntrain accuracy: {knr.score(xtr, ytr):.2f}, test accuracy: {knr.score(xte, yte):.2f}')
            ax.set_xlabel('feature')
            ax.set_ylabel('target')
        axes[0].legend(['model prediction', 'training data/target', 'test data/target'], loc='best')
        plot.show()


class TestLinear(DataBase):
    """Linear-model examples: OLS, ridge (L2), lasso (L1), logistic
    regression, and multiclass linear SVC on the book's datasets."""

    # 1 linear model/ols
    def test_plot_wave_linear_model(self):
        """Show mglearn's canned OLS fit on the wave dataset."""
        mglearn.plots.plot_linear_regression_wave()
        plot.show()

    def test_params_ols(self):
        """Fit OLS on wave and report coefficients and train/test scores."""
        xtr, xte, ytr, yte = train_test_split(*self.wave, random_state=42)
        lr = LinearReg().fit(xtr, ytr)
        print(f'lr coef_: {lr.coef_}, lr intercept_:{lr.intercept_}')
        print(f'train score: {lr.score(xtr, ytr)}')
        print(f'test score: {lr.score(xte, yte)}')

    def test_predict_boston(self):
        # model over fit upon boston house price
        """OLS on extended Boston: many derived features, so it over-fits."""
        xtr, xte, ytr, yte = train_test_split(*self.boston, random_state=0)
        lr = LinearReg().fit(xtr, ytr)
        print(f'train accuracy: {lr.score(xtr, ytr)}, test accuracy: {lr.score(xte, yte)}')

    # 2 ridge regression(L2 regularization)
    def test_predict_boston_ridge(self):
        """Ridge (default alpha=1) on extended Boston."""
        xtr, xte, ytr, yte = train_test_split(*self.boston, random_state=0)
        ridge = Ridge().fit(xtr, ytr)
        print(f'train accuracy: {ridge.score(xtr, ytr):.2f}, test accuracy: {ridge.score(xte, yte):.2f}')

    def test_plot_ridge_with_diff_alpha(self):
        """Compare ridge coefficient magnitudes for several alphas vs OLS."""
        xtr, xte, ytr, yte = train_test_split(*self.boston, random_state=0)
        ridge10 = Ridge(alpha=10).fit(xtr, ytr)
        ridge01 = Ridge(alpha=0.1).fit(xtr, ytr)
        ridge = Ridge().fit(xtr, ytr)
        lr = LinearReg().fit(xtr, ytr)
        plot.plot(ridge.coef_, 's', label='alpha=1')
        plot.plot(ridge01.coef_, '^', label='alpha=0.1')
        plot.plot(ridge10.coef_, 'v', label='alpha=10')
        plot.plot(lr.coef_, 'o', label='Linear Regression')
        plot.xlabel('coefficient index')
        plot.ylabel('coefficient magnitude')
        plot.hlines(0, 0, len(lr.coef_))
        plot.ylim(-25, 25)
        plot.legend()
        plot.show()

    # 3 lasso model(L1 regularization)
    def test_predict_and_plot_boston_lasso(self):
        """Lasso on extended Boston: scores, sparsity, coefficient plot."""
        xtr, xte, ytr, yte = train_test_split(*self.boston, random_state=0)
        lasso = Lasso().fit(xtr, ytr)
        print(f'train: {lasso.score(xtr, ytr):.2f}, test: {lasso.score(xte, yte):.2f}')
        print(f'used feature number: {np.sum(lasso.coef_ != 0)}')
        # plot start
        lasso001 = Lasso(alpha=0.01, max_iter=10000).fit(xtr, ytr)
        lasso0001 = Lasso(alpha=0.001, max_iter=10000).fit(xtr, ytr)
        ridge01 = Ridge(alpha=0.1).fit(xtr, ytr)
        plot.plot(lasso.coef_, 's', label='lasso alpha=1')
        plot.plot(lasso001.coef_, '^', label='lasso alpha=0.01')
        plot.plot(lasso0001.coef_, 'v', label='lasso alpha=0.001')
        plot.plot(ridge01.coef_, 'o', label='ridge alpha=0.1')
        plot.legend(ncol=2, loc=(0, 1.05))
        plot.ylim(-25, 25)
        plot.xlabel('coefficient index')
        plot.ylabel('coefficient magnitude')
        plot.show()

    # 4 logistic bi-classification(L2 regularization by default, but can use L1)
    def test_plot_logistic_reg_boundary(self):
        """LinearSVC vs LogisticRegression decision boundaries on forge."""
        x, y = self.forge
        fig, axes = plot.subplots(1, 2, figsize=(10, 3))
        for model, ax in zip((LinearSVC(), LogisticReg()), axes):
            clf = model.fit(*self.forge)
            mglearn.plots.plot_2d_separator(clf, x, fill=False, eps=.5, ax=ax, alpha=.7)
            mglearn.discrete_scatter(x[:, 0], x[:, 1], y, ax=ax)
            ax.set_title(f'{clf.__class__.__name__}')
            ax.set_xlabel('feature 0')
            ax.set_ylabel('feature 1')
        axes[0].legend()
        plot.show()

    def test_predict_and_plot_cancer_logistic(self):
        """Logistic regression on cancer for C in {0.01, 1, 100}; plot coefs."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        logistic_reg = LogisticReg().fit(xtr, ytr)
        print(f'train: {logistic_reg.score(xtr, ytr):.3f},'
              f'test: {logistic_reg.score(xte, yte):.3f}')
        logistic_reg001 = LogisticReg(C=0.01).fit(xtr, ytr)
        logistic_reg100 = LogisticReg(C=100).fit(xtr, ytr)
        plot.plot(logistic_reg001.coef_.T, 'o', label='C=0.01')
        plot.plot(logistic_reg100.coef_.T, '^', label='C=100')
        plot.plot(logistic_reg.coef_.T, 'v', label='C=1')
        plot.xticks(range(self.cancer.data.shape[1]), self.cancer.feature_names, rotation=90)
        plot.hlines(0, 0, self.cancer.data.shape[1])
        plot.ylim(-5, 5)
        plot.xlabel('coefficient index')
        plot.ylabel('coefficient magnitude')
        plot.legend()
        plot.show()

    def test_plot_cancer_logistic_L1(self):
        """L1-penalized logistic regression on cancer for several C values."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        for C, marker in zip([0.01, 1, 100], ['o', '^', 'v']):
            logistic_l1 = LogisticReg(C=C, penalty='l1', solver='liblinear').fit(xtr, ytr)
            print(f'train: {logistic_l1.score(xtr, ytr):.3f}, test: {logistic_l1.score(xte, yte):.3f}')
            plot.plot(logistic_l1.coef_.T, marker, label=f'C={C}')
        # FIX: loop-invariant figure setup was re-run on every iteration;
        # hoisted out of the loop (final figure is identical).
        plot.xticks(range(self.cancer.data.shape[1]), self.cancer.feature_names, rotation=90)
        plot.hlines(0, 0, self.cancer.data.shape[1])
        plot.xlabel('coefficient index')
        plot.ylabel('coefficient magnitude')
        plot.ylim(-5, 5)
        plot.legend()
        plot.tight_layout()
        plot.show()

    # 5 LVC(multi-classification)
    def test_plot_LVC_boundary_bi(self):
        """One-vs-rest LinearSVC on the blob data: points plus class lines."""
        x, y = self.blob
        linear_svm = LinearSVC().fit(x, y)
        line = np.linspace(-15, 15)
        mglearn.discrete_scatter(x[:, 0], x[:, 1], y)
        for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):
            # Decision line: coef[0]*x0 + coef[1]*x1 + intercept = 0.
            plot.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)
        # FIX: loop-invariant axis/legend setup hoisted out of the loop.
        plot.ylim(-10, 15)
        plot.xlim(-10, 8)
        plot.xlabel('feature 0')
        plot.ylabel('feature 1')
        plot.legend(['class 0', 'class 1', 'class 2', 'line class0', 'line class1', 'line class2'], loc=(1.01, 0.3))
        plot.tight_layout()
        plot.show()

    def test_plot_LVC_boundary_multi(self):
        """Same LinearSVC, now with the filled multiclass decision regions."""
        x, y = self.blob
        linear_svm = LinearSVC().fit(x, y)
        mglearn.plots.plot_2d_classification(linear_svm, x, fill=True, alpha=.7)
        mglearn.discrete_scatter(x[:, 0], x[:, 1], y)
        line = np.linspace(-15, 15)
        for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):
            plot.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)
        # FIX: loop-invariant labeling hoisted out of the loop.
        plot.legend(['class0', 'class1', 'class2', 'line class0', 'line class1', 'line class2'], loc=(1.01, 0.3))
        plot.xlabel('feature 0')
        plot.ylabel('feature 1')
        plot.show()


class TestDTC(DataBase):
    """Decision-tree and tree-ensemble examples on the cancer and moons data."""

    # 6 naive bayes:gaussianNB, BernoulliNB, MultinomialNB
    # 7 DecisionTreeClassifier
    def test_plot_DTC_base(self):
        """Show mglearn's illustrative animal decision tree."""
        mglearn.plots.plot_animal_tree()
        plot.show()

    def test_predict_DTC_accuracy(self):
        """Compare an unpruned tree with a depth-4 tree on cancer."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        tree = DTC(random_state=42).fit(xtr, ytr)
        print(f'train: {tree.score(xtr, ytr)}, test: {tree.score(xte, yte)}')
        tree = DTC(max_depth=4, random_state=0).fit(xtr, ytr)
        print(f'tree with 4 depth train: {tree.score(xtr, ytr)}, test: {tree.score(xte, yte)}')

    def test_visualize_DTC(self):
        """Render the depth-4 tree as a graphviz diagram."""
        xtr, _, ytr, _ = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        tree = DTC(max_depth=4, random_state=0).fit(xtr, ytr)
        data = export_graphviz(tree, out_file=None, class_names=['malignant', 'benign'],
                               feature_names=self.cancer.feature_names, impurity=False, filled=True)
        source = graphviz.Source(data)
        source.view()

    def test_plot_DTC_feature_importance(self):
        """Plot feature importances of the depth-4 tree."""
        xtr, _, ytr, _ = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        tree = DTC(max_depth=4, random_state=0).fit(xtr, ytr)
        self.plot_cancer_feature_importance(tree)

    # 8 DTC ensemble:random forest, gradient boosted DT
    def test_plot_forest_boundary(self):
        """Plot each tree's partition and the forest's combined boundary on moons."""
        xtr, xte, ytr, yte = train_test_split(*self.moon, stratify=self.moon[1], random_state=3)
        forest = RFC(n_estimators=5, random_state=2).fit(xtr, ytr)
        fig, axes = plot.subplots(2, 3, figsize=(20, 10))
        for i, (ax, tree) in enumerate(zip(axes.ravel(), forest.estimators_)):
            ax.set_title(f'tree {i}')
            mglearn.plots.plot_tree_partition(xtr, ytr, tree, ax=ax)
        # BUG FIX: the combined boundary was drawn inside the loop, stacking
        # five semi-transparent layers on the last subplot; draw it once.
        mglearn.plots.plot_2d_separator(forest, xtr, fill=True, ax=axes[-1, -1], alpha=.4)
        axes[-1, -1].set_title('random forest')
        mglearn.discrete_scatter(xtr[:, 0], xtr[:, 1], ytr)
        plot.show()

    def test_plot_cancer_importance(self):
        """Random-forest accuracy and feature importances on cancer."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        forest = RFC(n_estimators=100, random_state=0).fit(xtr, ytr)
        print(f'train: {forest.score(xtr, ytr)}, test: {forest.score(xte, yte)}')
        self.plot_cancer_feature_importance(forest)

    def test_plot_and_predict_cancer(self):
        """Gradient boosting: defaults, small learning rate, then stumps."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        gbc = GBC(random_state=0).fit(xtr, ytr)
        print(f'GBC train: {gbc.score(xtr, ytr)}, test: {gbc.score(xte, yte)}')
        gbc = GBC(random_state=0, learning_rate=0.01).fit(xtr, ytr)
        print(f'GBC train: {gbc.score(xtr, ytr)}, test: {gbc.score(xte, yte)}')
        # avoid over fitting
        gbc = GBC(random_state=0, max_depth=1, n_estimators=100).fit(xtr, ytr)
        print(f'GBC train: {gbc.score(xtr, ytr)}, test: {gbc.score(xte, yte)}')
        self.plot_cancer_feature_importance(gbc)


class TestKernelizedSVM(DataBase):
    """Kernelized SVM examples: a squared-feature 3-D lift, RBF-kernel
    boundaries with support vectors, C/gamma sensitivity, and the effect
    of feature scaling."""

    def test_plot_3d_boundary_polynomial(self):
        """Lift the blob data with a squared second feature and scatter in 3D."""
        data, labels = self.blob
        lifted = np.hstack([data, data[:, 1:] ** 2])
        figure = plot.figure()
        ax = figure.add_axes(Axes3D(figure, elev=-152, azim=-26))
        is_class0 = labels == 0
        ax.scatter(lifted[is_class0, 0], lifted[is_class0, 1], lifted[is_class0, 2], c='b', cmap=mglearn.cm2, s=60)
        ax.scatter(lifted[~is_class0, 0], lifted[~is_class0, 1], lifted[~is_class0, 2], c='r', marker='^', cmap=mglearn.cm2, s=60)
        ax.set_xlabel('feature 0')
        ax.set_ylabel('feature 1')
        ax.set_zlabel('feature 1 ** 2')
        plot.show()

    def test_plot_2d_boundary_RBF(self):
        """RBF-kernel SVC on the handcrafted data; highlight support vectors."""
        data, labels = self.handcraft
        svm = SVC(kernel='rbf', C=10, gamma=0.1).fit(data, labels)
        mglearn.plots.plot_2d_separator(svm, data, eps=.5)
        mglearn.discrete_scatter(data[:, 0], data[:, 1], labels)
        support = svm.support_vectors_
        # Sign of the dual coefficient gives each support vector's class side.
        support_labels = svm.dual_coef_.ravel() > 0
        mglearn.discrete_scatter(support[:, 0], support[:, 1], support_labels, s=15, markeredgewidth=3)
        plot.xlabel('feature 0')
        plot.ylabel('feature 1')
        plot.show()

    def test_plot_2d_boundary_adjust_params(self):
        """3x3 grid of decision boundaries over log(C) x log(gamma)."""
        fig, axes = plot.subplots(3, 3, figsize=(15, 10))
        for row, log_c in zip(axes, [-1, 0, 3]):
            for cell, log_g in zip(row, range(-1, 2)):
                mglearn.plots.plot_svm(log_C=log_c, log_gamma=log_g, ax=cell)
        axes[0, 0].legend(['class 0', 'class 1', 'sv class 0', 'sv class 1'], ncol=4, loc=(.9, 1.2))
        plot.show()

    def test_predict_cancer(self):
        """SVC on the raw (unscaled) cancer data with gamma = 1/n_features."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        svc = SVC(C=1, gamma=1 / len(self.cancer.feature_names)).fit(xtr, ytr)
        print(f'kernelized SVM train: {svc.score(xtr, ytr)}, test: {svc.score(xte, yte)}')

    def test_pre_deal_cancer_data(self):
        """Min-max scale features with training-split statistics, then fit SVC."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        train_min = xtr.min(axis=0)
        train_range = (xtr - train_min).max(axis=0)
        # Both splits are scaled with the training statistics only.
        xtr_scale = (xtr - train_min) / train_range
        xte_scale = (xte - train_min) / train_range

        svc = SVC().fit(xtr_scale, ytr)
        print(f'kernelized SVC after data scale. train: {svc.score(xtr_scale, ytr)}, test: {svc.score(xte_scale, yte)}')


class TestNeural(DataBase):
    """Multi-layer-perceptron examples on the moons and cancer datasets."""

    def test_plot_mlp_boundary(self):
        """Decision boundaries for each (hidden size, activation) combination."""
        xtr, xte, ytr, yte = train_test_split(*self.moon, random_state=3)
        fig, axes = plot.subplots(2, 2, figsize=(10, 10))
        # Same sequence the original produced by toggling the activation.
        configs = [(10, 'relu'), (10, 'tanh'), (100, 'relu'), (100, 'tanh')]
        for (node_num, func), ax in zip(configs, axes.ravel()):
            mlp = MLP(solver='lbfgs', activation=func, random_state=0, hidden_layer_sizes=[node_num]).fit(xtr, ytr)
            mglearn.plots.plot_2d_separator(mlp, xtr, fill=True, alpha=.3, ax=ax)
            mglearn.discrete_scatter(xtr[:, 0], xtr[:, 1], ytr, ax=ax)
            ax.set_title(f'hidden node num: {node_num}, activate func: {func}')
            ax.set_xlabel('feature 0')
            ax.set_ylabel('feature 1')
        plot.tight_layout()
        plot.show()

    def test_predict_cancer_mlp(self):
        """MLP on cancer: raw data, standardized data, then tuned iter/alpha."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=42)
        mlp = MLP(random_state=42).fit(xtr, ytr)
        print(f'mlp not scale data train: {mlp.score(xtr, ytr)}, test: {mlp.score(xte, yte)}')
        # Standardize with statistics from the training split only.
        feature_mean = xtr.mean(axis=0)
        feature_std = xtr.std(axis=0)
        xtr_scale = (xtr - feature_mean) / feature_std
        xte_scale = (xte - feature_mean) / feature_std
        mlp = MLP(random_state=42).fit(xtr_scale, ytr)
        print(f'mlp scaled data train: {mlp.score(xtr_scale, ytr)}, test: {mlp.score(xte_scale, yte)}')
        mlp = MLP(random_state=42, max_iter=1000).fit(xtr_scale, ytr)
        print(f'mlp scaled data iter 1000 train: {mlp.score(xtr_scale, ytr)}, test: {mlp.score(xte_scale, yte)}')
        mlp = MLP(random_state=42, alpha=1, max_iter=1000).fit(xtr_scale, ytr)
        print(f'mlp scaled 1000 iter alpha 1 train: {mlp.score(xtr_scale, ytr)}, test: {mlp.score(xte_scale, yte)}')

if __name__ == '__main__':
    # Discover and run every TestCase defined above; most tests render
    # matplotlib figures or print model scores rather than assert.
    unittest.main()
