import unittest
from matplotlib import pyplot as plot

import mglearn
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV

from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, PolynomialFeatures
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, Ridge


class TestBase(unittest.TestCase):
    """Shared fixture base: preloads the datasets used by every pipeline test.

    Attributes:
        cancer: sklearn breast-cancer Bunch (``.data`` / ``.target``).
        boston: mglearn extended-boston dataset as an ``(X, y)`` pair.
    """

    def __init__(self, *args, **kwargs):
        # Python 3 zero-argument super(); behaviorally identical to the
        # explicit two-argument form.
        super().__init__(*args, **kwargs)
        self.cancer = datasets.load_breast_cancer()
        self.boston = mglearn.datasets.load_extended_boston()


class TestPipeline(TestBase):
    """Exercises sklearn ``Pipeline``/``make_pipeline`` combined with grid search."""

    def test_chain_scale_train_grid(self):
        """Scale-then-SVC pipeline; tune ``C``/``gamma`` via GridSearchCV."""
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        pipe = Pipeline([('scaler', MinMaxScaler()), ('svm', SVC())]).fit(xtr, ytr)
        print(f'predict score: {pipe.score(xte, yte)}')
        # Step-prefixed keys ('svm__C') route parameters to the named pipeline step.
        params_grid = {'svm__C': [0.001, 0.01, 0.1, 1, 10, 100],
                       'svm__gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
        # Fix: the original chained .fit(xtr, ytr).fit(xtr, ytr), re-running the
        # entire 6x6 grid search a second time for no benefit. One fit suffices.
        grid = GridSearchCV(pipe, params_grid, cv=5).fit(xtr, ytr)
        print(f'best cross-validation accuracy: {grid.best_score_}')
        print(f'test score: {grid.score(xte, yte)}')
        print(f'best params: {grid.best_params_}')

    def test_make_pipeline(self):
        """Show that make_pipeline auto-generates step names from class names."""
        # Verbose equivalent of pipe_short, kept for side-by-side comparison:
        # make_pipeline derives the names ('minmaxscaler', 'svc') automatically.
        pipe_long = Pipeline([('scaler', MinMaxScaler()), ('svm', SVC(C=100))])
        pipe_short = make_pipeline(MinMaxScaler(), SVC(C=100))
        print(f'show pipe step name via make_pipeline: {pipe_short.steps}')

    def test_show_pipe_step_attrs(self):
        """Access fitted step attributes via ``named_steps``; inspect best model."""
        # Duplicate StandardScaler steps get numbered names; the PCA step keeps
        # the plain name 'pca', so it stays addressable via named_steps.
        pipe = make_pipeline(StandardScaler(), PCA(n_components=2), StandardScaler()).fit(self.cancer.data, self.cancer.target)
        print(f'show pipe PCA main component shape: {pipe.named_steps["pca"].components_.shape}')
        xtr, xte, ytr, yte = train_test_split(self.cancer.data, self.cancer.target, random_state=0)
        pipe = make_pipeline(StandardScaler(), LogisticRegression())
        params_pipe = {'logisticregression__C': [0.01, 0.1, 1, 10, 100]}
        grid = GridSearchCV(pipe, params_pipe, cv=5).fit(xtr, ytr)
        print(f'best estimators: {grid.best_estimator_}')
        print(f'logistic regression best estimator: {grid.best_estimator_.named_steps["logisticregression"]}')
        print(f'best model coef: {grid.best_estimator_.named_steps["logisticregression"].coef_}')

    def test_chain_comprehensive(self):
        """Jointly tune preprocessing (polynomial degree) and model (alpha);
        compare against a plain ridge grid, then heat-map the CV scores."""
        xtr, xte, ytr, yte = train_test_split(*self.boston, random_state=0)
        pipe = make_pipeline(StandardScaler(), PolynomialFeatures(), Ridge())
        params_grid = {"polynomialfeatures__degree": [1, 2, 3], "ridge__alpha": [0.001, 0.01, 0.1, 1, 10, 100]}
        grid = GridSearchCV(pipe, params_grid, cv=5, n_jobs=-1).fit(xtr, ytr)
        print(f'grid best params: {grid.best_params_}')
        print(f'grid best scores: {grid.score(xte, yte)}')

        # normal grid
        params_grida = {'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
        pipea = make_pipeline(StandardScaler(), Ridge())
        grida = GridSearchCV(pipea, params_grida, cv=5).fit(xtr, ytr)
        print(f'normal ridge without polynomial features scores: {grida.score(xte, yte)}')

        # Reshape the flat CV results into (degrees, alphas) for the heat map:
        # 3 degree values in rows, the 6 alpha values in columns.
        plot.matshow(grid.cv_results_['mean_test_score'].reshape(3, -1), vmin=0, cmap='viridis')
        plot.xlabel('ridge__alpha')
        plot.ylabel('polynomialfeatures__degree')
        plot.xticks(range(len(params_grid['ridge__alpha'])), params_grid['ridge__alpha'])
        plot.yticks(range(len(params_grid['polynomialfeatures__degree'])), params_grid['polynomialfeatures__degree'])
        plot.colorbar()
        plot.show()
