import os
import unittest

import numpy as np
import pandas as pd
from matplotlib import pyplot as plot
from IPython.display import display

import mglearn
from sklearn import datasets
from sklearn.model_selection import train_test_split, cross_val_score, KFold, LeaveOneOut, ShuffleSplit, GroupKFold
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR, SVC
from sklearn.feature_selection import SelectPercentile, SelectFromModel, RFE
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report


class TestBase(unittest.TestCase):
    """Shared fixture base: loads the toy datasets and the adult census data
    used by all the test classes below.

    Exposes:
        wave       -- (X, y) regression toy data from mglearn
        cancer     -- breast cancer classification dataset
        iris       -- iris classification dataset
        blob       -- (X, y) synthetic blobs, 12 samples
        digits     -- handwritten digits dataset
        data       -- raw adult census DataFrame
        data_dummy -- one-hot encoded subset of the adult columns
    """

    def __init__(self, *args, **kwargs):
        super(TestBase, self).__init__(*args, **kwargs)
        self.wave = mglearn.datasets.make_wave(n_samples=100)
        self.cancer = datasets.load_breast_cancer()
        self.iris = datasets.load_iris()
        self.blob = datasets.make_blobs(n_samples=12, random_state=0)
        self.digits = datasets.load_digits()
        # Locate adult.data inside the installed mglearn package rather than
        # via a hard-coded absolute path, so the tests run on any machine.
        file_path = os.path.join(os.path.dirname(mglearn.__file__), 'data', 'adult.data')
        # The file has no header row; supply the census column names manually.
        self.data = pd.read_csv(file_path, header=None, index_col=False,
                                names=['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                                       'marial-status', 'occupation', 'relationship', 'race', 'gender',
                                       'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',
                                       'income'])
        attrs = ['age', 'workclass', 'education', 'gender', 'hours-per-week', 'occupation', 'income']
        # One-hot encode the selected columns (pandas dummy coding); numeric
        # columns such as 'age' pass through unchanged.
        self.data_dummy = pd.get_dummies(self.data[attrs])


class TestOneHot(TestBase):
    """Feature-engineering demos: one-hot encoding, binning, interaction
    features, polynomial features, and a kernel SVR baseline on wave data."""

    def _binned_wave(self):
        """Shared setup for the binning demos below.

        Digitizes the wave feature (and a dense line used for plotting
        predictions) into equal-width bins on [-3, 3] and one-hot encodes
        the bin index.

        Returns:
            (bins, line, x_bin, line_bin) where x_bin / line_bin are dense
            one-hot indicator matrices for the training data and the line.
        """
        bins = np.linspace(-3, 3, 11)
        line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
        which_bin = np.digitize(self.wave[0], bins=bins)
        # sparse defaults to True, which returns a sparse matrix; we need a
        # dense array here or np.hstack in the callers below would fail.
        encoder = OneHotEncoder(sparse=False).fit(which_bin)
        x_bin = encoder.transform(which_bin)
        line_bin = encoder.transform(np.digitize(line, bins=bins))
        return bins, line, x_bin, line_bin

    def test_extract_features_and_predict_logistic(self):
        """One-hot encode the adult data and score a logistic regression."""
        # Slice from 'age' through the last occupation dummy; this excludes
        # the income_* columns that get_dummies produced from the target.
        features = self.data_dummy.loc[:, 'age':'occupation_ Transport-moving']
        x, y = features.values, self.data_dummy['income_ >50K'].values
        print(f'x shape: {x.shape}, y shape: {y.shape}, x origin shape {self.data_dummy.shape}')
        xtr, xte, ytr, yte = train_test_split(x, y, random_state=0)
        reg = LogisticRegression().fit(xtr, ytr)
        print(f'accuracy with one hot: {reg.score(xte, yte):.3f}')

    def test_onehot_classifier(self):
        """Show that get_dummies only encodes string columns unless the
        integer column is cast to str (or listed in `columns`)."""
        demo_df = pd.DataFrame({'integer feature': [0, 1, 2, 1],
                                'categorical feature': ['sock', 'box', 'fox', 'box']})
        display(demo_df)
        # Cast to str so the integer column is treated as categorical too.
        demo_df['integer feature'] = demo_df['integer feature'].astype(str)
        display(pd.get_dummies(demo_df, columns=['integer feature', 'categorical feature']))

    def test_plot_binning(self):
        """Linear regression vs. decision tree on binned (one-hot) features:
        both become piecewise-constant and predict identically per bin."""
        bins, line, x_bin, line_bin = self._binned_wave()
        print(f'x_bin first 5: {x_bin[:5]}')
        reg = LinearRegression().fit(x_bin, self.wave[1])
        plot.plot(line, reg.predict(line_bin), label='linear regression binned')
        reg = DecisionTreeRegressor(min_samples_split=3).fit(x_bin, self.wave[1])
        plot.plot(line, reg.predict(line_bin), label='decision tree binned')
        plot.plot(self.wave[0], self.wave[1], 'o', c='k')
        plot.vlines(bins, -3, 3, linewidth=1, alpha=.2)
        plot.legend(loc='best')
        plot.ylabel('regression output')
        plot.xlabel('input feature')
        plot.show()

    def test_plot_bin_gradient(self):
        """Append the raw feature next to the bin indicators so the binned
        linear model also learns a single global slope."""
        bins, line, x_bin, line_bin = self._binned_wave()
        x_combined, line_combined = np.hstack([self.wave[0], x_bin]), np.hstack([line, line_bin])
        reg = LinearRegression().fit(x_combined, self.wave[1])
        print(f'line combine shape {line_combined.shape}')
        plot.plot(line, reg.predict(line_combined), label='linear regression combined')
        for bin in bins:
            plot.plot([bin, bin], [-3, 3], ':', c='k')
        plot.legend(loc='best')
        plot.ylabel('regression output')
        plot.xlabel('input feature')
        plot.plot(self.wave[0], self.wave[1], 'o', c='k')
        plot.show()

    def test_plot_interaction_feature(self):
        """Append bin-x-feature interaction products so each bin gets its
        own slope instead of one shared slope."""
        bins, line, x_bin, line_bin = self._binned_wave()
        x_prod, line_prod = np.hstack([x_bin, self.wave[0] * x_bin]), np.hstack([line_bin, line * line_bin])
        print(f'x prod shape: {x_prod.shape}')
        reg = LinearRegression().fit(x_prod, self.wave[1])
        plot.plot(line, reg.predict(line_prod), label='linear regression product')
        for bin in bins:
            plot.plot([bin, bin], [-3, 3], ':', c='k')
        plot.plot(self.wave[0], self.wave[1], 'o', c='k')
        plot.ylabel('regression output')
        plot.xlabel('input feature')
        plot.show()

    def test_plot_polynomial_feature(self):
        """Degree-10 polynomial expansion fit with plain linear regression."""
        line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
        poly = PolynomialFeatures(degree=10, include_bias=False).fit(self.wave[0])
        x_poly = poly.transform(self.wave[0])
        reg = LinearRegression().fit(x_poly, self.wave[1])
        line_poly = poly.transform(line)
        plot.plot(line, reg.predict(line_poly), label='polynomial regression linear')
        plot.plot(self.wave[0], self.wave[1], 'o', c='k')
        plot.xlabel('input feature')
        plot.ylabel('regression output')
        plot.legend(loc='best')
        plot.show()

    def test_plot_svm_wave(self):
        """Kernel SVR learns a smooth nonlinear fit with no manual features."""
        line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
        for gamma in (1, 10):
            svr = SVR(gamma=gamma).fit(*self.wave)
            plot.plot(line, svr.predict(line), label=f'SVR gamma={gamma}')
        plot.plot(*self.wave, 'o', c='k')
        plot.xlabel('input feature')
        plot.ylabel('regression feature')
        plot.legend(loc='best')
        plot.show()


class TestUnLinear(TestBase):
    """Demos for handling non-Gaussian (Poisson-distributed count) features."""

    def test_plot_random_distribute(self):
        """Visualize a Poisson count feature next to its Gaussian origin."""
        rng = np.random.RandomState(0)
        x_org = rng.normal(size=(1000, 3))
        w = rng.normal(size=3)
        x = rng.poisson(10 * np.exp(x_org))
        y = np.dot(x_org, w)

        counts = np.bincount(x[:, 0])
        print(f'bins shape: {counts.shape}, bins [:5]: {counts[:5]}')
        fig, (ax_counts, ax_gauss) = plot.subplots(2, 1, figsize=(10, 10))
        # Top: frequency of each integer count value (heavily skewed).
        ax_counts.bar(range(len(counts)), counts)
        ax_counts.set_ylabel('number of appearances')
        ax_counts.set_xlabel('x')
        # Bottom: the underlying Gaussian feature for comparison.
        ax_gauss.hist(x_org[:, 0], bins=30)
        ax_gauss.set_ylabel('number of appearances')
        ax_gauss.set_xlabel('y')
        plot.show()

    def test_predict_poisson_linear(self):
        """Score Ridge on raw Poisson counts, then show the log-transformed
        feature histogram (log makes the skewed counts more Gaussian-like)."""
        rng = np.random.RandomState(0)
        x_org = rng.normal(size=(1000, 3))
        w = rng.normal(size=3)
        x = rng.poisson(10 * np.exp(x_org))
        y = np.dot(x_org, w)
        xtr, xte, ytr, yte = train_test_split(x, y, random_state=0)
        score = Ridge().fit(xtr, ytr).score(xte, yte)
        print(f'ridge predict poisson distribute score: {score}')
        # log(x + 1) handles the zero counts safely.
        xtr_log = np.log(xtr + 1)
        plot.hist(xtr_log[:, 0], bins=25)
        plot.show()


class TestFeatureSelection(TestBase):
    """Compare univariate, model-based, and iterative feature selection on the
    cancer data augmented with 50 pure-noise columns."""

    def _plot_mask(self, mask):
        """Render a boolean feature mask as a one-row black/white strip."""
        plot.matshow(mask.reshape(1, -1), cmap='gray_r')
        plot.xlabel('sample index')
        plot.show()

    def test_feature_select_percentile(self):
        """Univariate selection: keep the top 50% of features by F-score."""
        rng = np.random.RandomState(42)
        noise = rng.normal(size=(len(self.cancer.data), 50))
        noisy = np.hstack([self.cancer.data, noise])
        xtr, xte, ytr, yte = train_test_split(noisy, self.cancer.target, random_state=0, test_size=.5)
        selector = SelectPercentile(percentile=50).fit(xtr, ytr)
        xtr_selected = selector.transform(xtr)
        print(f'before select shape: {xtr.shape}, after select shape: {xtr_selected.shape}')

        chosen = selector.get_support()
        print(f'feature selection mask: {chosen}')
        self._plot_mask(chosen)

    def test_feature_select_from_model(self):
        """Model-based selection via random-forest feature importances."""
        noise = np.random.RandomState(0).normal(size=(len(self.cancer.data), 50))
        noisy = np.hstack([self.cancer.data, noise])
        xtr, xte, ytr, yte = train_test_split(noisy, self.cancer.target, random_state=0)
        forest = RandomForestClassifier(n_estimators=100, random_state=42)
        selector = SelectFromModel(forest, threshold='median').fit(xtr, ytr)
        self._plot_mask(selector.get_support())

    def test_feature_select_iter(self):
        """Iterative selection: recursive feature elimination down to 40."""
        noise = np.random.RandomState(42).normal(size=(len(self.cancer.data), 50))
        noisy = np.hstack([self.cancer.data, noise])
        xtr, xte, ytr, yte = train_test_split(noisy, self.cancer.target, random_state=0, test_size=.5)
        forest = RandomForestClassifier(n_estimators=100, random_state=42)
        selector = RFE(forest, n_features_to_select=40).fit(xtr, ytr)
        self._plot_mask(selector.get_support())


class TestModelEvaluation(TestBase):
    """Cross-validation strategies: plain k-fold, leave-one-out, shuffle
    split, and group-aware k-fold."""

    def test_cross_evaluation(self):
        """Default 5-fold cross-validation of logistic regression on iris."""
        scores = cross_val_score(LogisticRegression(), self.iris.data, self.iris.target, cv=5)
        print(f'logistic cross evaluation score: {scores}, mean score: {scores.mean()}')

    def test_cross_kfold(self):
        """Explicit KFold splitters with 5 and 3 splits."""
        clf = LogisticRegression().fit(self.iris.data, self.iris.target)
        for n_splits in (5, 3):
            splitter = KFold(n_splits=n_splits)
            scores = cross_val_score(clf, self.iris.data, self.iris.target, cv=splitter)
            print(f'{n_splits} fold cross score: {scores}')

    def test_cross_leave_one(self):
        """Leave-one-out: one score per sample."""
        clf = LogisticRegression().fit(self.iris.data, self.iris.target)
        scores = cross_val_score(clf, self.iris.data, self.iris.target, cv=LeaveOneOut())
        print(f'score number: {len(scores)}, score mean: {scores.mean()}')

    def test_cross_shuffle_split(self):
        """Ten random 50/50 train/test shuffles."""
        splitter = ShuffleSplit(test_size=.5, train_size=.5, n_splits=10)
        scores = cross_val_score(LogisticRegression(), self.iris.data, self.iris.target, cv=splitter)
        print(f'shuffle split scores: {scores}')

    def test_cross_group_split(self):
        """Group k-fold keeps all samples sharing a group label in one fold."""
        group_labels = [0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3]
        scores = cross_val_score(LogisticRegression(), *self.blob,
                                 groups=group_labels, cv=GroupKFold(n_splits=4))
        print(f'cross validation scores: {scores}')


class TestGridCross(TestBase):
    """Grid search over SVC hyper-parameters combined with cross-validation,
    plus a dummy-classifier baseline with a classification report."""

    def test_grid_and_cross(self):
        """Fit a 6x6 C/gamma grid search and report its best results."""
        param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
                      'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
        xtr, xte, ytr, yte = train_test_split(self.iris.data, self.iris.target, random_state=0)
        search = GridSearchCV(SVC(), param_grid, cv=5)
        search.fit(xtr, ytr)
        print(f'test grid search score: {search.score(xte, yte)}')
        print(f'test grid search best params: {search.best_params_}')
        print(f'test grid search best scores: {search.best_score_}')
        print(f'test grid search best all params: {search.best_estimator_}')

    def test_visualization_cross_and_grid(self):
        """Heatmap of mean cross-validation score over the C/gamma grid."""
        param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
                      'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
        xtr, xte, ytr, yte = train_test_split(self.iris.data, self.iris.target, random_state=0)
        search = GridSearchCV(SVC(), param_grid, cv=5)
        search.fit(xtr, ytr)
        results = pd.DataFrame(search.cv_results_)
        # One mean score per parameter combination; reshape to the 6x6 grid.
        scores = np.array(results['mean_test_score']).reshape(6, 6)
        mglearn.tools.heatmap(scores, xlabel='gamma', xticklabels=param_grid['gamma'],
                              ylabel='C', yticklabels=param_grid['C'], cmap='viridis')
        plot.show()

    def test_nested_cross(self):
        """Nested CV: an outer 5-fold loop around the grid search itself."""
        param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
                      'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
        inner_search = GridSearchCV(SVC(), param_grid, cv=5)
        scores = cross_val_score(inner_search, self.iris.data, self.iris.target, cv=5)
        print(f'nested cross scores: {scores}, \n nested cross mean scores: {scores.mean()}')

    def test_digits_predict_metric(self):
        """Classification report for a most-frequent dummy baseline on the
        imbalanced nine-vs-rest digits task."""
        is_nine = self.digits.target == 9
        xtr, xte, ytr, yte = train_test_split(self.digits.data, is_nine, random_state=0)
        baseline = DummyClassifier(strategy='most_frequent').fit(xtr, ytr)
        predictions = baseline.predict(xte)
        print(f'{classification_report(yte, predictions, target_names=["not nine", "nine"])}')

