#!/usr/bin/env python


def main(input):
    """Train a random forest classifier on *input* via a grid-searched
    scaler + RF pipeline.

    Reads the input table, prepares train/test splits with
    ``rf.mlutil.data_prep``, grid-searches preprocessing and RF
    hyper-parameters, reports training metrics, scores the held-out rows
    through ``rf.rf_test.main`` and finally runs recursive feature
    elimination with the tuned forest.

    Parameters
    ----------
    input : str
        Path of the input data file understood by ``rf.mlutil.read``.

    Raises
    ------
    ValueError
        If the prepared labels contain fewer than two classes
        (previously this surfaced later as a NameError on ``scoring``).
    """
    # NOTE: imports stay function-local to match the original layout.
    # `input` shadows the builtin but is kept for caller compatibility.
    from sklearn.preprocessing import MinMaxScaler, StandardScaler
    from sklearn.preprocessing import QuantileTransformer
    from sklearn.ensemble import RandomForestClassifier as rfc
    from sklearn.metrics import accuracy_score, fbeta_score
    from sklearn.metrics import make_scorer
    from sklearn.metrics import classification_report
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline
    from sklearn.feature_selection import RFECV
    from sklearn.externals import joblib
    from collections import OrderedDict
    from rf.mlutil import Steps, format_rfe, data_prep, read, \
        format_grid_search, format_rf_train_results, dups
    from rf.mlutil import auc_classification
    from rf.const import mode, class_col, filt, n_jobs, n_trees
    from rf.rf_test import main as rf_test_main

    df = read(input)
    dups(df, axis=0)  # report duplicate rows
    dups(df, axis=1)  # report duplicate columns

    X, y, X_test, dat_fill_test, les = data_prep(df,
                                                 class_col=class_col,
                                                 filt=filt,
                                                 mode=mode, )
    # Persist fill values and label encoders for test-time preparation.
    joblib.dump(dat_fill_test, 'dat_fill_class.pkl')
    joblib.dump(les, 'les.pkl')

    y_uniq = list(set(y))
    n_classes = len(y_uniq)

    # The FIRST entry of `scoring` is the primary metric: it drives
    # GridSearchCV's `refit` and the RFECV scoring below.
    if n_classes == 2:
        print('ROC_AUC')
        scoring = OrderedDict(
            auc=make_scorer(auc_classification, ),
            f1=make_scorer(fbeta_score, beta=1, pos_label=y_uniq[1]),
            accuracy='accuracy', )
    elif n_classes > 2:
        print('FBETA')
        scoring = OrderedDict(
            fbeta=make_scorer(fbeta_score, beta=1, average='macro'),
            accuracy=make_scorer(accuracy_score), )
    else:
        # Previously fell through and left `scoring` unbound (NameError).
        raise ValueError('need at least 2 classes, got %d' % n_classes)

    estimator = rfc(n_estimators=n_trees, criterion='gini',
                    max_depth=None, min_samples_split=2, min_samples_leaf=1,
                    min_weight_fraction_leaf=0.0, max_features='auto',
                    max_leaf_nodes=None, min_impurity_decrease=0.0,
                    bootstrap=False,
                    oob_score=False, n_jobs=n_jobs, random_state=None,
                    verbose=0, warm_start=False, class_weight=None)

    steps = Steps()
    steps.add('scalar', MinMaxScaler())
    steps.add('estimator', estimator)

    # Keys must match the step names registered on `steps` just above.
    param_grid = [dict(
        scalar=[
            StandardScaler(),
            MinMaxScaler(),
            QuantileTransformer(subsample=10000), ],
        estimator__class_weight=[None, 'balanced', ],
        estimator__n_estimators=[10, 20, ],
        estimator__max_features=[None, 'log2', 'auto'],
    )]

    pipeline = Pipeline(steps.values())
    print(pipeline)
    print(param_grid)

    n_jobs_grid = 2
    cv = 3
    iid = True
    # First key of the OrderedDict = primary metric (works on Py2 and Py3,
    # unlike the original `scoring.keys()[0]`).
    first_metric = next(iter(scoring))

    grid_search = GridSearchCV(pipeline,
                               param_grid,
                               scoring=scoring,
                               n_jobs=n_jobs_grid, iid=iid,
                               refit=first_metric,
                               cv=cv, verbose=0,
                               pre_dispatch='2*n_jobs',
                               error_score='raise',
                               return_train_score=True)

    grid_search.fit(X, y)

    format_grid_search(grid_search)

    estimator_best = grid_search.best_estimator_

    format_rf_train_results(estimator_best, X, y)

    # Score the held-out rows with the separately persisted model.
    rf_test_main(X_test)

    # classification_report expects (y_true, y_pred); the original passed
    # them swapped, transposing per-class precision and recall.
    y_pre_train = estimator_best.predict(X)
    print(classification_report(y, y_pre_train))

    # Recursive feature elimination on the tuned forest, scored with the
    # primary metric's scorer.
    rf_best = grid_search.best_estimator_.get_params()['estimator']
    rfecv = RFECV(rf_best, step=1, cv=cv,
                  scoring=scoring[first_metric],
                  verbose=0, n_jobs=n_jobs)
    rfecv.fit(X, y)

    format_rfe(rfecv, X)


if __name__ == '__main__':
    import argparse

    # Command-line entry point: a single positional input file plus an
    # (unused here) output-prefix option.
    parser = argparse.ArgumentParser(description='rf test')
    parser.add_argument('file', help='input file')
    parser.add_argument('-p', '--prefix', default='1', help='out prefix')
    cli_args = parser.parse_args()
    main(cli_args.file)
