# coding:utf-8
# Author : hiicy redldw
# Date : 2019/04/11
from sklearn import model_selection, ensemble
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, \
    GradientBoostingClassifier
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import pandas as pd
import numpy as np
import xgboost as xgb
import keras
from keras.models import Model,Sequential
from keras.layers import LSTM,Dense

# Public API of this module: feature selection, the blended stacking model,
# and the small Keras regression network.
__all__ = ['get_top_n_features','from_Blend_model','Xhouse']


def get_top_n_features(train_data_X, train_data_Y, top_n_features):
    """Select the union of top-N important features from five tree-based models.

    Each model (RandomForest, AdaBoost, ExtraTrees, GradientBoosting,
    DecisionTree) is tuned with a small GridSearchCV, its feature importances
    are ranked, and the top ``top_n_features`` feature names are kept.  The
    per-model selections are concatenated and de-duplicated.

    Parameters
    ----------
    train_data_X : pandas.DataFrame
        Training features; column names are used as feature names.
    train_data_Y : array-like
        Training labels.
    top_n_features : int
        Number of top-ranked features to keep per model.

    Returns
    -------
    tuple of (pandas.Series, pandas.DataFrame)
        The de-duplicated union of the selected feature names, and the
        stacked per-model importance tables.
    """

    def _grid_top(tag, estimator, param_grid):
        # Fit a grid search for one estimator, log its scores, and return the
        # top-N feature names plus the full importance ranking.
        grid = model_selection.GridSearchCV(estimator, param_grid, n_jobs=2, cv=2, verbose=1)
        grid.fit(train_data_X, train_data_Y)
        print('Top N Features Best %s Params:' % tag + str(grid.best_params_))
        print('Top N Features Best %s Score:' % tag + str(grid.best_score_))
        print('Top N Features %s Train Score:' % tag + str(grid.score(train_data_X, train_data_Y)))
        imp_sorted = pd.DataFrame(
            {'feature': list(train_data_X),
             'importance': grid.best_estimator_.feature_importances_}
        ).sort_values('importance', ascending=False)
        top_n = imp_sorted.head(top_n_features)['feature']
        print('Sample 10 Features from %s Classifier:' % tag)
        print(str(top_n[:10]))
        return top_n, imp_sorted

    # randomforest
    features_top_n_rf, feature_imp_sorted_rf = _grid_top(
        'RF', RandomForestClassifier(random_state=0),
        {'n_estimators': [100], 'min_samples_split': [2, 3], 'max_depth': [20]})

    # AdaBoost
    features_top_n_ada, feature_imp_sorted_ada = _grid_top(
        'Ada', AdaBoostClassifier(random_state=0),
        {'n_estimators': [100], 'learning_rate': [0.01, 0.1]})

    # ExtraTree (original code mislabelled its best-score line as "DT")
    features_top_n_et, feature_imp_sorted_et = _grid_top(
        'ET', ExtraTreesClassifier(random_state=0),
        {'n_estimators': [100], 'min_samples_split': [3, 4], 'max_depth': [20]})

    # GradientBoosting
    features_top_n_gb, feature_imp_sorted_gb = _grid_top(
        'GB', GradientBoostingClassifier(random_state=0),
        {'n_estimators': [100], 'learning_rate': [0.01, 0.1], 'max_depth': [20]})

    # DecisionTree
    features_top_n_dt, feature_imp_sorted_dt = _grid_top(
        'DT', DecisionTreeClassifier(random_state=0),
        {'min_samples_split': [2, 4], 'max_depth': [20]})

    # Union of the five per-model selections; drop_duplicates keeps each
    # feature once even if several models chose it.
    features_top_n = pd.concat(
        [features_top_n_rf, features_top_n_ada, features_top_n_et,
         features_top_n_gb, features_top_n_dt],
        ignore_index=True).drop_duplicates()
    features_importance = pd.concat(
        [feature_imp_sorted_rf, feature_imp_sorted_ada, feature_imp_sorted_et,
         feature_imp_sorted_gb, feature_imp_sorted_dt], ignore_index=True)

    return features_top_n, features_importance


def from_Blend_model(train_data_x, train_data_y, test_data_x, ids, nfold=7):
    """Blend five tree regressors via out-of-fold stacking, then fit XGBoost.

    Each base model produces out-of-fold predictions on the training set and
    fold-averaged predictions on the test set; those five columns become the
    meta-features for an XGBoost classifier whose predictions are written to
    ``house_price_submit.csv``.

    Parameters
    ----------
    train_data_x : numpy.ndarray
        Training features (indexed with integer fold indices, so an ndarray
        is expected rather than a DataFrame).
    train_data_y : array-like
        Training targets.
    test_data_x : numpy.ndarray
        Test features.
    ids : array-like
        Row identifiers written to the ``Id`` column of the submission.
    nfold : int, optional
        Number of cross-validation folds (default 7).
    """
    # shuffle=False keeps fold membership aligned with row order; with no
    # shuffling a random_state is meaningless, and recent scikit-learn raises
    # ValueError if one is supplied, so none is passed here.
    KF = KFold(n_splits=nfold, shuffle=False)
    n_train = train_data_x.shape[0]
    n_test = len(test_data_x)

    def clf_out(clf, trainx, trainy, testx):
        # Out-of-fold predictions for the training set, and the fold-average
        # of the test-set predictions.
        oof_train = np.zeros((n_train,))
        oof_test = np.zeros((n_test,))
        oof_test_skf = np.zeros((nfold, n_test))
        for i, (trainindex, testindex) in enumerate(KF.split(trainx)):
            tx = trainx[trainindex]
            ty = trainy[trainindex]
            te = trainx[testindex]
            # Train on this fold's training split...
            clf.fit(tx, ty)
            # ...predict the held-out split to build out-of-fold features...
            oof_train[testindex] = clf.predict(te)
            # ...and predict the full test set for this fold.
            oof_test_skf[i, :] = clf.predict(testx)
        # The test-set meta-feature is the mean prediction across folds.
        oof_test[:] = oof_test_skf.mean(axis=0)
        # Column vectors: train meta-feature, test meta-feature.
        return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)

    rf = ensemble.RandomForestRegressor(n_estimators=500, warm_start=True, max_features='sqrt', max_depth=6,
                                        min_samples_split=3,
                                        min_samples_leaf=2, n_jobs=-1, verbose=0)
    ada = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.1)

    et = ensemble.ExtraTreesRegressor(n_estimators=500, n_jobs=-1, max_depth=8, min_samples_leaf=2, verbose=0)

    gb = ensemble.GradientBoostingRegressor(n_estimators=500, learning_rate=0.008, min_samples_split=3,
                                            min_samples_leaf=2,
                                            max_depth=5, verbose=0)

    dt = DecisionTreeRegressor(max_depth=8)
    rfxtrain, rfxtest = clf_out(rf, train_data_x, train_data_y, test_data_x)
    adaxtrain, adaxtest = clf_out(ada, train_data_x, train_data_y, test_data_x)
    etxtrain, etxtest = clf_out(et, train_data_x, train_data_y, test_data_x)
    gbxtrain, gbxtest = clf_out(gb, train_data_x, train_data_y, test_data_x)
    dtxtrain, dtxtest = clf_out(dt, train_data_x, train_data_y, test_data_x)
    # Five meta-feature columns, one per base model.
    tX = np.concatenate([rfxtrain, adaxtrain, etxtrain, gbxtrain, dtxtrain], axis=1)
    tE = np.concatenate([rfxtest, adaxtest, etxtest, gbxtest, dtxtest], axis=1)

    # NOTE(review): an XGBClassifier with 'multi:softmax' is used here even
    # though SalePrice is continuous — this only works if the targets were
    # discretized upstream; confirm against the caller before changing.
    clf = xgb.XGBClassifier(max_depth=10, learning_rate=0.001, n_estimators=200, objective='multi:softmax',
                            colsample_bytree=0.8)
    clf.fit(tX, train_data_y)
    predictions = clf.predict(tE)
    Submission = pd.DataFrame({'Id': ids, "SalePrice": predictions})
    Submission.to_csv('house_price_submit.csv', index=False, sep=',')


def Xhouse(train_data_x, train_data_y, test_data_x, ids):
    """Train a small dense regression network and write predictions to CSV.

    A 32-16-1 fully-connected network with ReLU hidden layers and a linear
    output is fit on the training data and used to predict the test set;
    the predictions are written to ``house_price_submit2.csv``.

    Parameters
    ----------
    train_data_x : array-like, shape (n_samples, n_features)
        Training features.
    train_data_y : array-like
        Training targets.
    test_data_x : array-like
        Test features.
    ids : array-like
        Row identifiers written to the ``Id`` column of the submission.
    """
    indim = train_data_x.shape[1]
    model = Sequential()
    # Declare the input dimension on the first layer (the original computed
    # `indim` but never used it, leaving the model shape implicit).
    model.add(Dense(32, activation='relu', input_dim=indim))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='linear'))

    # 'rmse' is not a built-in Keras loss and raises "Unknown loss" at
    # compile time; 'mse' is the supported equivalent (minimizing MSE also
    # minimizes RMSE).
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(train_data_x, train_data_y, batch_size=4, epochs=10, validation_split=0.05)
    predictions = model.predict(test_data_x).reshape(-1)
    Submission = pd.DataFrame({'Id': ids, "SalePrice": predictions})
    Submission.to_csv('house_price_submit2.csv', index=False, sep=',')
