# coding:utf-8
# Author : hiicy redldw
# Date : 2019/06/04

import warnings
from sklearn import preprocessing
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
import seaborn as sns
from matplotlib import pyplot as plt

"""
数据总览
数据情况了解 确定哪些该怎么处理
先处理缺失值
特征工程:
    与目标值的关联
    特征间的相互性
利用模型筛选特征
模型融合：
    第一层 利用kfold 计算多个模型的输出
    第二层：用xgb接第一个的输入 训练
"""
warnings.filterwarnings('ignore')
sns.set_style("whitegrid")
abbrfile = r'F:\Resources\DataSets\Happiness\happiness_train_abbr.csv'
file = r'F:\Resources\DataSets\Happiness\happiness_train_complete.csv'
testfile = r'F:\Resources\DataSets\Happiness\happiness_test_complete.csv'
abbr = pd.read_csv(abbrfile)
data = pd.read_csv(file, encoding='gb2312')
test = pd.read_csv(testfile, encoding='gb2312')
# Give the test frame a dummy target so train and test can go through one
# shared preprocessing pipeline (same distribution, same transforms).
test['happiness'] = 0
# DataFrame.append is deprecated/removed in modern pandas; pd.concat keeps
# the same layout append produced (train rows first, then the test rows).
data = pd.concat([data, test], sort=False)  # rows [:8000] = train, [8000:] = test
# NOTE: 'id' is intentionally NOT dropped here any more — it is needed later
# to build the submission file and is removed right before modelling.
data.drop(['edu_other', 'index', 'join_party', 'edu_yr', 'survey_time', 'work_yr',
           'marital_1st', 's_birth', 'marital_now', 'f_birth', 'm_birth'], inplace=True, axis=1)
# Binarise free-text columns: any non-null answer -> 1, missing -> 0.
# .loc avoids the chained-assignment pattern (data[col][mask] = ...) that
# can silently write into a temporary copy.
data.loc[data['invest_other'].notnull(), 'invest_other'] = 1
data['invest_other'].fillna(value=0, inplace=True)  # "owns other property": yes/no flag
data.loc[data['property_other'].notnull(), 'property_other'] = 1
data['property_other'].fillna(value=0, inplace=True)
data['minor_child'].fillna(value=-8, inplace=True)
# Duplicate edu_status fillna removed (the original ran the same line twice).
data['edu_status'].fillna(value=-2, inplace=True)
# Few values are missing and the mode dominates, so fill with the mode.
# value_counts() already ignores NaN; idxmax() replaces the deprecated argmax().
data['hukou_loc'].fillna(data['hukou_loc'].value_counts().idxmax(), inplace=True)
# data['s_edu'].interpolate(method='nearest', inplace=True)
data['s_political'].interpolate(method='nearest', inplace=True)
data['s_hukou'].interpolate(method='nearest', inplace=True)
data['social_neighbor'].fillna(value=-8, inplace=True)
data['social_friend'].fillna(value=-8, inplace=True)
# Family income: fill with the mean of the valid (positive) incomes only.
data['family_income'].fillna(
    data[(data['family_income'].notnull()) & (data['family_income'] > 0)]['family_income'].mean(),
    inplace=True)
# Standardise the continuous columns in one pass.
numeric_cols = ['floor_area', 'income', 'family_income',
                'height_cm', 'weight_jin', 'inc_exp']
scaler = preprocessing.StandardScaler().fit(data[numeric_cols])
data[numeric_cols] = scaler.transform(data[numeric_cols])

# s_income still has NaNs, so standardise only the observed rows.
# StandardScaler requires 2-D input: reshape the 1-D series (the original
# passed a Series, which raises in modern scikit-learn).
s_income_mask = data['s_income'].notnull()
data.loc[s_income_mask, 's_income'] = preprocessing.StandardScaler().fit_transform(
    data.loc[s_income_mask, 's_income'].values.reshape(-1, 1)).ravel()

from sklearn import ensemble
from sklearn import model_selection
from sklearn.metrics import accuracy_score, make_scorer


# REW:多模型预测，然后再做模型的融合 来预测填充s_edu
def fill_s_edu(miss_sedu_train, miss_sedu_test, column):
    """Impute the missing entries of `column` with a two-model blend.

    A gradient-boosting and a random-forest classifier are each tuned with a
    (single-point) grid search on the rows where `column` is known, used to
    predict it on the rows where it is missing, and their predicted codes are
    blended by averaging.  `miss_sedu_test` is modified in place (temporary
    `<column>_GB` / `<column>_RF` columns are added and then dropped) and the
    imputed series is returned.
    """
    known_x = miss_sedu_train.drop([column, 'happiness'], axis=1)
    known_y = miss_sedu_train[column].astype('int')
    unknown_x = miss_sedu_test.drop([column, 'happiness'], axis=1)

    # Model 1: gradient boosting.
    gb_search = model_selection.GridSearchCV(
        ensemble.GradientBoostingClassifier(random_state=10),
        {'n_estimators': [2000], 'max_depth': [4], 'learning_rate': [0.01], 'max_features': [5]},
        cv=10, n_jobs=25, verbose=1, scoring=make_scorer(accuracy_score))
    gb_search.fit(known_x, known_y)
    print(f'{column} feature Best GB Params:' + str(gb_search.best_params_))
    print(f'{column} feature Best GB Score:' + str(gb_search.best_score_))
    print(f'GB Train Error for "{column}" Feature Classifier:' + str(
        gb_search.score(known_x, known_y)))
    miss_sedu_test.loc[:, f'{column}_GB'] = gb_search.predict(unknown_x)
    print(miss_sedu_test[f'{column}_GB'][:4])

    # Model 2: random forest.
    rf_search = model_selection.GridSearchCV(
        ensemble.RandomForestClassifier(),
        {'n_estimators': [200], 'max_depth': [5], 'random_state': [0]},
        cv=10, n_jobs=25, verbose=1, scoring=make_scorer(accuracy_score))
    rf_search.fit(known_x, known_y)
    print(f'{column} feature Best RF Params:' + str(rf_search.best_params_))
    print(f'{column} feature Best RF Score:' + str(rf_search.best_score_))
    print(f'RF Train Error for "{column}" Feature Classifier' + str(
        rf_search.score(known_x, known_y)))
    miss_sedu_test.loc[:, f'{column}_RF'] = rf_search.predict(unknown_x)
    print(miss_sedu_test[f'{column}_RF'][:4])

    # Blend the two models by averaging their predicted category codes.
    print('shape1', miss_sedu_test[column].shape,
          miss_sedu_test[[f'{column}_GB', f'{column}_RF']].mode(axis=1).shape)
    miss_sedu_test.loc[:, column] = np.mean(
        [miss_sedu_test[f'{column}_GB'], miss_sedu_test[f'{column}_RF']], axis=0)
    print(miss_sedu_test[column][:4])

    miss_sedu_test.drop([f'{column}_GB', f'{column}_RF'], axis=1, inplace=True)

    return miss_sedu_test[column]


# Impute each sparse spouse/work column with the two-model blend above.
columns_to_fill = ['s_edu', 's_income', 's_work_exper', 's_work_status', 's_work_type',
                   'work_status', "work_type", 'work_manage']
for target_col in columns_to_fill:
    # Drop the OTHER still-missing columns so their NaNs cannot leak into
    # the imputation model for target_col.
    others = [c for c in columns_to_fill if c != target_col]
    frame = data.drop(others, axis=1)
    known_rows = frame[frame[target_col].notnull()]
    missing_rows = frame[frame[target_col].isnull()]
    # BUG FIX: the original chained assignment data[col][mask] = ... can write
    # into a temporary copy.  .loc with positional .values guarantees the fill
    # reaches `data` and is immune to the duplicated index created by
    # concatenating train and test (missing_rows preserves data's row order).
    data.loc[data[target_col].isnull(), target_col] = fill_s_edu(
        known_rows, missing_rows, column=target_col).values
    del frame, others

# # 利用TSNE降维
# from sklearn.decomposition import PCA
# tsne = PCA(n_components=len(data.columns),random_state=32)
# train_y = data.pop('happiness')
# tsne.fit(data)
# train_x = tsne.transform(data)
# test_x = tsne.transform(test)
# Positional split: rows [:8000] are the training survey, the rest the test set.
# .copy() so pop/drop below cannot write through a row-slice view of `data`.
train_x = data[:8000].copy()
test_x = data[8000:].copy()
train_y = train_x.pop('happiness').values
# Submission ids come from the untouched `test` frame, so this works whether
# or not 'id' survived the earlier column drops (the original popped 'id'
# from frames it had already been dropped from, which raises KeyError).
ids = test['id']
train_x.drop(columns=['id'], errors='ignore', inplace=True)
test_x.drop(columns=['id'], errors='ignore', inplace=True)

# REW:利用模型来筛选特征 利用不同的模型来对特征进行筛选，选出较为重要的特征
# REW: use several tree-ensemble models to rank features and keep the union
# of each model's top-n as the candidate feature set.
def get_top_n_features(x_train: pd.DataFrame, y_train: pd.Series, top_n_features):
    """Rank features with four tree ensembles and return the union of top-n.

    Each estimator is tuned with a small grid search, its feature importances
    are sorted, and the per-model top `top_n_features` names are concatenated
    and de-duplicated.

    Returns
    -------
    (topNfeatures, features_importance) : the de-duplicated union of the
    per-model top-n feature names, and the concatenated importance tables.
    """

    def _rank(name, estimator, param_grid):
        # Grid-search one estimator, print its scores, and return its
        # (top-n feature names, full importance ranking).
        grid = model_selection.GridSearchCV(estimator, param_grid, n_jobs=25, cv=10, verbose=1)
        # BUG FIX: the original fitted the bare estimator (gb_est/ada_est/et_est)
        # instead of the grid, so grid.best_params_ raised NotFittedError.
        grid.fit(x_train, y_train)
        print(f'Top N Features Best {name} Params:' + str(grid.best_params_))
        print(f'Top N Features Best {name} Score:' + str(grid.best_score_))
        print(f'Top N Features {name} Train Score:' + str(grid.score(x_train, y_train)))
        ranking = pd.DataFrame({'feature': list(x_train),
                                'importance': grid.best_estimator_.feature_importances_}
                               ).sort_values('importance', ascending=False)
        top = ranking.head(top_n_features)['feature']
        print(f'Sample 10 Features from {name} Classifier')
        print(str(top[:10]))
        return top, ranking

    top_rf, imp_rf = _rank(
        'RF', ensemble.RandomForestClassifier(random_state=0),
        {'n_estimators': [500], 'min_samples_split': [2, 3], 'max_depth': [20]})
    top_gb, imp_gb = _rank(
        'GB', ensemble.GradientBoostingClassifier(random_state=1),
        {'n_estimators': [500], 'learning_rate': [0.001, 0.1], 'max_depth': [20]})
    # BUG FIX: AdaBoostClassifier has no max_depth parameter (depth belongs to
    # its base estimator); the original grid made GridSearchCV raise.
    top_ada, imp_ada = _rank(
        'ada', ensemble.AdaBoostClassifier(random_state=2),
        {'n_estimators': [500], 'learning_rate': [0.1, 1.]})
    # BUG FIX: 'min_sample_split' was a typo for 'min_samples_split'.
    top_et, imp_et = _rank(
        'et', ensemble.ExtraTreesClassifier(random_state=3),
        {'n_estimators': [500], 'min_samples_split': [3, 6], 'max_depth': [20]})

    # BUG FIX: the original concatenated the ada importance DataFrame here
    # instead of its top-n feature names.
    topNfeatures = pd.concat([top_rf, top_gb, top_et, top_ada],
                             ignore_index=True).drop_duplicates()
    features_importance = pd.concat([imp_rf, imp_ada, imp_et, imp_gb],
                                    ignore_index=True)
    return topNfeatures, features_importance


top_n_feature = 70
topNfeatures, features_importance = get_top_n_features(train_x, train_y, top_n_feature)
x_train = train_x[topNfeatures].values
x_test = test_x[topNfeatures].values
nfold = 7
n_train = len(x_train)
n_test = len(x_test)
# Unshuffled folds keep row order stable so out-of-fold labels line up.
# BUG FIX: random_state must not be set when shuffle=False — modern
# scikit-learn raises ValueError for that combination.
KF = KFold(n_splits=nfold, shuffle=False)


def plotFeature(feature_importance: pd.DataFrame, order=10):
    """Horizontal bar chart of relative feature importance.

    Takes the first `order` rows of `feature_importance` (expected columns:
    'feature', 'importance'), keeps only features whose importance is above
    40% of the strongest one, saves the figure to ./f.jpg and shows it.
    """
    # BUG FIX: `order` was ignored — the slice was hard-coded to 10.
    top = feature_importance[:order]
    # Scale to percent of the strongest feature.  Work on positional .values:
    # the frame arrives sorted, so its label index would misalign np.where's
    # positional indices.
    relative = (100 * top['importance'] / top['importance'].max()).values
    keep = np.where(relative > 0.4 * 100)[0]
    pos = np.arange(keep.shape[0]) + 0.5

    plt.figure(figsize=(12, 8))
    plt.subplot(111)
    plt.barh(pos, relative[keep][::-1])
    # BUG FIX: label exactly the bars that were kept — the original passed all
    # `order` labels even when the >40% filter removed some bars.
    plt.yticks(pos, top['feature'].values[keep][::-1])
    plt.xlabel('Relative Importance')
    plt.title('Feature Importance')
    plt.savefig('./f.jpg')
    plt.show()


# REW:模型融合 根据stack 用第一层的输出作第二层的输入
# 用交叉验证去执行
# REW: level-1 of the stack — out-of-fold predictions per base model,
# produced with cross-validation.
def clf_out(clf, trainx, trainy, testx, kf=None):
    """Out-of-fold stacking predictions for one first-level model.

    Fits `clf` on each training fold, predicts the held-out fold (giving the
    out-of-fold training feature) and the full test set; per-fold test
    predictions are averaged.

    Parameters
    ----------
    clf : estimator with fit/predict.
    trainx, trainy : index-able training features / labels.
    testx : test features.
    kf : optional splitter with a .split(X) method; defaults to the
         module-level `KF`, so existing callers are unchanged.

    Returns
    -------
    (n_train, 1) and (n_test, 1) column vectors of predictions.
    """
    splitter = KF if kf is None else kf
    # Sizes are derived from the inputs instead of the module-level
    # n_train/n_test/nfold globals the original depended on.
    oof_train = np.zeros(len(trainx))
    fold_test_preds = []
    for fit_idx, holdout_idx in splitter.split(trainx):
        clf.fit(trainx[fit_idx], trainy[fit_idx])
        oof_train[holdout_idx] = clf.predict(trainx[holdout_idx])
        fold_test_preds.append(clf.predict(testx))
    oof_test = np.mean(fold_test_preds, axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)


# BUG FIX: DecisionTreeClassifier lives in sklearn.tree, not sklearn.ensemble —
# the original `ensemble.DecisionTreeClassifier` raises AttributeError.
from sklearn import tree

# First-level (base) models for the stacking ensemble.
rf = ensemble.RandomForestClassifier(n_estimators=500, warm_start=True, max_features='sqrt', max_depth=6,
                                     min_samples_split=3,
                                     min_samples_leaf=2, n_jobs=-1, verbose=0)

ada = ensemble.AdaBoostClassifier(n_estimators=500, learning_rate=0.1)

et = ensemble.ExtraTreesClassifier(n_estimators=500, n_jobs=-1, max_depth=8, min_samples_leaf=2, verbose=0)

gb = ensemble.GradientBoostingClassifier(n_estimators=500, learning_rate=0.008, min_samples_split=3, min_samples_leaf=2,
                                         max_depth=5, verbose=0)

dt = tree.DecisionTreeClassifier(max_depth=8)

# Level 1: out-of-fold train predictions and averaged test predictions
# from each base model.
rfxtrain, rfxtest = clf_out(rf, x_train, train_y, x_test)
adaxtrain, adaxtest = clf_out(ada, x_train, train_y, x_test)
etxtrain, etxtest = clf_out(et, x_train, train_y, x_test)
gbxtrain, gbxtest = clf_out(gb, x_train, train_y, x_test)
dtxtrain, dtxtest = clf_out(dt, x_train, train_y, x_test)

# Level-2 design matrices: one column per base model.
tX = np.concatenate([rfxtrain, adaxtrain, etxtrain, gbxtrain, dtxtrain], axis=1)
# BUG FIX: the test matrix must be stacked column-wise too — the original
# omitted axis=1 and produced a (5 * n_test, 1) array, which the level-2
# model cannot consume.
tE = np.concatenate([rfxtest, adaxtest, etxtest, gbxtest, dtxtest], axis=1)
import xgboost as xgb

# Level 2: XGBoost trained on the base-model outputs.
clf = xgb.XGBClassifier(max_depth=10, learning_rate=0.001, n_estimators=200, objective='multi:softmax',
                        colsample_bytree=0.8)

# Hold out 10% of the stacked training rows to estimate blend accuracy.
onex, twox, oney, twoy = train_test_split(tX, train_y,
                                          test_size=0.1, random_state=5)

clf.fit(onex, oney)
print(clf.score(twox, twoy))
predictions = clf.predict(tE)
StackingSubmission = pd.DataFrame({'id': ids, 'happiness': predictions})
StackingSubmission.to_csv('happiness_submit.csv', index=False, sep=',')

# print(abbr.head())
# print(data.max(axis=0))
# print(data.min(axis=0))
# print(data[data['property_other'].notnull()]['property_other'].count())
# print(data[data['edu_other'].notnull()][])


# print(abbr.describe())
# print(abbr.info())
# print(len(abbr.isnull()))
