# %% [markdown]
# # 说明
# 特征工程部分照搬社区的baseline未作改动，仅仅进行了模型的部分参数调整和基于stacking的模型融合。
# 另外，由于是输出概率，后续按照回归去做，故删除了不平衡样本的处理，一开始当成分类去做，最高只能到0.8+，按照回归轻松0.9+
# 参数仅做了简单的调整，非最优，线下0.9361018703876826，线上验证0.93536922

# %% [markdown]
# # 查看数据

# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Matplotlib defaults: use the SimHei font so Chinese labels in figures render,
# and keep the minus sign displayable while a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# %%
# Load the raw competition files; train carries the target column 'y'.
train=pd.read_csv('train_set.csv')
test=pd.read_csv('test_set.csv')

# %%
# Train features (target dropped) stacked on top of test, re-indexed from 0.
# NOTE(review): `data` is never referenced later in this file — confirm it
# can be removed.
data = pd.concat([train.drop(['y'],axis=1),test],axis=0).reset_index(drop=True)

# %%
# Collect the categorical (object-dtype) and numeric (int64, excluding the
# ID and target) feature names, printing the unique values of each
# categorical column for inspection.
str_features = [c for c in train.columns if train[c].dtype == 'object']
num_features = [c for c in train.columns
                if train[c].dtype == 'int64' and c not in ('ID', 'y')]
for c in str_features:
    print(c, ':  ', train[c].unique())

# %% [markdown]
# # 特征工程

# %%
from scipy.stats import chi2_contingency       # chi-square test: categorical feature vs. label association
from scipy.stats import f_oneway,ttest_ind     # ANOVA / t-test: numeric feature vs. label association

# %%
#---------- dataset handling --------------#
from sklearn.model_selection import train_test_split        # train/validation split
from sklearn.model_selection import KFold,StratifiedKFold   # k-fold cross validation
from imblearn.combine import SMOTETomek,SMOTEENN            # combined over/under-sampling
from imblearn.over_sampling import SMOTE                    # over-sampling
from imblearn.under_sampling import RandomUnderSampler      # under-sampling

#---------- preprocessing --------------#
from sklearn.preprocessing import StandardScaler # standardization
from sklearn.preprocessing import OneHotEncoder  # one-hot encoding
from sklearn.preprocessing import OrdinalEncoder

# %% [markdown]
# ## 特征处理

# %% [markdown]
# **连续变量即数值化数据做标准化处理**

# %%
# 异常值处理
def outlier_processing(dfx):
    """Cap outliers in a numeric Series using Tukey's 1.5*IQR fences.

    Values above Q3 + 1.5*IQR are replaced by the largest in-range value,
    then values below Q1 - 1.5*IQR by the smallest in-range value (the low
    cap is computed after the high replacement, mirroring the original
    two-step order). Returns a new Series; the input is left untouched.
    """
    series = dfx.copy()
    q1, q3 = series.quantile(q=0.25), series.quantile(q=0.75)
    spread = 1.5 * (q3 - q1)
    lower, upper = q1 - spread, q3 + spread
    # Replace high outliers with the largest non-outlying observation.
    series[series > upper] = series[series <= upper].max()
    # Then replace low outliers with the smallest non-outlying observation.
    series[series < lower] = series[series >= lower].min()
    return series

# %%
# Cap IQR outliers in the four continuous columns of train and test.
# (Replaces eight copy-pasted assignments with one loop; each column is
# still processed independently per frame, exactly as before.)
for col in ['age', 'day', 'duration', 'campaign']:
    train[col] = outlier_processing(train[col])
    test[col] = outlier_processing(test[col])

# %%
# Sanity check: distributions of the numeric columns after outlier capping.
train[num_features].describe()

# %% [markdown]
# **分类变量做编码处理**

# %%
# One-hot encode the categorical columns, dropping the originals plus ID
# (and the target from the train frame).
# FIX: encoding train and test independently can produce different dummy
# columns if a category is absent from one frame; align test to the train
# column layout (missing dummies filled with 0). When the columns already
# match this is a no-op apart from ordering.
dummy_train = train.join(pd.get_dummies(train[str_features])).drop(str_features, axis=1).drop(['ID', 'y'], axis=1)
dummy_test = test.join(pd.get_dummies(test[str_features])).drop(str_features, axis=1).drop(['ID'], axis=1)
dummy_test = dummy_test.reindex(columns=dummy_train.columns, fill_value=0)

# %% [markdown]
# ## 统计检验与特征筛选 
# 
# 
# **连续变量-连续变量  相关分析**
# 
# **连续变量-分类变量  T检验/方差分析**
# 
# **分类变量-分类变量  卡方检验**

# %% [markdown]
# **对类别标签（离散变量）用卡方检验分析重要性**
# 
# 卡方检验在显著性水平0.05下判断差异是否显著：若p<=0.05，认为该特征与标签存在显著关联；若p>0.05，则不能认为该特征与标签存在显著差异

# %%
for feature in str_features:
    # Contingency table: label values against the category counts.
    contingency = pd.crosstab(train['y'], train[feature],
                              rownames=['y'], colnames=[feature])
    chi2_stat, p_value, dof, expected = chi2_contingency(contingency)
    # p <= 0.05 suggests the feature is associated with the label.
    print("{} 卡方检验p值: {:.4f}".format(feature, p_value))

# %% [markdown]
# **对连续变量做方差分析进行特征筛选**
# 

# %%
from sklearn.feature_selection import SelectKBest,f_classif

# ANOVA F-test of each numeric feature against the label; keep only the
# features whose p-value clears the 0.05 significance threshold.
f_scores, p_values = f_classif(train[num_features], train['y'])
n_significant = f_scores.shape[0] - (p_values > 0.05).sum()
selector = SelectKBest(f_classif, k=n_significant)
selector.fit(train[num_features], train['y'])

print('scores_:', selector.scores_)
print('pvalues_:', selector.pvalues_)
print('selected index:', selector.get_support(True))

# %%
# Standardization: fit the scaler on the train frame only, then apply the
# same mean/variance transform to both train and test feature frames.
standardScaler=StandardScaler()
ss=standardScaler.fit(dummy_train.loc[:,num_features])
dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])
dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])

# %%
# Design matrix and target used by every model below; y is later scored
# with roc_auc_score, so it appears to hold binary labels — TODO confirm.
X=dummy_train
y=train['y']

# %% [markdown]
# **因为后续是进行回归而非分类，个人认为没有必要进行不平衡处理，故此部分就注释掉了**

# %%
# X_train,X_valid,y_train,y_valid=train_test_split(X,y,test_size=0.2,random_state=2020)

# %%
# smote_tomek = SMOTETomek(random_state=2020)
# X_resampled, y_resampled = smote_tomek.fit_resample(X, y)

# %% [markdown]
# # 数据建模

# %%
#----------建模工具--------------#
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold,RepeatedKFold
import lightgbm as lgb
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from xgboost import XGBRegressor
from sklearn.linear_model import BayesianRidge
from catboost import CatBoostRegressor, Pool
from lightgbm import LGBMRegressor
#----------模型评估工具----------#
from sklearn.metrics import confusion_matrix # 混淆矩阵
from sklearn.metrics import classification_report
from sklearn.metrics import recall_score,f1_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve,auc
from sklearn.metrics import roc_auc_score

# %% [markdown]
# ## 模型建立和参数调整

# %% [markdown]
# ### 基于GridSearchCV的随机森林参数调整

# %%
# 随机森林
# param = {'n_estimators':[1500,1700,2000],
#          'max_features':[7,11,15]
#         }
# gs = GridSearchCV(estimator=RandomForestRegressor(), param_grid=param, cv=3, scoring="neg_mean_squared_error", n_jobs=-1, verbose=10) 
# gs.fit(X_resampled,y_resampled)
# print(gs.best_params_) 


# %% [markdown]
# ### 基于五折交叉验证的随机森林

# %%
# 5-fold out-of-fold random forest regression: OOF predictions are used to
# score the model, test predictions are averaged across the folds.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_rf = np.zeros(len(X))
prediction_rf = np.zeros(len(dummy_test))
for tr_idx, va_idx in folds.split(X):
    X_tr, X_va = X.iloc[tr_idx], X.iloc[va_idx]
    y_tr = y[tr_idx]
    model_rf = RandomForestRegressor(
        max_features=11,
        min_samples_leaf=1,
        n_estimators=1700,
        random_state=2022,
    )
    model_rf.fit(X_tr, y_tr)
    oof_rf[va_idx] = model_rf.predict(X_va).reshape(-1, )
    prediction_rf += model_rf.predict(dummy_test)
prediction_rf /= n_fold
print(roc_auc_score(y, oof_rf))
# Previously observed OOF AUC: 0.929373220326099

# %% [markdown]
# ### 基于GridSearchCV的XGB参数调整

# %%
# param = {'max_depth': [3],
#          'learning_rate': [0.01],
#         'subsample':[0.8],
#         'colsample_bytree':[0.6],
#          'n_estimators': [8000]

#         }
# gs = GridSearchCV(estimator=XGBRegressor(), param_grid=param, cv=3, scoring="neg_mean_squared_error", n_jobs=-1, verbose=10) 
# gs.fit(X,y)
# print(gs.best_params_) 


# %% [markdown]
# ### 基于五折交叉验证的XGB

# %%
from xgboost.callback import EarlyStopping  # NOTE(review): imported but never used below — confirm early stopping was intended

# 5-fold out-of-fold XGBoost regression on the label.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_xgb = np.zeros(len(X))                  # out-of-fold predictions, aligned with X
prediction_xgb = np.zeros(len(dummy_test))  # fold-averaged test predictions
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
#     smote_tomek = SMOTETomek(random_state=2022)
#     X_resampled, y_resampled = smote_tomek.fit_resample(X_train, y_train)
    eval_set = [(X_valid, y_valid)]
    # NOTE(review): eval_set is only logged; with no early_stopping_rounds or
    # callback configured, all 10000 trees appear to be fit — confirm intended.
    model_xgb = XGBRegressor(
        max_depth=9,learning_rate=0.01,n_estimators=10000,colsample_bytree=0.6,subsample=0.8,random_state=2022
    ).fit(X_train,y_train,eval_set=eval_set, verbose=True)
    y_pred_valid = model_xgb.predict(X_valid)
    y_pred = model_xgb.predict(dummy_test)
    oof_xgb[valid_index] = y_pred_valid.reshape(-1, )
    prediction_xgb += y_pred
prediction_xgb /= n_fold 
print(roc_auc_score(y, oof_xgb))
# Previously observed OOF AUC: 0.9326219985474677

# %% [markdown]
# ### 基于GridSearchCV的LGBM参数调整

# %%
# param = {'max_depth': [30],
#          'learning_rate': [0.01],
#          'num_leaves': [59],
#          'subsample': [0.7],
#          'colsample_bytree': [0.8],
#          'n_estimators': [10000]}
# gs = GridSearchCV(estimator=LGBMRegressor(), param_grid=param, cv=5, scoring="neg_mean_squared_error", n_jobs=-1) 
# gs.fit(X_resampled,y_resampled)
# print(gs.best_params_) 


# %% [markdown]
# ### 基于五折交叉验证的LGBM

# %%
# 5-fold out-of-fold LightGBM regression.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True,random_state=1314)
params = {
    'learning_rate':0.01,
    'subsample': 0.7,
    'num_leaves': 59,
    'n_estimators':1500,
    'max_depth': 30,
    'colsample_bytree': 0.8,
    'verbose': -1,  # silence per-iteration logging
    'seed': 2022,
    'n_jobs': -1
}

oof_lgb = np.zeros(len(X))                  # out-of-fold predictions, aligned with X
prediction_lgb  = np.zeros(len(dummy_test)) # fold-averaged test predictions
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
#     smote_tomek = SMOTETomek(random_state=2022)
#     X_resampled, y_resampled = smote_tomek.fit_resample(X_train, y_train)
    model = lgb.LGBMRegressor(**params)
    model.fit(X_train, y_train,
              eval_set=[(X_train, y_train), (X_valid, y_valid)],
              eval_metric='auc')
    y_pred_valid = model.predict(X_valid)
    # NOTE(review): no early stopping is configured, so best_iteration_ may be
    # unset here; the OOF predict above also omits num_iteration — confirm both
    # predicts are meant to use the same number of trees.
    y_pred = model.predict(dummy_test, num_iteration=model.best_iteration_)
    oof_lgb[valid_index] = y_pred_valid.reshape(-1, )
    prediction_lgb  += y_pred
prediction_lgb  /= n_fold
print(roc_auc_score(y, oof_lgb))
# Previously observed OOF AUC: 0.9342991211145983

# %% [markdown]
# ### 基于GridSearchCV的catboost参数调整

# %%

# param = {'depth': [7,9,11],
#          'learning_rate': [0.01],
#          'iterations':  [8000]}
# gs = GridSearchCV(estimator=CatBoostRegressor(), param_grid=param, cv=3, scoring="neg_mean_squared_error", n_jobs=-1) 
# gs.fit(X_resampled,y_resampled)
# print(gs.best_params_) 

# %% [markdown]
# ### 基于五折交叉验证的catboost

# %%
# Local 5-fold out-of-fold CatBoost regression.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=1314)

oof_cat = np.zeros(len(X))                  # out-of-fold predictions, aligned with X
prediction_cat = np.zeros(len(dummy_test))  # fold-averaged test predictions
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
#     smote_tomek = SMOTETomek(random_state=2022)
#     X_resampled, y_resampled = smote_tomek.fit_resample(X_train, y_train)
    train_pool = Pool(X_train, y_train)
    eval_pool = Pool(X_valid, y_valid)
    cbt_model = CatBoostRegressor(iterations=30000, # large cap; early stopping picks the best round
                           learning_rate=0.02, # higher LR to speed up convergence
                           depth=10, # deeper trees for more capacity
                           l2_leaf_reg=3.0, # L2 regularisation against overfitting
                           rsm=0.8, # feature sampling ratio
                           use_best_model=True,
                           random_seed=2022,
                           logging_level='Verbose',
                           #task_type='GPU',
                           devices='0',      # NOTE(review): GPU settings while task_type='GPU' is commented out — confirm they are ignored on CPU
                           gpu_ram_part=0.5,
                           early_stopping_rounds=500) # generous early-stopping patience
    
    cbt_model.fit(train_pool,
              eval_set=eval_pool,
              verbose=1000)  # NOTE(review): verbose here plus logging_level above may conflict in some catboost versions — confirm

    y_pred_valid = cbt_model.predict(X_valid)
    y_pred_c = cbt_model.predict(dummy_test)
    oof_cat[valid_index] = y_pred_valid.reshape(-1, )
    prediction_cat += y_pred_c
prediction_cat /= n_fold 
print(roc_auc_score(y, oof_cat))
# Expected to reach roughly 0.937+


# %%
# OOF AUC of the CatBoost model alone.
print(roc_auc_score(y, oof_cat))
# Previously observed: 0.935264298588153

# %% [markdown]
# ### 基于stacking的模型融合

# %%
from sklearn.metrics import mean_squared_error,mean_absolute_error,make_scorer

# Level-2 design matrices: each column holds one base model's predictions.
train_stack = np.vstack([oof_rf, oof_lgb, oof_cat, oof_xgb]).transpose()
test_stack = np.vstack([prediction_rf, prediction_lgb, prediction_cat, prediction_xgb]).transpose()

# Bayesian-ridge meta learner, 5-fold CV repeated twice (10 fits in total).
folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=2018)
oof_stack = np.zeros(train_stack.shape[0])
predictions = np.zeros(test_stack.shape[0])

for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):
    print("fold {}".format(fold_))
    meta_model = BayesianRidge()
    meta_model.fit(train_stack[trn_idx], y.iloc[trn_idx].values)
    # Out-of-fold meta predictions, scored below as the CV estimate.
    oof_stack[val_idx] = meta_model.predict(train_stack[val_idx])
    # Divide by 10 because 5 folds x 2 repeats accumulate ten prediction sets.
    predictions += meta_model.predict(test_stack) / 10

# Mean squared error of the stacked OOF predictions on the training labels.
print("CV score: {:<8.8f}".format(mean_squared_error(y.values, oof_stack)))

# %%
# OOF AUC of the stacked meta-model — the headline local CV number.
print(roc_auc_score(y, oof_stack))
# # 0.9361018703876826

# %% [markdown]
# # 保存结果

# %%
# Attach the averaged stacked test predictions and write the submission file.
test['pred'] = predictions
test[['ID', 'pred']].to_csv('sub.csv', index=None, encoding="utf-8")


