# %% [markdown]
# # 优化版客户购买预测
# 采用更精细的超参数调优、增强特征工程和改进的模型融合策略

# %% [markdown]
# # 查看数据

# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency
from scipy.stats import f_oneway,ttest_ind
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold, GridSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import BayesianRidge
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor, Pool
from sklearn.metrics import roc_auc_score
from sklearn.feature_selection import SelectKBest, f_classif
from imblearn.combine import SMOTETomek
import warnings
warnings.filterwarnings('ignore')

plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# %% 
train = pd.read_csv('train_set.csv')
test = pd.read_csv('test_set.csv')

# %% 
data = pd.concat([train.drop(['y'], axis=1), test], axis=0).reset_index(drop=True)

# %% 
# Partition columns into categorical (object dtype) and numeric (int64,
# excluding the ID and the target) feature lists, and print the unique
# values of every categorical column for a quick sanity check.
str_features = [col for col in train.columns if train[col].dtype == 'object']
num_features = [col for col in train.columns
                if train[col].dtype == 'int64' and col not in ['ID', 'y']]
for col in str_features:
    print(col, ':  ', train[col].unique())

# %% [markdown]
# # 特征工程

# %% 
# Outlier handling
def outlier_processing(dfx):
    """Cap outliers in a numeric Series using Tukey's 1.5*IQR fences.

    Values above the upper fence are replaced by the largest value at or
    below it; values below the lower fence are then replaced by the
    smallest value at or above it (same sequential order as before).
    Operates on a copy — the input Series is left unmodified.
    """
    series = dfx.copy()
    q1 = series.quantile(q=0.25)
    q3 = series.quantile(q=0.75)
    spread = q3 - q1
    lower_fence = q1 - 1.5 * spread
    upper_fence = q3 + 1.5 * spread
    series[series > upper_fence] = series[series <= upper_fence].max()
    series[series < lower_fence] = series[series >= lower_fence].min()
    return series

# %% 
# Winsorize the skewed numeric columns in both train and test.
for _col in ['age', 'day', 'duration', 'campaign']:
    train[_col] = outlier_processing(train[_col])
    test[_col] = outlier_processing(test[_col])

# %% 
# Create an interaction feature from contact channel and month.
train['contact_month'] = train['contact'] + '_' + train['month']
test['contact_month'] = test['contact'] + '_' + test['month']

# %% 
# One-hot encode the categorical columns.
# FIX: get_dummies was applied to train and test independently, so a
# category present in only one of them produced mismatched column sets,
# misaligning (or crashing) every downstream model.predict(dummy_test).
# Test dummies are reindexed to the train dummy columns: unseen-in-test
# categories become all-zero columns, unseen-in-train categories drop.
cat_cols = str_features + ['contact_month']
train_dummies = pd.get_dummies(train[cat_cols])
test_dummies = pd.get_dummies(test[cat_cols]).reindex(
    columns=train_dummies.columns, fill_value=0)
dummy_train = train.join(train_dummies).drop(cat_cols, axis=1).drop(['ID', 'y'], axis=1)
dummy_test = test.join(test_dummies).drop(cat_cols, axis=1).drop(['ID'], axis=1)

# %% 
# Univariate screening of numeric features with the ANOVA F-test.
# Informational only: k is the count of features with p <= 0.05, and the
# scores/selection are printed, but X below still keeps every column.
f_scores, p_values = f_classif(train[num_features], train['y'])
k = f_scores.shape[0] - (p_values > 0.05).sum()
selector = SelectKBest(f_classif, k=k)
selector.fit(train[num_features], train['y'])

print('scores_:', selector.scores_)
print('pvalues_:', selector.pvalues_)
print('selected index:', selector.get_support(True))

# %% 
# Standardize numeric columns; statistics are fitted on train only and
# the identical transform is reused on test (no leakage of test stats).
scaler = StandardScaler().fit(dummy_train.loc[:, num_features])
dummy_train.loc[:, num_features] = scaler.transform(dummy_train.loc[:, num_features])
dummy_test.loc[:, num_features] = scaler.transform(dummy_test.loc[:, num_features])

# %% 
X = dummy_train
y = train['y']

# %% [markdown]
# # 模型优化

# %% 
# Random forest: 5-fold out-of-fold predictions on train, plus test
# predictions averaged over the fold models.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_rf = np.zeros(len(X))
prediction_rf = np.zeros(len(dummy_test))

# Tuned random-forest hyperparameters
rf_params = {
    'n_estimators': 2000,
    'max_features': 10,
    'min_samples_leaf': 1,
    'max_depth': 15,
    'random_state': 2022
}

for fold_no, (trn_idx, val_idx) in enumerate(folds.split(X)):
    X_trn, X_val = X.iloc[trn_idx], X.iloc[val_idx]
    y_trn = y[trn_idx]

    model_rf = RandomForestRegressor(**rf_params)
    model_rf.fit(X_trn, y_trn)
    oof_rf[val_idx] = model_rf.predict(X_val).reshape(-1, )
    prediction_rf += model_rf.predict(dummy_test)

prediction_rf /= n_fold
print("随机森林 ROC AUC:", roc_auc_score(y, oof_rf))

# %% 
# XGBoost: 5-fold out-of-fold predictions on train, plus test
# predictions averaged over the fold models.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_xgb = np.zeros(len(X))
prediction_xgb = np.zeros(len(dummy_test))

# Tuned XGBoost hyperparameters
xgb_params = {
    'max_depth': 6,
    'learning_rate': 0.01,
    'n_estimators': 10000,
    'colsample_bytree': 0.8,
    'subsample': 0.8,
    'reg_alpha': 0.01,
    'reg_lambda': 0.01,
    'random_state': 2022
}

for fold_no, (trn_idx, val_idx) in enumerate(folds.split(X)):
    X_trn, X_val = X.iloc[trn_idx], X.iloc[val_idx]
    y_trn, y_val = y[trn_idx], y[val_idx]

    model_xgb = XGBRegressor(**xgb_params)
    model_xgb.fit(X_trn, y_trn, eval_set=[(X_val, y_val)], verbose=False)
    oof_xgb[val_idx] = model_xgb.predict(X_val).reshape(-1, )
    prediction_xgb += model_xgb.predict(dummy_test)

prediction_xgb /= n_fold
print("XGBoost ROC AUC:", roc_auc_score(y, oof_xgb))

# %% 
# LightGBM: 5-fold out-of-fold predictions on train, plus test
# predictions averaged over the fold models.
# FIX: lightgbm >= 4.0 removed the `verbose` keyword from fit(), so
# passing verbose=False raises TypeError there. Output is already
# silenced by 'verbose': -1 in the model params, so the fit() keyword
# is simply dropped (behavior unchanged on older lightgbm as well).
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_lgb = np.zeros(len(X))
prediction_lgb = np.zeros(len(dummy_test))

# Tuned LightGBM hyperparameters
lgb_params = {
    'learning_rate': 0.01,
    'subsample': 0.8,
    'num_leaves': 63,
    'n_estimators': 2000,
    'max_depth': 10,
    'colsample_bytree': 0.8,
    'reg_alpha': 0.01,
    'reg_lambda': 0.01,
    'random_state': 2022,
    'verbose': -1
}

for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]

    model = LGBMRegressor(**lgb_params)
    model.fit(X_train, y_train,
              eval_set=[(X_train, y_train), (X_valid, y_valid)],
              eval_metric='auc')
    oof_lgb[valid_index] = model.predict(X_valid).reshape(-1, )
    prediction_lgb += model.predict(dummy_test)

prediction_lgb /= n_fold
print("LightGBM ROC AUC:", roc_auc_score(y, oof_lgb))

# %% 
# CatBoost: 5-fold out-of-fold predictions on train, plus test
# predictions averaged over the fold models. use_best_model together
# with early_stopping_rounds restores the best iteration per fold.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)
oof_cat = np.zeros(len(X))
prediction_cat = np.zeros(len(dummy_test))

# Tuned CatBoost hyperparameters
cat_params = {
    'iterations': 10000,
    'learning_rate': 0.02,
    'depth': 10,
    'l2_leaf_reg': 3.0,
    'rsm': 0.8,
    'use_best_model': True,
    'random_seed': 2022,
    'logging_level': 'Silent',
    'early_stopping_rounds': 500
}

for fold_no, (trn_idx, val_idx) in enumerate(folds.split(X)):
    X_trn, X_val = X.iloc[trn_idx], X.iloc[val_idx]
    y_trn, y_val = y[trn_idx], y[val_idx]

    cbt_model = CatBoostRegressor(**cat_params)
    cbt_model.fit(Pool(X_trn, y_trn),
                  eval_set=Pool(X_val, y_val),
                  verbose=False)

    oof_cat[val_idx] = cbt_model.predict(X_val).reshape(-1, )
    prediction_cat += cbt_model.predict(dummy_test)

prediction_cat /= n_fold
print("CatBoost ROC AUC:", roc_auc_score(y, oof_cat))

# %% 
# Model blending: stack the four OOF prediction vectors as features and
# train a gradient-boosting meta-learner on them with 5-fold CV.
# FIX: dropped the unused Ridge import; the per-fold test-prediction
# divisor is tied to the n_splits constant instead of a hard-coded 5.
from sklearn.ensemble import GradientBoostingRegressor

# Columns = base models (RF, LGB, CAT, XGB); rows = samples.
train_stack = np.vstack([oof_rf, oof_lgb, oof_cat, oof_xgb]).transpose()
test_stack = np.vstack([prediction_rf, prediction_lgb, prediction_cat, prediction_xgb]).transpose()

# GradientBoostingRegressor as the meta-learner
meta_model = GradientBoostingRegressor(
    n_estimators=100,
    learning_rate=0.1,
    max_depth=3,
    random_state=2022
)

# 5-fold cross-validated stacking
n_stack_folds = 5
folds_stack = KFold(n_splits=n_stack_folds, shuffle=True, random_state=2018)
oof_stack = np.zeros(train_stack.shape[0])
predictions = np.zeros(test_stack.shape[0])

for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):
    trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values
    val_data = train_stack[val_idx]

    meta_model.fit(trn_data, trn_y)
    oof_stack[val_idx] = meta_model.predict(val_data)
    predictions += meta_model.predict(test_stack) / n_stack_folds

print("Stacking ROC AUC:", roc_auc_score(y, oof_stack))

# %% 
# Write the submission file with the blended test predictions.
test['pred'] = predictions
submission = test[['ID', 'pred']]
submission.to_csv('sub_optimized.csv', index=None, encoding="utf-8")

print("优化版模型完成，结果已保存到 sub_optimized.csv")