#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
简化但高效的客户购买预测模型 - Jupyter分段运行版本
目标：超越Baseline的0.936 AUC分数
"""

# ==================== 第1段：导入库和初始化 ====================
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

from sklearn.model_selection import KFold, StratifiedKFold, RepeatedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import BayesianRidge
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor, Pool

print("所有库导入完成！")

# ==================== 第2段：数据加载 ====================
def load_data():
    """Read train_set.csv / test_set.csv from the working directory.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (train, test) frames, with their
        shapes echoed to stdout for a quick sanity check.
    """
    train_df = pd.read_csv('train_set.csv')
    test_df = pd.read_csv('test_set.csv')
    print(f"训练集大小: {train_df.shape}")
    print(f"测试集大小: {test_df.shape}")
    return train_df, test_df

# Load the raw train/test CSVs (files must exist in the working directory).
train, test = load_data()

# ==================== 第3段：基础特征分析 ====================
def analyze_features(train):
    """Split the frame's columns into categorical and numeric feature names.

    Args:
        train: Training DataFrame. 'ID' and 'y' are never reported as
            numeric features.

    Returns:
        tuple[list[str], list[str]]: (categorical column names,
        numeric column names), also printed for inspection.
    """
    str_features = []
    num_features = []

    for col in train.columns:
        dtype = train[col].dtype
        if pd.api.types.is_object_dtype(dtype):
            str_features.append(col)
        # is_numeric_dtype generalizes the old ['int64', 'float64'] check so
        # downcast columns (int32/float32 etc.) are not silently dropped;
        # bool is excluded explicitly to match the original behavior.
        elif (pd.api.types.is_numeric_dtype(dtype)
              and not pd.api.types.is_bool_dtype(dtype)
              and col not in ['ID', 'y']):
            num_features.append(col)

    print(f"数值特征: {num_features}")
    print(f"分类特征: {str_features}")
    return str_features, num_features

# Identify categorical vs numeric feature columns of the training set.
str_features, num_features = analyze_features(train)

# ==================== 第4段：衍生特征创建 ====================
def create_derived_features(train, test):
    """Add engineered columns to both frames in place.

    Adds log transforms, a sign flag for balance, and a few pairwise
    interaction features; the same columns are written to train and test.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: the mutated (train, test) frames.
    """
    print("开始创建衍生特征...")

    for frame in (train, test):
        # log1p tames the heavy right tails of age/duration.
        frame['age_log'] = np.log1p(frame['age'])
        frame['balance_abs'] = frame['balance'].abs()
        frame['balance_positive'] = (frame['balance'] > 0).astype(int)
        frame['duration_log'] = np.log1p(frame['duration'])
        # Pairwise interactions between the strongest raw signals.
        frame['age_duration'] = frame['age'] * frame['duration']
        frame['balance_duration'] = frame['balance_abs'] * frame['duration']
        # +1 guards against division by zero for zero-contact campaigns.
        frame['duration_per_campaign'] = frame['duration'] / (frame['campaign'] + 1)

    print("衍生特征创建完成！")
    return train, test

# Add the engineered columns to train and test (mutates both frames).
train, test = create_derived_features(train, test)

# ==================== 第5段：目标编码 ====================
def create_target_encoding(train, test, str_features):
    """Add mean-target-encoded columns for each categorical feature (in place).

    Train rows get out-of-fold encodings (5-fold, fixed seed) so a row never
    sees its own label; test rows are encoded from the full training set.
    Unseen categories fall back to the relevant global mean of 'y'.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: the mutated (train, test) frames,
        each with a new '<col>_target' column per categorical feature.
    """
    print("开始目标编码...")

    splitter = KFold(n_splits=5, shuffle=True, random_state=2022)

    for feature in str_features:
        encoded = np.zeros(len(train))

        # Out-of-fold pass: encode each validation slice from the other folds.
        for fit_idx, enc_idx in splitter.split(train):
            fit_part = train.iloc[fit_idx]
            enc_part = train.iloc[enc_idx]

            category_means = fit_part.groupby(feature)['y'].mean()
            fallback = fit_part['y'].mean()

            encoded[enc_idx] = enc_part[feature].map(category_means).fillna(fallback)

        train[f'{feature}_target'] = encoded

        # Test rows: encode from the whole training set.
        full_means = train.groupby(feature)['y'].mean()
        full_fallback = train['y'].mean()
        test[f'{feature}_target'] = test[feature].map(full_means).fillna(full_fallback)

    print("目标编码完成！")
    return train, test

# Add out-of-fold target-encoded columns for every categorical feature.
train, test = create_target_encoding(train, test, str_features)

# ==================== 第6段：独热编码 ====================
def create_onehot_encoding(train, test):
    """One-hot encode key categorical columns, aligning train/test levels.

    For every category level seen in either frame, an indicator column is
    added to BOTH frames (missing levels filled with 0), so downstream
    matrices always share the same columns.

    Args:
        train, test: DataFrames, mutated in place.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame, list[str]]: (train, test, names of
        the indicator columns added, sorted per source column).
    """
    print("开始独热编码...")

    important_cats = ['job', 'marital', 'education', 'contact', 'month', 'poutcome']
    onehot_features = []

    for col in important_cats:
        if col not in train.columns:
            continue

        dummies_train = pd.get_dummies(train[col], prefix=col)
        dummies_test = pd.get_dummies(test[col], prefix=col)

        # reindex() aligns both frames to the union of levels in one pass,
        # replacing the previous O(k^2) membership loops and the redundant
        # "present in both" re-check (always true after zero-filling).
        union_cols = sorted(set(dummies_train.columns) | set(dummies_test.columns))
        dummies_train = dummies_train.reindex(columns=union_cols, fill_value=0)
        dummies_test = dummies_test.reindex(columns=union_cols, fill_value=0)

        for dummy_col in union_cols:
            train[dummy_col] = dummies_train[dummy_col]
            test[dummy_col] = dummies_test[dummy_col]
            onehot_features.append(dummy_col)

    print(f"独热编码完成！新增特征数: {len(onehot_features)}")
    return train, test, onehot_features

# Add aligned one-hot indicator columns for the key categorical features.
train, test, onehot_features = create_onehot_encoding(train, test)

# ==================== 第7段：特征准备和标准化 ====================
def prepare_final_features(train, test, num_features, str_features, onehot_features):
    """Assemble the final feature matrix and z-score it.

    Concatenates raw numeric, derived, target-encoded, and one-hot columns,
    then standardizes with statistics fitted on the training rows only.

    Returns:
        tuple[pd.DataFrame, pd.Series, pd.DataFrame]: (scaled train matrix,
        target series, scaled test matrix).
    """
    print("准备最终特征...")

    derived_cols = ['age_log', 'balance_abs', 'balance_positive', 'duration_log',
                    'age_duration', 'balance_duration', 'duration_per_campaign']
    target_cols = [f'{col}_target' for col in str_features]
    feature_cols = num_features + derived_cols + target_cols + list(onehot_features)

    X = train[feature_cols].copy()
    X_test = test[feature_cols].copy()
    y = train['y']

    # Fit the scaler on train only so no test statistics leak into training.
    scaler = StandardScaler()
    X_scaled = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)
    X_test_scaled = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns, index=X_test.index)

    print(f"最终特征数量: {X_scaled.shape[1]}")
    return X_scaled, y, X_test_scaled

# Build the scaled feature matrices consumed by every model below.
X, y, X_test = prepare_final_features(train, test, num_features, str_features, onehot_features)

print("特征工程阶段完成！可以开始模型训练...")

# ==================== 第8段：LightGBM模型训练 ====================
def train_lightgbm_model(X, y, X_test):
    """Train LightGBM regressors with 20-fold stratified CV.

    Returns:
        tuple[np.ndarray, np.ndarray, float]: out-of-fold predictions for X,
        fold-averaged predictions for X_test, and the out-of-fold AUC.
    """
    print("训练高性能LightGBM...")

    n_fold = 20
    folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)

    # StratifiedKFold needs discrete labels, so stratify on a binned target.
    y_binned = pd.cut(y, bins=5, labels=False)

    oof_lgb = np.zeros(len(X))
    prediction_lgb = np.zeros(len(X_test))

    lgb_params = {
        'objective': 'regression',
        'metric': 'auc',
        'boosting_type': 'gbdt',
        'num_leaves': 31,
        'learning_rate': 0.005,  # small step size, compensated by many trees
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'min_child_samples': 20,
        'max_depth': -1,
        'reg_alpha': 0.1,
        'reg_lambda': 0.1,
        'verbose': -1,
        'random_state': 2022,
        'n_estimators': 20000  # upper bound; early stopping picks the real count
    }

    for fold_train_idx, fold_valid_idx in folds.split(X, y_binned):
        X_tr, X_val = X.iloc[fold_train_idx], X.iloc[fold_valid_idx]
        y_tr, y_val = y.iloc[fold_train_idx], y.iloc[fold_valid_idx]

        booster = lgb.LGBMRegressor(**lgb_params)
        booster.fit(
            X_tr, y_tr,
            eval_set=[(X_val, y_val)],
            eval_metric='auc',
            callbacks=[lgb.early_stopping(1000), lgb.log_evaluation(0)],
        )

        oof_lgb[fold_valid_idx] = booster.predict(X_val)
        prediction_lgb += booster.predict(X_test)

    prediction_lgb /= n_fold
    lgb_auc = roc_auc_score(y, oof_lgb)
    print(f"LightGBM AUC: {lgb_auc:.6f}")

    return oof_lgb, prediction_lgb, lgb_auc

# 执行LightGBM训练（可选择性运行）
# oof_lgb, pred_lgb, lgb_auc = train_lightgbm_model(X, y, X_test)

# ==================== 第9段：XGBoost模型训练 ====================
def train_xgboost_model(X, y, X_test):
    """Train XGBoost regressors with 20-fold stratified CV.

    Returns:
        tuple[np.ndarray, np.ndarray, float]: out-of-fold predictions for X,
        fold-averaged predictions for X_test, and the out-of-fold AUC.
    """
    print("训练高性能XGBoost...")

    n_fold = 20
    folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)
    # StratifiedKFold needs discrete labels, so stratify on a binned target.
    y_binned = pd.cut(y, bins=5, labels=False)

    oof_xgb = np.zeros(len(X))
    prediction_xgb = np.zeros(len(X_test))

    xgb_params = dict(
        max_depth=6,
        learning_rate=0.005,   # small step size, compensated by many trees
        n_estimators=20000,    # upper bound; early stopping picks the real count
        colsample_bytree=0.8,
        subsample=0.8,
        reg_alpha=0.1,
        reg_lambda=0.1,
        random_state=2022,
        eval_metric='auc',
        early_stopping_rounds=1000,
    )

    for fold_train_idx, fold_valid_idx in folds.split(X, y_binned):
        X_tr, X_val = X.iloc[fold_train_idx], X.iloc[fold_valid_idx]
        y_tr, y_val = y.iloc[fold_train_idx], y.iloc[fold_valid_idx]

        booster = xgb.XGBRegressor(**xgb_params)
        booster.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], verbose=False)

        oof_xgb[fold_valid_idx] = booster.predict(X_val)
        prediction_xgb += booster.predict(X_test)

    prediction_xgb /= n_fold
    xgb_auc = roc_auc_score(y, oof_xgb)
    print(f"XGBoost AUC: {xgb_auc:.6f}")

    return oof_xgb, prediction_xgb, xgb_auc

# 执行XGBoost训练（可选择性运行）
# oof_xgb, pred_xgb, xgb_auc = train_xgboost_model(X, y, X_test)

# ==================== 第10段：CatBoost模型训练 ====================
def train_catboost_model(X, y, X_test):
    """Train CatBoost regressors with 20-fold stratified CV.

    Returns:
        tuple[np.ndarray, np.ndarray, float]: out-of-fold predictions for X,
        fold-averaged predictions for X_test, and the out-of-fold AUC.
    """
    print("训练高性能CatBoost...")

    n_fold = 20
    folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)
    # StratifiedKFold needs discrete labels, so stratify on a binned target.
    y_binned = pd.cut(y, bins=5, labels=False)

    oof_cat = np.zeros(len(X))
    prediction_cat = np.zeros(len(X_test))

    for fold_train_idx, fold_valid_idx in folds.split(X, y_binned):
        X_tr, X_val = X.iloc[fold_train_idx], X.iloc[fold_valid_idx]
        y_tr, y_val = y.iloc[fold_train_idx], y.iloc[fold_valid_idx]

        regressor = CatBoostRegressor(
            iterations=20000,     # upper bound; early stopping picks the real count
            learning_rate=0.005,  # small step size, compensated by many iterations
            depth=6,
            l2_leaf_reg=3.0,
            rsm=0.8,
            bagging_temperature=0.2,
            random_strength=0.2,
            use_best_model=True,
            random_seed=2022,
            logging_level='Silent',
            early_stopping_rounds=1000,
        )
        regressor.fit(Pool(X_tr, y_tr), eval_set=Pool(X_val, y_val), verbose=False)

        oof_cat[fold_valid_idx] = regressor.predict(X_val)
        prediction_cat += regressor.predict(X_test)

    prediction_cat /= n_fold
    cat_auc = roc_auc_score(y, oof_cat)
    print(f"CatBoost AUC: {cat_auc:.6f}")

    return oof_cat, prediction_cat, cat_auc

# 执行CatBoost训练（可选择性运行）
# oof_cat, pred_cat, cat_auc = train_catboost_model(X, y, X_test)

# ==================== 第11段：模型融合 ====================
def advanced_ensemble(oof_predictions, test_predictions, y):
    """Stack base-model predictions with a BayesianRidge meta-learner.

    Args:
        oof_predictions: dict of model name -> out-of-fold train predictions.
        test_predictions: dict of model name -> test predictions (same order).
        y: training target series.

    Returns:
        tuple[np.ndarray, float]: blended test predictions and the stacked
        out-of-fold AUC.

    NOTE(review): with RepeatedKFold each repeat overwrites oof_stack for the
    same rows, so the reported AUC reflects the last fits of each row —
    presumably an intentional stacking shortcut; confirm before relying on it.
    """
    print("开始高级模型融合...")

    # Base-model predictions become the meta-learner's feature columns.
    train_stack = np.column_stack(list(oof_predictions.values()))
    test_stack = np.column_stack(list(test_predictions.values()))

    # 20 folds x 10 repeats = 200 meta-model fits.
    folds_stack = RepeatedKFold(n_splits=20, n_repeats=10, random_state=2022)
    meta_model = BayesianRidge()

    oof_stack = np.zeros(train_stack.shape[0])
    predictions_stack = np.zeros(test_stack.shape[0])

    for trn_idx, val_idx in folds_stack.split(train_stack, y):
        meta_model.fit(train_stack[trn_idx], y.iloc[trn_idx].values)
        oof_stack[val_idx] = meta_model.predict(train_stack[val_idx])
        # get_n_splits() == n_splits * n_repeats, so this running sum
        # averages the test predictions over every fit.
        predictions_stack += meta_model.predict(test_stack) / folds_stack.get_n_splits()

    final_auc = roc_auc_score(y, oof_stack)
    print(f"最终融合模型 AUC: {final_auc:.6f}")

    return predictions_stack, final_auc

# ==================== 第12段：完整流程示例 ====================
"""
完整运行示例：

# 1. 训练所有模型
oof_lgb, pred_lgb, lgb_auc = train_lightgbm_model(X, y, X_test)
oof_xgb, pred_xgb, xgb_auc = train_xgboost_model(X, y, X_test)
oof_cat, pred_cat, cat_auc = train_catboost_model(X, y, X_test)

# 2. 准备融合数据
oof_predictions = {
    'lgb': oof_lgb,
    'xgb': oof_xgb,
    'cat': oof_cat
}

test_predictions = {
    'lgb': pred_lgb,
    'xgb': pred_xgb,
    'cat': pred_cat
}

# 3. 模型融合
final_predictions, final_auc = advanced_ensemble(oof_predictions, test_predictions, y)

# 4. 保存结果
submission = pd.DataFrame({
    'ID': test['ID'],
    'pred': final_predictions
})
submission.to_csv('high_performance_submission.csv', index=False)

print(f"最终AUC分数: {final_auc:.6f}")
print("结果已保存到 high_performance_submission.csv")
"""

# 1. Train the three base models (20-fold CV each; this is the slow part).
oof_lgb, pred_lgb, lgb_auc = train_lightgbm_model(X, y, X_test)
oof_xgb, pred_xgb, xgb_auc = train_xgboost_model(X, y, X_test)
oof_cat, pred_cat, cat_auc = train_catboost_model(X, y, X_test)

# 2. Collect out-of-fold and test predictions for stacking.
oof_predictions = {
    'lgb': oof_lgb,
    'xgb': oof_xgb,
    'cat': oof_cat
}

test_predictions = {
    'lgb': pred_lgb,
    'xgb': pred_xgb,
    'cat': pred_cat
}

# 3. Blend the base models with the BayesianRidge stacker.
final_predictions, final_auc = advanced_ensemble(oof_predictions, test_predictions, y)

# 4. Write the submission file.
submission = pd.DataFrame({
    'ID': test['ID'],
    'pred': final_predictions
})
submission.to_csv('high_performance_submission.csv', index=False)

print(f"最终AUC分数: {final_auc:.6f}")
print("结果已保存到 high_performance_submission.csv")