#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
简化但高效的客户购买预测模型
目标：超越Baseline的0.936 AUC分数
"""

import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import BayesianRidge
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor, Pool

class HighPerformanceCustomerPrediction:
    """End-to-end pipeline for a customer purchase prediction competition.

    Flow: load CSVs -> feature engineering (derived features, out-of-fold
    target encoding, one-hot encoding, standardization) -> train three GBDT
    regressors on the 0/1 target scored with AUC -> stack them with a
    BayesianRidge meta-learner -> write a submission CSV.
    """

    def __init__(self):
        # Placeholder for fitted models; not populated by the current code.
        self.models = {}
        
    def load_data(self):
        """Load 'train_set.csv' / 'test_set.csv' into self.train / self.test."""
        self.train = pd.read_csv('train_set.csv')
        self.test = pd.read_csv('test_set.csv')
        print(f"训练集大小: {self.train.shape}")
        print(f"测试集大小: {self.test.shape}")
        
    def enhanced_feature_engineering(self):
        """Build the final feature matrices for train and test.

        Combines raw numeric columns, log/interaction features, out-of-fold
        target encodings for every object-dtype column, and one-hot encodings
        for selected categoricals, then standardizes everything.

        Returns:
            (self.X, self.y, self.X_test): scaled train features, target
            series, and scaled test features. Also stored on self as a
            side effect.
        """
        print("开始增强特征工程...")
        
        # Split columns into categorical (object dtype) and numeric features,
        # excluding the row identifier 'ID' and the target 'y'.
        str_features = []
        num_features = []
        
        for col in self.train.columns:
            if self.train[col].dtype == 'object':
                str_features.append(col)
            elif self.train[col].dtype in ['int64', 'float64'] and col not in ['ID', 'y']:
                num_features.append(col)
        
        # 1. Keep raw features untouched (deliberately no outlier handling).
        
        # 2. Key derived features (always created on train and test in lockstep).
        # Age-related.
        self.train['age_log'] = np.log1p(self.train['age'])
        self.test['age_log'] = np.log1p(self.test['age'])
        
        # Balance-related.
        self.train['balance_abs'] = self.train['balance'].abs()
        self.test['balance_abs'] = self.test['balance'].abs()
        
        self.train['balance_positive'] = (self.train['balance'] > 0).astype(int)
        self.test['balance_positive'] = (self.test['balance'] > 0).astype(int)
        
        # Call-duration-related.
        self.train['duration_log'] = np.log1p(self.train['duration'])
        self.test['duration_log'] = np.log1p(self.test['duration'])
        
        # Key interaction features.
        self.train['age_duration'] = self.train['age'] * self.train['duration']
        self.test['age_duration'] = self.test['age'] * self.test['duration']
        
        self.train['balance_duration'] = self.train['balance_abs'] * self.train['duration']
        self.test['balance_duration'] = self.test['balance_abs'] * self.test['duration']
        
        # +1 guards against division by zero when campaign == 0.
        self.train['duration_per_campaign'] = self.train['duration'] / (self.train['campaign'] + 1)
        self.test['duration_per_campaign'] = self.test['duration'] / (self.test['campaign'] + 1)
        
        # 3. Target encoding: out-of-fold category means on train to limit
        # target leakage into the training rows.
        from sklearn.model_selection import KFold
        kf = KFold(n_splits=5, shuffle=True, random_state=2022)
        
        for col in str_features:
            target_encoded = np.zeros(len(self.train))
            
            for train_idx, val_idx in kf.split(self.train):
                train_fold = self.train.iloc[train_idx]
                val_fold = self.train.iloc[val_idx]
                
                target_mean = train_fold.groupby(col)['y'].mean()
                global_mean = train_fold['y'].mean()
                
                # Categories unseen in this fold fall back to the fold's
                # global target mean.
                target_encoded[val_idx] = val_fold[col].map(target_mean).fillna(global_mean)
            
            self.train[f'{col}_target'] = target_encoded
            
            # Encode the test set with means fitted on the FULL train set.
            # NOTE(review): full-train means are standard here but slightly
            # more optimistic than the OOF encoding the train rows receive.
            target_mean_full = self.train.groupby(col)['y'].mean()
            global_mean_full = self.train['y'].mean()
            self.test[f'{col}_target'] = self.test[col].map(target_mean_full).fillna(global_mean_full)
        
        # 4. Assemble the final feature-column list.
        feature_cols = num_features + ['age_log', 'balance_abs', 'balance_positive', 'duration_log',
                                      'age_duration', 'balance_duration', 'duration_per_campaign']
        
        # Add the target-encoded columns.
        for col in str_features:
            feature_cols.append(f'{col}_target')
        
        # Add one-hot encodings for selected categorical columns.
        important_cats = ['job', 'marital', 'education', 'contact', 'month', 'poutcome']
        for col in important_cats:
            if col in self.train.columns:
                dummies_train = pd.get_dummies(self.train[col], prefix=col)
                dummies_test = pd.get_dummies(self.test[col], prefix=col)
                
                # Align dummy columns across train/test: categories missing
                # from one frame get an all-zero column.
                all_columns = set(dummies_train.columns) | set(dummies_test.columns)
                for dummy_col in all_columns:
                    if dummy_col not in dummies_train.columns:
                        dummies_train[dummy_col] = 0
                    if dummy_col not in dummies_test.columns:
                        dummies_test[dummy_col] = 0
                
                # Copy the aligned dummy columns onto both frames (sorted for
                # a deterministic feature order).
                for dummy_col in sorted(all_columns):
                    if dummy_col in dummies_train.columns and dummy_col in dummies_test.columns:
                        self.train[dummy_col] = dummies_train[dummy_col]
                        self.test[dummy_col] = dummies_test[dummy_col]
                        feature_cols.append(dummy_col)
        
        # 5. Final feature matrices.
        X = self.train[feature_cols].copy()
        X_test = self.test[feature_cols].copy()
        
        # Standardize: scaler is fitted on train only and applied to test.
        scaler = StandardScaler()
        X_scaled = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)
        X_test_scaled = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns, index=X_test.index)
        
        self.X = X_scaled
        self.y = self.train['y']
        self.X_test = X_test_scaled
        
        print(f"最终特征数量: {self.X.shape[1]}")
        return self.X, self.y, self.X_test
    
    def train_high_performance_models(self):
        """Train LightGBM, XGBoost and CatBoost with 20-fold stratified CV.

        Each model is trained as a regressor on the 0/1 target; AUC only
        needs a ranking, so regression outputs are scored directly.

        Returns:
            (oof_predictions, test_predictions): dicts keyed by
            'lgb'/'xgb'/'cat' holding out-of-fold train predictions and
            fold-averaged test predictions respectively.
        """
        print("开始训练高性能模型...")
        
        # Many folds: each model sees 95% of the data per fit.
        n_fold = 20
        folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)
        
        # Bin the target so StratifiedKFold can stratify; for a 0/1 target
        # pd.cut yields two occupied bins, i.e. per-class stratification.
        y_binned = pd.cut(self.y, bins=5, labels=False)
        
        oof_predictions = {}
        test_predictions = {}
        
        # 1. LightGBM.
        print("训练高性能LightGBM...")
        oof_lgb = np.zeros(len(self.X))
        prediction_lgb = np.zeros(len(self.X_test))
        
        lgb_params = {
            # NOTE(review): regression objective paired with the 'auc' eval
            # metric — confirm the installed LightGBM accepts this combination.
            'objective': 'regression',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.005,  # small learning rate
            'feature_fraction': 0.8,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'min_child_samples': 20,
            'max_depth': -1,
            'reg_alpha': 0.1,
            'reg_lambda': 0.1,
            'verbose': -1,
            'random_state': 2022,
            'n_estimators': 20000  # many trees; early stopping trims the rest
        }
        
        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]
            
            model = lgb.LGBMRegressor(**lgb_params)
            model.fit(X_train, y_train,
                     eval_set=[(X_valid, y_valid)],
                     eval_metric='auc',
                     callbacks=[lgb.early_stopping(1000), lgb.log_evaluation(0)])
            
            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)
            
            # Accumulate: OOF slot for this fold, running sum for the test set.
            oof_lgb[valid_index] = y_pred_valid
            prediction_lgb += y_pred
        
        prediction_lgb /= n_fold
        oof_predictions['lgb'] = oof_lgb
        test_predictions['lgb'] = prediction_lgb
        print(f"LightGBM AUC: {roc_auc_score(self.y, oof_lgb):.6f}")
        
        # 2. XGBoost (same folds and accumulation scheme as LightGBM).
        print("训练高性能XGBoost...")
        oof_xgb = np.zeros(len(self.X))
        prediction_xgb = np.zeros(len(self.X_test))
        
        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]
            
            model = xgb.XGBRegressor(
                max_depth=6,
                learning_rate=0.005,  # small learning rate
                n_estimators=20000,   # many trees; early stopping trims the rest
                colsample_bytree=0.8,
                subsample=0.8,
                reg_alpha=0.1,
                reg_lambda=0.1,
                random_state=2022,
                eval_metric='auc',
                early_stopping_rounds=1000
            )
            
            model.fit(X_train, y_train,
                     eval_set=[(X_valid, y_valid)],
                     verbose=False)
            
            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)
            
            oof_xgb[valid_index] = y_pred_valid
            prediction_xgb += y_pred
        
        prediction_xgb /= n_fold
        oof_predictions['xgb'] = oof_xgb
        test_predictions['xgb'] = prediction_xgb
        print(f"XGBoost AUC: {roc_auc_score(self.y, oof_xgb):.6f}")
        
        # 3. CatBoost (same folds and accumulation scheme).
        print("训练高性能CatBoost...")
        oof_cat = np.zeros(len(self.X))
        prediction_cat = np.zeros(len(self.X_test))
        
        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]
            
            train_pool = Pool(X_train, y_train)
            eval_pool = Pool(X_valid, y_valid)
            
            model = CatBoostRegressor(
                iterations=20000,     # many iterations; early stopping trims
                learning_rate=0.005,  # small learning rate
                depth=6,
                l2_leaf_reg=3.0,
                rsm=0.8,
                bagging_temperature=0.2,
                random_strength=0.2,
                use_best_model=True,
                random_seed=2022,
                logging_level='Silent',
                early_stopping_rounds=1000
            )
            
            model.fit(train_pool, eval_set=eval_pool, verbose=False)
            
            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)
            
            oof_cat[valid_index] = y_pred_valid
            prediction_cat += y_pred
        
        prediction_cat /= n_fold
        oof_predictions['cat'] = oof_cat
        test_predictions['cat'] = prediction_cat
        print(f"CatBoost AUC: {roc_auc_score(self.y, oof_cat):.6f}")
        
        return oof_predictions, test_predictions
    
    def advanced_ensemble(self, oof_predictions, test_predictions):
        """Stack the base models with a BayesianRidge meta-learner.

        Args:
            oof_predictions: dict of per-model out-of-fold train predictions.
            test_predictions: dict of per-model averaged test predictions.

        Returns:
            (predictions_stack, final_auc): stacked test predictions and the
            AUC of the stacked out-of-fold predictions.
        """
        print("开始高级模型融合...")
        
        # Base-model predictions become the meta-features, one column each.
        # Iteration order of the dicts fixes the column order for both stacks.
        train_stack = np.column_stack([oof_predictions[model] for model in oof_predictions.keys()])
        test_stack = np.column_stack([test_predictions[model] for model in test_predictions.keys()])
        
        # Repeated CV: 20 splits x 10 repeats = 200 meta-model fits.
        from sklearn.model_selection import RepeatedKFold
        folds_stack = RepeatedKFold(n_splits=20, n_repeats=10, random_state=2022)
        
        # BayesianRidge as the meta-learner (refit from scratch each fold).
        meta_model = BayesianRidge()
        
        oof_stack = np.zeros(train_stack.shape[0])
        predictions_stack = np.zeros(test_stack.shape[0])
        
        for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, self.y)):
            trn_data, trn_y = train_stack[trn_idx], self.y.iloc[trn_idx].values
            val_data, val_y = train_stack[val_idx], self.y.iloc[val_idx].values
            
            meta_model.fit(trn_data, trn_y)
            # NOTE(review): with RepeatedKFold every repeat overwrites
            # oof_stack[val_idx], so final_auc reflects only the last repeat's
            # OOF predictions; averaging across repeats may be the intent —
            # confirm before relying on this score.
            oof_stack[val_idx] = meta_model.predict(val_data)
            # get_n_splits() for RepeatedKFold is n_splits * n_repeats (200),
            # so this correctly averages the test prediction over all fits.
            predictions_stack += meta_model.predict(test_stack) / folds_stack.get_n_splits()
        
        final_auc = roc_auc_score(self.y, oof_stack)
        print(f"最终融合模型 AUC: {final_auc:.6f}")
        
        return predictions_stack, final_auc
    
    def run_optimization(self):
        """Run the full pipeline and write the submission file.

        Returns:
            final_auc: AUC of the stacked out-of-fold predictions.
        """
        print("开始高性能客户购买预测模型...")
        
        # 1. Load data.
        self.load_data()
        
        # 2. Feature engineering.
        X, y, X_test = self.enhanced_feature_engineering()
        
        # 3. Train the base models.
        oof_predictions, test_predictions = self.train_high_performance_models()
        
        # 4. Stack them.
        final_predictions, final_auc = self.advanced_ensemble(oof_predictions, test_predictions)
        
        # 5. Save the submission. Raw regression scores are written as-is;
        # fine for AUC-based scoring since only the ranking matters.
        submission = pd.DataFrame({
            'ID': self.test['ID'],
            'pred': final_predictions
        })
        submission.to_csv('high_performance_submission.csv', index=False)
        
        print(f"\n高性能优化完成！最终AUC分数: {final_auc:.6f}")
        print("结果已保存到 high_performance_submission.csv")
        
        return final_auc

if __name__ == "__main__":
    # Script entry point: build the predictor and run the whole pipeline
    # (load -> features -> train -> stack -> write submission).
    pipeline = HighPerformanceCustomerPrediction()
    final_score = pipeline.run_optimization()