#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
优化版客户购买预测模型
目标：提升ROC AUC分数
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency, f_oneway, ttest_ind
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, PolynomialFeatures
from sklearn.feature_selection import SelectKBest, f_classif, RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import BayesianRidge, ElasticNet
from sklearn.metrics import roc_auc_score, mean_squared_error
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBRegressor
from catboost import CatBoostRegressor, Pool
from lightgbm import LGBMRegressor
import optuna
import warnings
# Silence the many warnings sklearn/lightgbm/xgboost emit during CV loops.
warnings.filterwarnings('ignore')

# Matplotlib setup: SimHei so CJK axis labels render, and keep the minus
# sign displayable when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class AdvancedCustomerPurchasePrediction:
    """End-to-end pipeline for the customer purchase prediction task.

    Loads train/test CSVs, engineers features, tunes/trains several
    gradient-boosted regressors used as rankers (ROC AUC is the target
    metric, so raw regression scores are fine), stacks them with linear
    meta-learners, and writes a submission CSV.
    """

    def __init__(self):
        # Trained model objects keyed by name (reserved; filled as needed).
        self.models = {}
        # Fitted scaler objects keyed by name (reserved).
        self.scalers = {}
        # Column names of the final feature matrix, set after engineering.
        self.feature_names = []

    def load_data(self):
        """Load `train_set.csv` / `test_set.csv` from the working directory.

        Sets:
            self.train, self.test: raw DataFrames.
            self.data: combined feature frame without the target (kept for
                reference; not used downstream).
        """
        self.train = pd.read_csv('train_set.csv')
        self.test = pd.read_csv('test_set.csv')
        self.data = pd.concat([self.train.drop(['y'], axis=1), self.test], axis=0).reset_index(drop=True)
        print(f"训练集大小: {self.train.shape}")
        print(f"测试集大小: {self.test.shape}")

    def advanced_feature_engineering(self):
        """Build the model-ready feature matrices.

        Steps: outlier clipping, binned features, interaction features,
        log transforms, one-hot encoding, univariate selection (ANOVA
        p < 0.05), and standardization fitted on train only.

        Returns:
            (X, y, X_test): scaled train features, train target, scaled
            test features.
        """
        print("开始高级特征工程...")

        # Split columns into categorical (object dtype) and numeric
        # features; ID and the target are excluded from the numeric list.
        str_features = []
        num_features = []
        for col in self.train.columns:
            if self.train[col].dtype == 'object':
                str_features.append(col)
            elif self.train[col].dtype in ['int64', 'float64'] and col not in ['ID', 'y']:
                num_features.append(col)

        # 1. Outlier handling: clip rather than drop, with loose fences.
        def outlier_processing_advanced(dfx, method='iqr'):
            """Clip a numeric Series to IQR- or z-score-based bounds."""
            df = dfx.copy()
            if method == 'iqr':
                q1 = df.quantile(q=0.25)
                q3 = df.quantile(q=0.75)
                iqr = q3 - q1
                # 2*IQR fences keep more of the tail than the usual 1.5*IQR.
                Umin = q1 - 2.0 * iqr
                Umax = q3 + 2.0 * iqr
            elif method == 'zscore':
                mean = df.mean()
                std = df.std()
                Umin = mean - 3 * std
                Umax = mean + 3 * std
            else:
                # Fail loudly instead of a NameError on undefined bounds.
                raise ValueError(f"unknown outlier method: {method!r}")

            df = np.clip(df, Umin, Umax)
            return df

        # Clip every numeric feature in both splits.
        # BUGFIX: this loop header had been swallowed by a corrupted comment
        # line in the original, leaving orphaned (mis-indented) statements.
        for col in num_features:
            self.train[col] = outlier_processing_advanced(self.train[col])
            self.test[col] = outlier_processing_advanced(self.test[col])

        # 2. Derived binned features — identical bin edges for both splits.
        age_bins = [0, 25, 35, 45, 55, 100]
        age_labels = ['young', 'adult', 'middle', 'senior', 'elder']
        balance_bins = [-np.inf, 0, 1000, 5000, np.inf]
        balance_labels = ['negative', 'low', 'medium', 'high']
        duration_bins = [0, 100, 300, 600, np.inf]
        duration_labels = ['short', 'medium', 'long', 'very_long']
        for df in (self.train, self.test):
            df['age_group'] = pd.cut(df['age'], bins=age_bins, labels=age_labels)
            df['balance_group'] = pd.cut(df['balance'], bins=balance_bins, labels=balance_labels)
            df['duration_group'] = pd.cut(df['duration'], bins=duration_bins, labels=duration_labels)

        # 3. Interaction features.
        # BUGFIX: the train-side `age_balance_ratio` assignment had been
        # swallowed by a corrupted comment line; only the test side
        # survived, so the feature was missing from the train frame.
        for df in (self.train, self.test):
            # +1 / abs() guard against division by zero and negative balances.
            df['age_balance_ratio'] = df['age'] / (df['balance'].abs() + 1)
            df['duration_campaign_ratio'] = df['duration'] / (df['campaign'] + 1)
            # Combined loan/housing category, e.g. "yes_no".
            df['loan_housing_combo'] = df['loan'] + '_' + df['housing']

        # 4. Log transform for skewed features.
        # BUGFIX: shift both splits by the *train* minimum; previously each
        # split used its own minimum, producing inconsistent transforms and
        # a train/test distribution mismatch.
        skewed_features = ['balance', 'duration']
        for col in skewed_features:
            if col in self.train.columns:
                shift = self.train[col].min()
                self.train[f'{col}_log'] = np.log1p(self.train[col] - shift + 1)
                self.test[f'{col}_log'] = np.log1p(self.test[col] - shift + 1)

        # Register the new features.
        str_features.extend(['age_group', 'balance_group', 'duration_group', 'loan_housing_combo'])
        num_features.extend(['age_balance_ratio', 'duration_campaign_ratio', 'balance_log', 'duration_log'])

        # 5. One-hot encode the categorical features.
        dummy_train = self.train.copy()
        dummy_test = self.test.copy()

        for col in str_features:
            if col in dummy_train.columns:
                dummies_train = pd.get_dummies(dummy_train[col], prefix=col)
                dummies_test = pd.get_dummies(dummy_test[col], prefix=col)

                # Align dummy columns: a category present in only one split
                # gets an all-zero column in the other. Sorted union gives
                # a deterministic column order (plain set order is not).
                all_columns = sorted(set(dummies_train.columns) | set(dummies_test.columns))
                for dummy_col in all_columns:
                    if dummy_col not in dummies_train.columns:
                        dummies_train[dummy_col] = 0
                    if dummy_col not in dummies_test.columns:
                        dummies_test[dummy_col] = 0

                dummy_train = pd.concat([dummy_train, dummies_train], axis=1)
                dummy_test = pd.concat([dummy_test, dummies_test], axis=1)

        # Drop the raw categorical columns, ID, and (train only) the target.
        cols_to_drop = str_features + ['ID']
        if 'y' in dummy_train.columns:
            cols_to_drop.append('y')

        dummy_train = dummy_train.drop(cols_to_drop, axis=1)
        dummy_test = dummy_test.drop([col for col in cols_to_drop if col != 'y'], axis=1)

        # 6. Univariate selection: keep features whose ANOVA F-test p-value
        # against the target is below 0.05.
        selector = SelectKBest(f_classif, k='all')
        selector.fit(dummy_train, self.train['y'])

        selected_features = dummy_train.columns[selector.pvalues_ < 0.05]
        dummy_train = dummy_train[selected_features]
        dummy_test = dummy_test[selected_features]

        print(f"特征工程后特征数量: {dummy_train.shape[1]}")

        # 7. Standardize — fit on train only to avoid leakage into test.
        scaler = StandardScaler()
        dummy_train_scaled = pd.DataFrame(
            scaler.fit_transform(dummy_train),
            columns=dummy_train.columns,
            index=dummy_train.index
        )
        dummy_test_scaled = pd.DataFrame(
            scaler.transform(dummy_test),
            columns=dummy_test.columns,
            index=dummy_test.index
        )

        self.X = dummy_train_scaled
        self.y = self.train['y']
        self.X_test = dummy_test_scaled
        self.feature_names = list(dummy_train.columns)

        return self.X, self.y, self.X_test

    def optimize_lightgbm(self, trial):
        """Optuna objective: mean 5-fold CV AUC for one LightGBM config.

        Args:
            trial: optuna.Trial supplying the hyperparameter suggestions.
        Returns:
            Mean validation AUC across the 5 folds (to be maximized).
        """
        params = {
            # Regression objective with AUC metric: raw scores are used as
            # rankings, which is all ROC AUC needs.
            'objective': 'regression',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': trial.suggest_int('num_leaves', 20, 100),
            'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3),
            'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0),
            'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0),
            'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
            'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
            'max_depth': trial.suggest_int('max_depth', 3, 15),
            'reg_alpha': trial.suggest_float('reg_alpha', 0.0, 1.0),
            'reg_lambda': trial.suggest_float('reg_lambda', 0.0, 1.0),
            'verbose': -1,
            'random_state': 2022
        }

        # 5-fold CV with a fixed seed so all trials see the same folds.
        kf = KFold(n_splits=5, shuffle=True, random_state=2022)
        auc_scores = []

        for train_idx, val_idx in kf.split(self.X):
            X_train, X_val = self.X.iloc[train_idx], self.X.iloc[val_idx]
            y_train, y_val = self.y.iloc[train_idx], self.y.iloc[val_idx]

            model = lgb.LGBMRegressor(**params, n_estimators=1000)
            model.fit(X_train, y_train,
                      eval_set=[(X_val, y_val)],
                      eval_metric='auc',
                      callbacks=[lgb.early_stopping(100), lgb.log_evaluation(0)])

            y_pred = model.predict(X_val)
            auc = roc_auc_score(y_val, y_pred)
            auc_scores.append(auc)

        return np.mean(auc_scores)

    def train_optimized_models(self):
        """Train the four base models with 5-fold out-of-fold prediction.

        Models: Optuna-tuned LightGBM, fixed-parameter XGBoost and
        CatBoost, and a RandomForest for ensemble diversity. All folds
        share one KFold split so OOF vectors are comparable.

        Returns:
            (oof_predictions, test_predictions): dicts keyed by model name
            ('lgb', 'xgb', 'cat', 'rf') holding OOF train predictions and
            fold-averaged test predictions.
        """
        print("开始训练优化模型...")

        n_fold = 5
        folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)

        oof_predictions = {}
        test_predictions = {}

        # 1. Optuna-tuned LightGBM.
        print("训练优化LightGBM...")
        study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler(seed=2022))
        study.optimize(self.optimize_lightgbm, n_trials=50, show_progress_bar=True)

        best_lgb_params = study.best_params
        # Re-attach the fixed (non-searched) parameters.
        best_lgb_params.update({
            'objective': 'regression',
            'metric': 'auc',
            'verbose': -1,
            'random_state': 2022
        })

        oof_lgb = np.zeros(len(self.X))
        prediction_lgb = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = lgb.LGBMRegressor(**best_lgb_params, n_estimators=2000)
            model.fit(X_train, y_train,
                      eval_set=[(X_valid, y_valid)],
                      eval_metric='auc',
                      callbacks=[lgb.early_stopping(200), lgb.log_evaluation(0)])

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_lgb[valid_index] = y_pred_valid
            prediction_lgb += y_pred

        prediction_lgb /= n_fold
        oof_predictions['lgb'] = oof_lgb
        test_predictions['lgb'] = prediction_lgb
        print(f"LightGBM AUC: {roc_auc_score(self.y, oof_lgb):.6f}")

        # 2. XGBoost with fixed, conservative hyperparameters.
        print("训练优化XGBoost...")
        oof_xgb = np.zeros(len(self.X))
        prediction_xgb = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = XGBRegressor(
                max_depth=8,
                learning_rate=0.02,
                n_estimators=3000,
                colsample_bytree=0.7,
                subsample=0.8,
                reg_alpha=0.1,
                reg_lambda=0.1,
                random_state=2022,
                eval_metric='auc',
                early_stopping_rounds=200
            )

            model.fit(X_train, y_train,
                      eval_set=[(X_valid, y_valid)],
                      verbose=False)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_xgb[valid_index] = y_pred_valid
            prediction_xgb += y_pred

        prediction_xgb /= n_fold
        oof_predictions['xgb'] = oof_xgb
        test_predictions['xgb'] = prediction_xgb
        print(f"XGBoost AUC: {roc_auc_score(self.y, oof_xgb):.6f}")

        # 3. CatBoost with fixed hyperparameters.
        print("训练优化CatBoost...")
        oof_cat = np.zeros(len(self.X))
        prediction_cat = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            train_pool = Pool(X_train, y_train)
            eval_pool = Pool(X_valid, y_valid)

            model = CatBoostRegressor(
                iterations=5000,
                learning_rate=0.03,
                depth=8,
                l2_leaf_reg=5.0,
                rsm=0.7,
                bagging_temperature=0.2,
                random_strength=0.2,
                use_best_model=True,
                random_seed=2022,
                logging_level='Silent',
                early_stopping_rounds=300
            )

            model.fit(train_pool, eval_set=eval_pool, verbose=False)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_cat[valid_index] = y_pred_valid
            prediction_cat += y_pred

        prediction_cat /= n_fold
        oof_predictions['cat'] = oof_cat
        test_predictions['cat'] = prediction_cat
        print(f"CatBoost AUC: {roc_auc_score(self.y, oof_cat):.6f}")

        # 4. RandomForest — weaker alone, adds diversity to the ensemble.
        print("训练随机森林...")
        oof_rf = np.zeros(len(self.X))
        prediction_rf = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = RandomForestRegressor(
                n_estimators=2000,
                max_depth=15,
                min_samples_split=5,
                min_samples_leaf=2,
                max_features='sqrt',
                random_state=2022,
                n_jobs=-1
            )

            model.fit(X_train, y_train)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_rf[valid_index] = y_pred_valid
            prediction_rf += y_pred

        prediction_rf /= n_fold
        oof_predictions['rf'] = oof_rf
        test_predictions['rf'] = prediction_rf
        print(f"RandomForest AUC: {roc_auc_score(self.y, oof_rf):.6f}")

        return oof_predictions, test_predictions

    def advanced_ensemble(self, oof_predictions, test_predictions):
        """Two-level stacking of the base-model predictions.

        Level 1: three meta-learners (BayesianRidge, ElasticNet, small
        LightGBM) are each trained on the stacked OOF predictions with
        repeated K-fold CV. Level 2: their outputs are blended with fixed
        weights.

        Args:
            oof_predictions: dict of OOF train predictions per base model.
            test_predictions: dict of test predictions per base model.
        Returns:
            (final_pred, final_auc): blended test predictions and the
            blended OOF AUC on the training target.
        """
        print("开始高级模型融合...")

        # Stack base-model predictions column-wise; both dicts were filled
        # in the same insertion order, so columns line up.
        train_stack = np.column_stack([oof_predictions[model] for model in oof_predictions.keys()])
        test_stack = np.column_stack([test_predictions[model] for model in test_predictions.keys()])

        folds_stack = RepeatedKFold(n_splits=5, n_repeats=3, random_state=2022)
        oof_stack = np.zeros(train_stack.shape[0])
        predictions_stack = np.zeros(test_stack.shape[0])

        # Level 1: diverse meta-learners on the stacked features.
        meta_models = [
            BayesianRidge(),
            ElasticNet(alpha=0.1, l1_ratio=0.5, random_state=2022),
            LGBMRegressor(n_estimators=100, learning_rate=0.1, random_state=2022, verbose=-1)
        ]

        meta_predictions = []

        for i, meta_model in enumerate(meta_models):
            oof_meta = np.zeros(train_stack.shape[0])
            pred_meta = np.zeros(test_stack.shape[0])

            for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, self.y)):
                trn_data, trn_y = train_stack[trn_idx], self.y.iloc[trn_idx].values
                val_data, val_y = train_stack[val_idx], self.y.iloc[val_idx].values

                meta_model.fit(trn_data, trn_y)
                # With RepeatedKFold each OOF slot is written n_repeats
                # times (last repeat wins); test preds average all
                # n_splits * n_repeats fits via get_n_splits().
                oof_meta[val_idx] = meta_model.predict(val_data)
                pred_meta += meta_model.predict(test_stack) / folds_stack.get_n_splits()

            meta_predictions.append((oof_meta, pred_meta))
            print(f"Meta Model {i+1} AUC: {roc_auc_score(self.y, oof_meta):.6f}")

        # Level 2: fixed-weight blend of the meta-learner outputs.
        weights = [0.4, 0.3, 0.3]  # tune by validation performance if needed

        final_oof = np.zeros(train_stack.shape[0])
        final_pred = np.zeros(test_stack.shape[0])

        for i, (oof_meta, pred_meta) in enumerate(meta_predictions):
            final_oof += weights[i] * oof_meta
            final_pred += weights[i] * pred_meta

        final_auc = roc_auc_score(self.y, final_oof)
        print(f"最终融合模型 AUC: {final_auc:.6f}")

        return final_pred, final_auc

    def run_optimization(self):
        """Run the full pipeline and write `optimized_submission.csv`.

        Returns:
            The final blended OOF AUC score.
        """
        print("开始客户购买预测模型优化...")

        # 1. Load data.
        self.load_data()

        # 2. Feature engineering.
        X, y, X_test = self.advanced_feature_engineering()

        # 3. Train base models.
        oof_predictions, test_predictions = self.train_optimized_models()

        # 4. Stacked ensemble.
        final_predictions, final_auc = self.advanced_ensemble(oof_predictions, test_predictions)

        # 5. Save the submission file.
        submission = pd.DataFrame({
            'ID': self.test['ID'],
            'pred': final_predictions
        })
        submission.to_csv('optimized_submission.csv', index=False)

        print(f"\n优化完成！最终AUC分数: {final_auc:.6f}")
        print("结果已保存到 optimized_submission.csv")

        return final_auc

if __name__ == "__main__":
    # Entry point: build the pipeline object and run the whole workflow
    # (load -> engineer -> train -> ensemble -> save submission).
    predictor = AdvancedCustomerPurchasePrediction()
    final_score = predictor.run_optimization()