#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
超级优化版客户购买预测模型
目标：超越Baseline的0.936 AUC分数
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency, f_oneway, ttest_ind
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, PolynomialFeatures, RobustScaler
from sklearn.feature_selection import SelectKBest, f_classif, RFE, mutual_info_regression
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.linear_model import BayesianRidge, ElasticNet, Ridge, Lasso
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.neural_network import MLPRegressor
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBRegressor
from catboost import CatBoostRegressor, Pool
from lightgbm import LGBMRegressor
import optuna
import warnings
# Silence library warnings globally (verbose LightGBM/XGBoost/sklearn output).
warnings.filterwarnings('ignore')

# Configure matplotlib for Chinese glyphs (SimHei) and correct minus-sign rendering.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class SuperAdvancedCustomerPurchasePrediction:
    """End-to-end pipeline: feature engineering, multi-model CV training, stacking.

    The target 'y' is modeled with REGRESSORS and evaluated by AUC on the raw
    regression scores — this assumes 'y' holds binary 0/1 labels (TODO confirm
    against the data files; the AUC calls would otherwise raise).
    """

    def __init__(self) -> None:
        # Placeholders; populated by the pipeline methods below.
        self.models = {}
        self.scalers = {}
        self.feature_names = []

    def load_data(self) -> None:
        """Load train_set.csv / test_set.csv from the current working directory."""
        self.train = pd.read_csv('train_set.csv')
        self.test = pd.read_csv('test_set.csv')
        # Combined feature frame (train minus target, then test). NOTE(review):
        # nothing downstream reads self.data — candidate for removal.
        self.data = pd.concat([self.train.drop(['y'], axis=1), self.test], axis=0).reset_index(drop=True)
        print(f"训练集大小: {self.train.shape}")
        print(f"测试集大小: {self.test.shape}")

    def super_feature_engineering(self):
        """Build the model matrix and return (X, y, X_test).

        Steps: outlier clipping, derived/interaction features, fine-grained
        binning, categorical combinations, one-hot encoding, polynomial
        interactions, mutual-information feature selection, RobustScaler scaling.
        Mutates self.train / self.test in place and sets self.X, self.y,
        self.X_test, self.feature_names.
        """
        print("开始超级特征工程...")

        # Split columns into categorical (object dtype) and raw numeric features;
        # 'ID' and the target 'y' are excluded from the numeric list.
        str_features = []
        num_features = []

        for col in self.train.columns:
            if self.train[col].dtype == 'object':
                str_features.append(col)
            elif self.train[col].dtype in ['int64', 'float64'] and col not in ['ID', 'y']:
                num_features.append(col)

        # 1. Gentle outlier handling: clip each numeric column to its own
        #    1st/99th percentiles (returns a clipped copy).
        def outlier_processing_super(dfx, method='percentile'):
            df = dfx.copy()
            if method == 'percentile':
                # Loose bounds so the bulk of the distribution is untouched.
                lower = df.quantile(0.01)
                upper = df.quantile(0.99)
                df = np.clip(df, lower, upper)
            return df

        # NOTE(review): the test set is clipped with its OWN quantiles, not the
        # train-set bounds — confirm this train/test inconsistency is intended.
        for col in num_features:
            self.train[col] = outlier_processing_super(self.train[col])
            self.test[col] = outlier_processing_super(self.test[col])

        # 2. Derived features.
        # Age transforms.
        self.train['age_squared'] = self.train['age'] ** 2
        self.test['age_squared'] = self.test['age'] ** 2

        self.train['age_log'] = np.log1p(self.train['age'])
        self.test['age_log'] = np.log1p(self.test['age'])

        # Balance transforms (balance can be negative, hence abs/sign split).
        self.train['balance_abs'] = self.train['balance'].abs()
        self.test['balance_abs'] = self.test['balance'].abs()

        self.train['balance_positive'] = (self.train['balance'] > 0).astype(int)
        self.test['balance_positive'] = (self.test['balance'] > 0).astype(int)

        self.train['balance_log'] = np.log1p(self.train['balance_abs'])
        self.test['balance_log'] = np.log1p(self.test['balance_abs'])

        # Call-duration transforms.
        self.train['duration_log'] = np.log1p(self.train['duration'])
        self.test['duration_log'] = np.log1p(self.test['duration'])

        self.train['duration_squared'] = self.train['duration'] ** 2
        self.test['duration_squared'] = self.test['duration'] ** 2

        # Contact-count transform.
        self.train['campaign_log'] = np.log1p(self.train['campaign'])
        self.test['campaign_log'] = np.log1p(self.test['campaign'])

        # 3. Feature interactions.
        # Age crossed with other features.
        self.train['age_duration'] = self.train['age'] * self.train['duration']
        self.test['age_duration'] = self.test['age'] * self.test['duration']

        self.train['age_campaign'] = self.train['age'] * self.train['campaign']
        self.test['age_campaign'] = self.test['age'] * self.test['campaign']

        # Balance crossed with duration.
        self.train['balance_duration'] = self.train['balance_abs'] * self.train['duration']
        self.test['balance_duration'] = self.test['balance_abs'] * self.test['duration']

        # Ratio features (+1 in denominators guards against division by zero).
        self.train['duration_per_campaign'] = self.train['duration'] / (self.train['campaign'] + 1)
        self.test['duration_per_campaign'] = self.test['duration'] / (self.test['campaign'] + 1)

        self.train['balance_per_age'] = self.train['balance_abs'] / (self.train['age'] + 1)
        self.test['balance_per_age'] = self.test['balance_abs'] / (self.test['age'] + 1)

        # 4. Fine-grained binned features.
        # Age bins (10 groups). Values outside (0, 100] become NaN and end up
        # with all-zero dummies after one-hot encoding.
        self.train['age_group_fine'] = pd.cut(self.train['age'], 
                                             bins=[0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 100], 
                                             labels=[f'age_{i}' for i in range(10)])
        self.test['age_group_fine'] = pd.cut(self.test['age'], 
                                            bins=[0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 100], 
                                            labels=[f'age_{i}' for i in range(10)])

        # Balance bins (7 groups, covering the whole real line).
        balance_bins = [-np.inf, -1000, 0, 500, 1000, 2000, 5000, np.inf]
        self.train['balance_group_fine'] = pd.cut(self.train['balance'], 
                                                 bins=balance_bins, 
                                                 labels=[f'balance_{i}' for i in range(7)])
        self.test['balance_group_fine'] = pd.cut(self.test['balance'], 
                                                bins=balance_bins, 
                                                labels=[f'balance_{i}' for i in range(7)])

        # Duration bins (7 groups; duration == 0 falls outside (0, 50] → NaN).
        duration_bins = [0, 50, 100, 200, 300, 500, 1000, np.inf]
        self.train['duration_group_fine'] = pd.cut(self.train['duration'], 
                                                  bins=duration_bins, 
                                                  labels=[f'duration_{i}' for i in range(7)])
        self.test['duration_group_fine'] = pd.cut(self.test['duration'], 
                                                 bins=duration_bins, 
                                                 labels=[f'duration_{i}' for i in range(7)])

        # 5. Combined categorical features (string concatenation of pairs).
        self.train['job_marital'] = self.train['job'] + '_' + self.train['marital']
        self.test['job_marital'] = self.test['job'] + '_' + self.test['marital']

        self.train['education_job'] = self.train['education'] + '_' + self.train['job']
        self.test['education_job'] = self.test['education'] + '_' + self.test['job']

        self.train['contact_month'] = self.train['contact'] + '_' + self.train['month']
        self.test['contact_month'] = self.test['contact'] + '_' + self.test['month']

        # Register the new engineered columns in the feature lists.
        new_str_features = ['age_group_fine', 'balance_group_fine', 'duration_group_fine', 
                           'job_marital', 'education_job', 'contact_month']
        new_num_features = ['age_squared', 'age_log', 'balance_abs', 'balance_positive', 
                           'balance_log', 'duration_log', 'duration_squared', 'campaign_log',
                           'age_duration', 'age_campaign', 'balance_duration', 
                           'duration_per_campaign', 'balance_per_age']

        str_features.extend(new_str_features)
        num_features.extend(new_num_features)

        # 6. One-hot encoding of every categorical feature.
        dummy_train = self.train.copy()
        dummy_test = self.test.copy()

        for col in str_features:
            if col in dummy_train.columns:
                dummies_train = pd.get_dummies(dummy_train[col], prefix=col)
                dummies_test = pd.get_dummies(dummy_test[col], prefix=col)

                # Align dummy columns: add any level missing on one side as an
                # all-zero column so train and test share the same column set.
                all_columns = set(dummies_train.columns) | set(dummies_test.columns)
                for dummy_col in all_columns:
                    if dummy_col not in dummies_train.columns:
                        dummies_train[dummy_col] = 0
                    if dummy_col not in dummies_test.columns:
                        dummies_test[dummy_col] = 0

                dummy_train = pd.concat([dummy_train, dummies_train], axis=1)
                dummy_test = pd.concat([dummy_test, dummies_test], axis=1)

        # Drop the raw categorical columns and 'ID' (plus 'y' on the train side).
        cols_to_drop = str_features + ['ID']
        if 'y' in dummy_train.columns:
            cols_to_drop.append('y')

        dummy_train = dummy_train.drop(cols_to_drop, axis=1)
        dummy_test = dummy_test.drop([col for col in cols_to_drop if col != 'y'], axis=1)

        # 7. Degree-2 interaction-only polynomial features on four base columns.
        poly_features = ['age', 'balance_abs', 'duration', 'campaign']
        poly_data_train = dummy_train[poly_features]
        poly_data_test = dummy_test[poly_features]

        poly = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
        poly_train = poly.fit_transform(poly_data_train)
        poly_test = poly.transform(poly_data_test)

        # The first len(poly_features) output columns are the inputs themselves
        # (already present), so only the pairwise interaction columns are kept.
        poly_feature_names = [f'poly_{i}' for i in range(poly_train.shape[1] - len(poly_features))]
        poly_df_train = pd.DataFrame(poly_train[:, len(poly_features):], 
                                    columns=poly_feature_names, 
                                    index=dummy_train.index)
        poly_df_test = pd.DataFrame(poly_test[:, len(poly_features):], 
                                   columns=poly_feature_names, 
                                   index=dummy_test.index)

        dummy_train = pd.concat([dummy_train, poly_df_train], axis=1)
        dummy_test = pd.concat([dummy_test, poly_df_test], axis=1)

        # 8. Feature selection by mutual information against the target;
        #    drop the bottom quartile of features.
        mi_scores = mutual_info_regression(dummy_train, self.train['y'], random_state=2022)
        mi_threshold = np.percentile(mi_scores, 25)  # keep roughly the top 75% of features
        selected_features = dummy_train.columns[mi_scores > mi_threshold]

        # Selecting by column NAME also puts test columns into train's order.
        dummy_train = dummy_train[selected_features]
        dummy_test = dummy_test[selected_features]

        print(f"特征工程后特征数量: {dummy_train.shape[1]}")

        # 9. RobustScaler (median/IQR based — less sensitive to outliers than
        #    StandardScaler); fit on train only, applied to both sets.
        scaler = RobustScaler()
        dummy_train_scaled = pd.DataFrame(
            scaler.fit_transform(dummy_train),
            columns=dummy_train.columns,
            index=dummy_train.index
        )
        dummy_test_scaled = pd.DataFrame(
            scaler.transform(dummy_test),
            columns=dummy_test.columns,
            index=dummy_test.index
        )

        self.X = dummy_train_scaled
        self.y = self.train['y']
        self.X_test = dummy_test_scaled
        self.feature_names = list(dummy_train.columns)

        return self.X, self.y, self.X_test

    def train_super_models(self):
        """Train five base models with 10-fold CV; return OOF and test predictions.

        Returns:
            (oof_predictions, test_predictions): dicts keyed by model tag
            ('lgb', 'xgb', 'cat', 'et', 'nn') mapping to per-row score arrays.
        """
        print("开始训练超级模型组合...")

        n_fold = 10  # more folds → stabler out-of-fold estimates
        folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)

        # Bin the target into 5 buckets so StratifiedKFold can stratify what is
        # formally treated here as a regression target.
        y_binned = pd.cut(self.y, bins=5, labels=False)

        # Out-of-fold and averaged-test predictions per model.
        oof_predictions = {}
        test_predictions = {}

        # 1. LightGBM.
        print("训练超级LightGBM...")
        oof_lgb = np.zeros(len(self.X))
        prediction_lgb = np.zeros(len(self.X_test))

        # NOTE(review): 'metric': 'auc' under a regression objective only works
        # if y is binary 0/1 — confirm against the data.
        lgb_params = {
            'objective': 'regression',
            'metric': 'auc',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.01,
            'feature_fraction': 0.8,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'min_child_samples': 20,
            'max_depth': -1,
            'reg_alpha': 0.1,
            'reg_lambda': 0.1,
            'verbose': -1,
            'random_state': 2022,
            'n_estimators': 5000
        }

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = lgb.LGBMRegressor(**lgb_params)
            # early_stopping(300) caps the 5000 estimators; log_evaluation(0) silences output.
            model.fit(X_train, y_train,
                     eval_set=[(X_valid, y_valid)],
                     eval_metric='auc',
                     callbacks=[lgb.early_stopping(300), lgb.log_evaluation(0)])

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_lgb[valid_index] = y_pred_valid
            prediction_lgb += y_pred

        prediction_lgb /= n_fold  # average test predictions over folds
        oof_predictions['lgb'] = oof_lgb
        test_predictions['lgb'] = prediction_lgb
        print(f"Super LightGBM AUC: {roc_auc_score(self.y, oof_lgb):.6f}")

        # 2. XGBoost.
        print("训练超级XGBoost...")
        oof_xgb = np.zeros(len(self.X))
        prediction_xgb = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            # Mirrors the LightGBM setup: shallow-ish trees, slow learning rate,
            # large estimator budget bounded by early stopping.
            model = XGBRegressor(
                max_depth=6,
                learning_rate=0.01,
                n_estimators=5000,
                colsample_bytree=0.8,
                subsample=0.8,
                reg_alpha=0.1,
                reg_lambda=0.1,
                random_state=2022,
                eval_metric='auc',
                early_stopping_rounds=300
            )

            model.fit(X_train, y_train,
                     eval_set=[(X_valid, y_valid)],
                     verbose=False)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_xgb[valid_index] = y_pred_valid
            prediction_xgb += y_pred

        prediction_xgb /= n_fold
        oof_predictions['xgb'] = oof_xgb
        test_predictions['xgb'] = prediction_xgb
        print(f"Super XGBoost AUC: {roc_auc_score(self.y, oof_xgb):.6f}")

        # 3. CatBoost.
        print("训练超级CatBoost...")
        oof_cat = np.zeros(len(self.X))
        prediction_cat = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            train_pool = Pool(X_train, y_train)
            eval_pool = Pool(X_valid, y_valid)

            model = CatBoostRegressor(
                iterations=5000,
                learning_rate=0.01,
                depth=6,
                l2_leaf_reg=3.0,
                rsm=0.8,
                bagging_temperature=0.2,
                random_strength=0.2,
                use_best_model=True,  # predict with the best eval-set iteration
                random_seed=2022,
                logging_level='Silent',
                early_stopping_rounds=300
            )

            model.fit(train_pool, eval_set=eval_pool, verbose=False)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_cat[valid_index] = y_pred_valid
            prediction_cat += y_pred

        prediction_cat /= n_fold
        oof_predictions['cat'] = oof_cat
        test_predictions['cat'] = prediction_cat
        print(f"Super CatBoost AUC: {roc_auc_score(self.y, oof_cat):.6f}")

        # 4. ExtraTrees (no early stopping; fixed 1000 trees per fold).
        print("训练ExtraTrees...")
        oof_et = np.zeros(len(self.X))
        prediction_et = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = ExtraTreesRegressor(
                n_estimators=1000,
                max_depth=15,
                min_samples_split=5,
                min_samples_leaf=2,
                max_features='sqrt',
                random_state=2022,
                n_jobs=-1
            )

            model.fit(X_train, y_train)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_et[valid_index] = y_pred_valid
            prediction_et += y_pred

        prediction_et /= n_fold
        oof_predictions['et'] = oof_et
        test_predictions['et'] = prediction_et
        print(f"ExtraTrees AUC: {roc_auc_score(self.y, oof_et):.6f}")

        # 5. Neural network (MLP with internal early stopping on a 10% split).
        print("训练神经网络...")
        oof_nn = np.zeros(len(self.X))
        prediction_nn = np.zeros(len(self.X_test))

        for fold_n, (train_index, valid_index) in enumerate(folds.split(self.X, y_binned)):
            X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
            y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

            model = MLPRegressor(
                hidden_layer_sizes=(200, 100, 50),
                activation='relu',
                solver='adam',
                alpha=0.01,
                learning_rate='adaptive',
                max_iter=1000,
                random_state=2022,
                early_stopping=True,
                validation_fraction=0.1
            )

            model.fit(X_train, y_train)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(self.X_test)

            oof_nn[valid_index] = y_pred_valid
            prediction_nn += y_pred

        prediction_nn /= n_fold
        oof_predictions['nn'] = oof_nn
        test_predictions['nn'] = prediction_nn
        print(f"Neural Network AUC: {roc_auc_score(self.y, oof_nn):.6f}")

        return oof_predictions, test_predictions

    def super_ensemble(self, oof_predictions, test_predictions):
        """Stack the base models with several meta-learners and blend them.

        Args:
            oof_predictions: dict of out-of-fold score arrays per base model.
            test_predictions: dict of averaged test score arrays per base model.
        Returns:
            (final_pred, final_auc): blended test predictions and the AUC of
            the blended out-of-fold predictions.
        """
        print("开始超级模型融合...")

        # Stacked matrices: one column per base model. Iteration order of the
        # two dicts matches because both were filled in the same sequence.
        train_stack = np.column_stack([oof_predictions[model] for model in oof_predictions.keys()])
        test_stack = np.column_stack([test_predictions[model] for model in test_predictions.keys()])

        # 10 folds repeated 5 times → 50 meta-model fits per meta-learner.
        folds_stack = RepeatedKFold(n_splits=10, n_repeats=5, random_state=2022)

        # Candidate meta-learners for the stacking level.
        meta_models = [
            ('bayesian', BayesianRidge()),
            ('elastic', ElasticNet(alpha=0.01, l1_ratio=0.5, random_state=2022)),
            ('ridge', Ridge(alpha=1.0, random_state=2022)),
            ('lgb_meta', LGBMRegressor(n_estimators=500, learning_rate=0.05, random_state=2022, verbose=-1)),
            ('xgb_meta', XGBRegressor(n_estimators=500, learning_rate=0.05, random_state=2022, eval_metric='auc'))
        ]

        meta_predictions = []
        meta_weights = []

        for name, meta_model in meta_models:
            oof_meta = np.zeros(train_stack.shape[0])
            pred_meta = np.zeros(test_stack.shape[0])

            # NOTE(review): with n_repeats=5 each row appears in 5 validation
            # folds, so oof_meta keeps only the LAST repeat's prediction per row;
            # pred_meta, by contrast, averages over all 50 fits via
            # get_n_splits() == n_splits * n_repeats.
            for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, self.y)):
                trn_data, trn_y = train_stack[trn_idx], self.y.iloc[trn_idx].values
                val_data, val_y = train_stack[val_idx], self.y.iloc[val_idx].values

                meta_model.fit(trn_data, trn_y)
                oof_meta[val_idx] = meta_model.predict(val_data)
                pred_meta += meta_model.predict(test_stack) / folds_stack.get_n_splits()

            meta_auc = roc_auc_score(self.y, oof_meta)
            meta_predictions.append((oof_meta, pred_meta))
            meta_weights.append(meta_auc)
            print(f"Meta Model {name} AUC: {meta_auc:.6f}")

        # Blend meta-learners weighted by their (normalized) OOF AUC.
        meta_weights = np.array(meta_weights)
        meta_weights = meta_weights / meta_weights.sum()

        final_oof = np.zeros(train_stack.shape[0])
        final_pred = np.zeros(test_stack.shape[0])

        for i, (oof_meta, pred_meta) in enumerate(meta_predictions):
            final_oof += meta_weights[i] * oof_meta
            final_pred += meta_weights[i] * pred_meta

        final_auc = roc_auc_score(self.y, final_oof)
        print(f"最终超级融合模型 AUC: {final_auc:.6f}")

        return final_pred, final_auc

    def run_super_optimization(self):
        """Run the full pipeline and write the submission CSV.

        Returns:
            final_auc: AUC of the blended out-of-fold ensemble predictions.
        """
        print("开始超级客户购买预测模型优化...")

        # 1. Load data.
        self.load_data()

        # 2. Feature engineering.
        X, y, X_test = self.super_feature_engineering()

        # 3. Train base models with cross-validation.
        oof_predictions, test_predictions = self.train_super_models()

        # 4. Stack and blend.
        final_predictions, final_auc = self.super_ensemble(oof_predictions, test_predictions)

        # 5. Persist submission (ID + raw ensemble score).
        submission = pd.DataFrame({
            'ID': self.test['ID'],
            'pred': final_predictions
        })
        submission.to_csv('super_optimized_submission.csv', index=False)

        print(f"\n超级优化完成！最终AUC分数: {final_auc:.6f}")
        print("结果已保存到 super_optimized_submission.csv")

        return final_auc

if __name__ == "__main__":
    # 运行超级优化
    optimizer = SuperAdvancedCustomerPurchasePrediction()
    final_score = optimizer.run_super_optimization()