#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
终极优化版客户购买预测模型
目标：必须超越Baseline的0.936 AUC分数
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency, f_oneway, ttest_ind
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, PolynomialFeatures, RobustScaler, PowerTransformer
from sklearn.feature_selection import SelectKBest, f_classif, RFE, mutual_info_regression, SelectFromModel
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor, VotingRegressor
from sklearn.linear_model import BayesianRidge, ElasticNet, Ridge, Lasso, HuberRegressor
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from sklearn.decomposition import PCA
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBRegressor
from catboost import CatBoostRegressor, Pool
from lightgbm import LGBMRegressor
import optuna
import warnings
warnings.filterwarnings('ignore')

plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class UltimateCustomerPurchasePrediction:
    """End-to-end customer purchase prediction pipeline.

    Stages (see run_ultimate_optimization):
      1. load_data                     -- read train/test CSV files
      2. ultimate_feature_engineering  -- transforms, interactions, encodings
      3. train_ultimate_models         -- CV-bagged LightGBM/XGBoost/CatBoost
      4. ultimate_ensemble             -- BayesianRidge stacking of OOF preds
      5. write ultimate_submission.csv
    """

    def __init__(self):
        self.models = {}         # placeholder for fitted models (not used below)
        self.scalers = {}        # placeholder for fitted scalers (not used below)
        self.feature_names = []  # final model-input feature names

    def load_data(self):
        """Load train/test CSVs from the working directory onto self."""
        self.train = pd.read_csv('train_set.csv')
        self.test = pd.read_csv('test_set.csv')
        # Feature-only concatenation of both splits; kept for backward
        # compatibility, the rest of the pipeline does not read it.
        self.data = pd.concat([self.train.drop(['y'], axis=1), self.test], axis=0).reset_index(drop=True)
        print(f"训练集大小: {self.train.shape}")
        print(f"测试集大小: {self.test.shape}")

    def ultimate_feature_engineering(self):
        """Build the engineered, scaled feature matrices.

        Returns:
            tuple: (X, y, X_test) -- scaled training features, training
            target, scaled test features.  Also stored on self.
        """
        print("开始终极特征工程...")

        # Split raw columns into categorical (object dtype) and numeric
        # features, excluding the ID and target columns.
        str_features = []
        num_features = []

        for col in self.train.columns:
            if self.train[col].dtype == 'object':
                str_features.append(col)
            elif self.train[col].dtype in ['int64', 'float64'] and col not in ['ID', 'y']:
                num_features.append(col)

        # 1. Keep raw distributions untouched (no outlier clipping).
        print("保持原始数据分布...")

        # 2. Per-column non-linear transforms.  The shift that makes values
        # positive for sqrt/log is computed from the TRAINING split and
        # reused on the test split so both receive the identical mapping.
        # (Bug fix: the original used the test set's own minimum, producing
        # train/test-inconsistent features.)
        for col in num_features:
            shift = self.train[col].min()

            # Square-root transform (values shifted so train min maps to 1).
            self.train[f'{col}_sqrt'] = np.sqrt(self.train[col] - shift + 1)
            self.test[f'{col}_sqrt'] = np.sqrt(self.test[col] - shift + 1)

            # Cube-root transform (defined for negatives; no shift needed).
            self.train[f'{col}_cbrt'] = np.cbrt(self.train[col])
            self.test[f'{col}_cbrt'] = np.cbrt(self.test[col])

            # Log transform (same training-derived shift).
            self.train[f'{col}_log'] = np.log1p(self.train[col] - shift + 1)
            self.test[f'{col}_log'] = np.log1p(self.test[col] - shift + 1)

            # Square transform.
            self.train[f'{col}_square'] = self.train[col] ** 2
            self.test[f'{col}_square'] = self.test[col] ** 2

            # Reciprocal transform (+1 to avoid division by zero).
            self.train[f'{col}_inv'] = 1 / (self.train[col] + 1)
            self.test[f'{col}_inv'] = 1 / (self.test[col] + 1)

        # 3. Pairwise interactions between all numeric features.
        for i, col1 in enumerate(num_features):
            for col2 in num_features[i+1:]:
                # Product
                self.train[f'{col1}_{col2}_mul'] = self.train[col1] * self.train[col2]
                self.test[f'{col1}_{col2}_mul'] = self.test[col1] * self.test[col2]

                # Ratio (+1 in the denominator to avoid division by zero)
                self.train[f'{col1}_{col2}_div'] = self.train[col1] / (self.train[col2] + 1)
                self.test[f'{col1}_{col2}_div'] = self.test[col1] / (self.test[col2] + 1)

                # Difference
                self.train[f'{col1}_{col2}_diff'] = self.train[col1] - self.train[col2]
                self.test[f'{col1}_{col2}_diff'] = self.test[col1] - self.test[col2]

                # Sum
                self.train[f'{col1}_{col2}_sum'] = self.train[col1] + self.train[col2]
                self.test[f'{col1}_{col2}_sum'] = self.test[col1] + self.test[col2]

        # 4. Out-of-fold target encoding of categoricals: each training row
        # is encoded with category means learned on the OTHER folds, which
        # limits target leakage.  (KFold is already imported at module level;
        # the original re-imported it locally.)
        kf = KFold(n_splits=5, shuffle=True, random_state=2022)

        for col in str_features:
            target_encoded = np.zeros(len(self.train))

            for train_idx, val_idx in kf.split(self.train):
                train_fold = self.train.iloc[train_idx]
                val_fold = self.train.iloc[val_idx]

                # Category -> mean(y) mapping from the in-fold rows only.
                target_mean = train_fold.groupby(col)['y'].mean()
                global_mean = train_fold['y'].mean()

                # Unseen categories fall back to the fold's global mean.
                target_encoded[val_idx] = val_fold[col].map(target_mean).fillna(global_mean)

            self.train[f'{col}_target_encoded'] = target_encoded

            # The test split is encoded with full-training-set means.
            target_mean_full = self.train.groupby(col)['y'].mean()
            global_mean_full = self.train['y'].mean()
            self.test[f'{col}_target_encoded'] = self.test[col].map(target_mean_full).fillna(global_mean_full)

        # 5. Frequency encoding (training counts; unseen categories -> 0).
        for col in str_features:
            freq_map = self.train[col].value_counts().to_dict()
            self.train[f'{col}_freq'] = self.train[col].map(freq_map)
            self.test[f'{col}_freq'] = self.test[col].map(freq_map).fillna(0)

        # 6. Pairwise concatenation of categorical features (one-hot encoded
        # later together with the original categoricals).
        for i, col1 in enumerate(str_features):
            for col2 in str_features[i+1:]:
                self.train[f'{col1}_{col2}_combo'] = self.train[col1].astype(str) + '_' + self.train[col2].astype(str)
                self.test[f'{col1}_{col2}_combo'] = self.test[col1].astype(str) + '_' + self.test[col2].astype(str)

        # 7. Group statistics: per-category mean/std of each numeric
        # feature, learned on the training split only.
        for cat_col in str_features:
            for num_col in num_features:
                # Mean (unseen test categories -> mean of the group means).
                group_mean = self.train.groupby(cat_col)[num_col].mean()
                self.train[f'{cat_col}_{num_col}_mean'] = self.train[cat_col].map(group_mean)
                self.test[f'{cat_col}_{num_col}_mean'] = self.test[cat_col].map(group_mean).fillna(group_mean.mean())

                # Standard deviation (same fallback strategy).
                group_std = self.train.groupby(cat_col)[num_col].std()
                self.train[f'{cat_col}_{num_col}_std'] = self.train[cat_col].map(group_std)
                self.test[f'{cat_col}_{num_col}_std'] = self.test[cat_col].map(group_std).fillna(group_std.mean())

        # 8. Assemble the final matrix: every numeric column except ID/y ...
        all_features = []
        for col in self.train.columns:
            if col not in ['ID', 'y'] and self.train[col].dtype in ['int64', 'float64']:
                all_features.append(col)

        # ... plus one-hot encodings of every object column (original
        # categoricals and the step-6 combos).
        categorical_features = []
        for col in self.train.columns:
            if self.train[col].dtype == 'object' and col not in ['ID']:
                categorical_features.append(col)

        dummy_train = self.train[all_features].copy()
        dummy_test = self.test[all_features].copy()

        for col in categorical_features:
            dummies_train = pd.get_dummies(self.train[col], prefix=col)
            dummies_test = pd.get_dummies(self.test[col], prefix=col)

            # Align both splits on the UNION of dummy columns in one fixed
            # (sorted) order.  (Bug fix: the original appended missing
            # columns in set-iteration order, so train and test could carry
            # the same columns in DIFFERENT order -- VarianceThreshold and
            # RobustScaler below operate positionally and would silently
            # mix features up between the splits.)
            all_columns = sorted(set(dummies_train.columns) | set(dummies_test.columns))
            dummies_train = dummies_train.reindex(columns=all_columns, fill_value=0)
            dummies_test = dummies_test.reindex(columns=all_columns, fill_value=0)

            dummy_train = pd.concat([dummy_train, dummies_train], axis=1)
            dummy_test = pd.concat([dummy_test, dummies_test], axis=1)

        print(f"特征工程后特征数量: {dummy_train.shape[1]}")

        # 9. Replace infinities (from ratios/logs) with NaN, then impute
        # with TRAINING medians on both splits.
        dummy_train = dummy_train.replace([np.inf, -np.inf], np.nan)
        dummy_test = dummy_test.replace([np.inf, -np.inf], np.nan)

        dummy_train = dummy_train.fillna(dummy_train.median())
        dummy_test = dummy_test.fillna(dummy_train.median())

        # Coarse feature selection: drop near-constant columns.
        from sklearn.feature_selection import VarianceThreshold
        var_selector = VarianceThreshold(threshold=0.01)
        dummy_train_var = var_selector.fit_transform(dummy_train)
        dummy_test_var = var_selector.transform(dummy_test)

        selected_features = dummy_train.columns[var_selector.get_support()]
        dummy_train = pd.DataFrame(dummy_train_var, columns=selected_features, index=dummy_train.index)
        dummy_test = pd.DataFrame(dummy_test_var, columns=selected_features, index=dummy_test.index)

        print(f"特征选择后特征数量: {dummy_train.shape[1]}")

        # 10. Robust (median/IQR) scaling, fitted on the training split only.
        scaler = RobustScaler()
        dummy_train_scaled = pd.DataFrame(
            scaler.fit_transform(dummy_train),
            columns=dummy_train.columns,
            index=dummy_train.index
        )
        dummy_test_scaled = pd.DataFrame(
            scaler.transform(dummy_test),
            columns=dummy_test.columns,
            index=dummy_test.index
        )

        self.X = dummy_train_scaled
        self.y = self.train['y']
        self.X_test = dummy_test_scaled
        self.feature_names = list(dummy_train.columns)

        return self.X, self.y, self.X_test

    def train_ultimate_models(self):
        """Train the diversified base-model pool with cross-validation.

        For each of three LightGBM, XGBoost and CatBoost configurations,
        runs 15-fold stratified CV (stratified on a binned copy of y),
        collecting out-of-fold predictions for stacking and fold-averaged
        test predictions.

        Returns:
            tuple: (oof_predictions, test_predictions) dicts keyed by
            model name ('lgb_0' ... 'cat_2').
        """
        print("开始训练终极模型组合...")

        n_fold = 15
        folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=2022)

        # StratifiedKFold needs discrete labels; bin the target into 10
        # equal-width bins so folds preserve the target distribution.
        y_binned = pd.cut(self.y, bins=10, labels=False)

        oof_predictions = {}   # model name -> out-of-fold train predictions
        test_predictions = {}  # model name -> fold-averaged test predictions

        # 1. Three LightGBM regressors of varying capacity.
        # NOTE(review): objective 'regression' with eval metric 'auc'
        # presumes y is a binary 0/1 label -- confirm against the data.
        lgb_configs = [
            {'num_leaves': 31, 'learning_rate': 0.005, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'max_depth': -1},
            {'num_leaves': 63, 'learning_rate': 0.01, 'feature_fraction': 0.7, 'bagging_fraction': 0.9, 'max_depth': 8},
            {'num_leaves': 127, 'learning_rate': 0.02, 'feature_fraction': 0.9, 'bagging_fraction': 0.7, 'max_depth': 10},
        ]

        for i, config in enumerate(lgb_configs):
            print(f"训练LightGBM模型 {i+1}...")
            oof_lgb = np.zeros(len(self.X))
            prediction_lgb = np.zeros(len(self.X_test))

            params = {
                'objective': 'regression',
                'metric': 'auc',
                'boosting_type': 'gbdt',
                'verbose': -1,
                'random_state': 2022,
                'n_estimators': 10000,  # effectively capped by early stopping
                **config
            }

            for train_index, valid_index in folds.split(self.X, y_binned):
                X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
                y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

                model = lgb.LGBMRegressor(**params)
                model.fit(X_train, y_train,
                         eval_set=[(X_valid, y_valid)],
                         eval_metric='auc',
                         callbacks=[lgb.early_stopping(500), lgb.log_evaluation(0)])

                # OOF slot for this fold; test predictions are summed
                # here and averaged after the loop.
                oof_lgb[valid_index] = model.predict(X_valid)
                prediction_lgb += model.predict(self.X_test)

            prediction_lgb /= n_fold
            oof_predictions[f'lgb_{i}'] = oof_lgb
            test_predictions[f'lgb_{i}'] = prediction_lgb
            print(f"LightGBM {i+1} AUC: {roc_auc_score(self.y, oof_lgb):.6f}")

        # 2. Three XGBoost regressors.
        xgb_configs = [
            {'max_depth': 6, 'learning_rate': 0.005, 'colsample_bytree': 0.8, 'subsample': 0.8},
            {'max_depth': 8, 'learning_rate': 0.01, 'colsample_bytree': 0.7, 'subsample': 0.9},
            {'max_depth': 10, 'learning_rate': 0.02, 'colsample_bytree': 0.9, 'subsample': 0.7},
        ]

        for i, config in enumerate(xgb_configs):
            print(f"训练XGBoost模型 {i+1}...")
            oof_xgb = np.zeros(len(self.X))
            prediction_xgb = np.zeros(len(self.X_test))

            for train_index, valid_index in folds.split(self.X, y_binned):
                X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
                y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

                model = XGBRegressor(
                    n_estimators=10000,
                    random_state=2022,
                    eval_metric='auc',
                    early_stopping_rounds=500,
                    **config
                )

                model.fit(X_train, y_train,
                         eval_set=[(X_valid, y_valid)],
                         verbose=False)

                oof_xgb[valid_index] = model.predict(X_valid)
                prediction_xgb += model.predict(self.X_test)

            prediction_xgb /= n_fold
            oof_predictions[f'xgb_{i}'] = oof_xgb
            test_predictions[f'xgb_{i}'] = prediction_xgb
            print(f"XGBoost {i+1} AUC: {roc_auc_score(self.y, oof_xgb):.6f}")

        # 3. Three CatBoost regressors.
        cat_configs = [
            {'depth': 6, 'learning_rate': 0.005, 'l2_leaf_reg': 3.0},
            {'depth': 8, 'learning_rate': 0.01, 'l2_leaf_reg': 5.0},
            {'depth': 10, 'learning_rate': 0.02, 'l2_leaf_reg': 1.0},
        ]

        for i, config in enumerate(cat_configs):
            print(f"训练CatBoost模型 {i+1}...")
            oof_cat = np.zeros(len(self.X))
            prediction_cat = np.zeros(len(self.X_test))

            for train_index, valid_index in folds.split(self.X, y_binned):
                X_train, X_valid = self.X.iloc[train_index], self.X.iloc[valid_index]
                y_train, y_valid = self.y.iloc[train_index], self.y.iloc[valid_index]

                train_pool = Pool(X_train, y_train)
                eval_pool = Pool(X_valid, y_valid)

                model = CatBoostRegressor(
                    iterations=10000,
                    random_seed=2022,
                    logging_level='Silent',
                    early_stopping_rounds=500,
                    use_best_model=True,
                    **config
                )

                model.fit(train_pool, eval_set=eval_pool, verbose=False)

                oof_cat[valid_index] = model.predict(X_valid)
                prediction_cat += model.predict(self.X_test)

            prediction_cat /= n_fold
            oof_predictions[f'cat_{i}'] = oof_cat
            test_predictions[f'cat_{i}'] = prediction_cat
            print(f"CatBoost {i+1} AUC: {roc_auc_score(self.y, oof_cat):.6f}")

        return oof_predictions, test_predictions

    def ultimate_ensemble(self, oof_predictions, test_predictions):
        """Stack the base models with a BayesianRidge meta-learner.

        Args:
            oof_predictions: dict of out-of-fold train predictions per model.
            test_predictions: dict of test predictions per model.

        Returns:
            tuple: (predictions_stack, final_auc) -- averaged stacked test
            predictions and the OOF AUC of the stacked model.
        """
        print("开始终极模型融合...")

        # Base-model predictions become the meta-learner's feature columns.
        train_stack = np.column_stack([oof_predictions[model] for model in oof_predictions.keys()])
        test_stack = np.column_stack([test_predictions[model] for model in test_predictions.keys()])

        # Repeated K-fold for a lower-variance stacking estimate.
        folds_stack = RepeatedKFold(n_splits=15, n_repeats=10, random_state=2022)

        meta_model = BayesianRidge()

        oof_stack = np.zeros(train_stack.shape[0])
        predictions_stack = np.zeros(test_stack.shape[0])

        for trn_idx, val_idx in folds_stack.split(train_stack, self.y):
            trn_data, trn_y = train_stack[trn_idx], self.y.iloc[trn_idx].values
            val_data, val_y = train_stack[val_idx], self.y.iloc[val_idx].values

            meta_model.fit(trn_data, trn_y)
            oof_stack[val_idx] = meta_model.predict(val_data)
            # get_n_splits() on RepeatedKFold is n_splits * n_repeats (150),
            # i.e. the total number of fits being averaged here.
            predictions_stack += meta_model.predict(test_stack) / folds_stack.get_n_splits()

        final_auc = roc_auc_score(self.y, oof_stack)
        print(f"终极融合模型 AUC: {final_auc:.6f}")

        return predictions_stack, final_auc

    def run_ultimate_optimization(self):
        """Run the full pipeline and write ultimate_submission.csv.

        Returns:
            float: the OOF AUC of the final stacked ensemble.
        """
        print("开始终极客户购买预测模型优化...")

        # 1. Load data
        self.load_data()

        # 2. Feature engineering
        X, y, X_test = self.ultimate_feature_engineering()

        # 3. Train the base-model pool
        oof_predictions, test_predictions = self.train_ultimate_models()

        # 4. Stacked ensemble
        final_predictions, final_auc = self.ultimate_ensemble(oof_predictions, test_predictions)

        # 5. Save the submission file
        submission = pd.DataFrame({
            'ID': self.test['ID'],
            'pred': final_predictions
        })
        submission.to_csv('ultimate_submission.csv', index=False)

        print(f"\n终极优化完成！最终AUC分数: {final_auc:.6f}")
        print("结果已保存到 ultimate_submission.csv")

        return final_auc

if __name__ == "__main__":
    # Script entry point: build the pipeline object and run every stage,
    # keeping the final OOF AUC around for interactive inspection.
    pipeline = UltimateCustomerPurchasePrediction()
    final_score = pipeline.run_ultimate_optimization()