import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from sklearn.metrics import roc_auc_score, log_loss
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.decomposition import PCA

import lightgbm as lgb
import xgboost as xgb

try:
    import tensorflow as tf
    from tensorflow.keras.models import Model, Sequential
    from tensorflow.keras.layers import Dense, Input, Dropout, LSTM, Embedding, Attention
    from tensorflow.keras.layers import BatchNormalization, Concatenate, Add, Multiply
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
    from tensorflow.keras.regularizers import l2
    TENSORFLOW_AVAILABLE = True
except ImportError:
    print("TensorFlow not available, using simplified models")
    TENSORFLOW_AVAILABLE = False

import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import os

class AdvancedLoanPredictor:
    """Advanced loan-default predictor.

    Pipeline: four domain-specific sub-models (financial indicators, credit
    history, spatiotemporal behavior, and anonymous behavior via an
    autoencoder) each produce a default probability; an attention-based
    fusion network combines them into the final prediction.  Every
    deep-learning component has a LightGBM fallback that is used when
    TensorFlow is unavailable (TENSORFLOW_AVAILABLE is False).
    """

    def __init__(self):
        """Initialize feature-group definitions and empty model registries."""
        # Feature grouping used to route raw columns to the sub-models.
        self.feature_groups = {
            # 1. Identity fields (3 items)
            'identity': ['id', 'policyCode', 'regionCode'],

            # 2. Core financial indicators (7 items)
            'financial': ['loanAmnt', 'interestRate', 'installment', 'dti',
                         'annualIncome', 'revolBal', 'revolUtil'],

            # 3. Personal credit history (18 items)
            'credit_history': ['openAcc', 'totalAcc', 'ficoRangeLow', 'ficoRangeHigh',
                              'delinquency_2years', 'pubRec', 'pubRecBankruptcies', 'earliesCreditLine',
                              'employmentTitle', 'employmentLength', 'homeOwnership', 'title', 'applicationType',
                              'grade', 'subGrade', 'purpose', 'verificationStatus', 'initialListStatus'],

            # 4. Spatiotemporal behavior features (6 items; 'purpose',
            #    'regionCode' and 'applicationType' intentionally overlap
            #    with other groups)
            'spatiotemporal': ['issueDate', 'postCode', 'term', 'purpose', 'regionCode', 'applicationType'],

            # 5. Anonymous behavior features n0..n14 (15 items)
            'anonymous': [f'n{i}' for i in range(15)]
        }

        self.models = {}               # trained sub-models keyed by group name
        self.scalers = {}              # fitted scalers keyed by group name
        self.encoders = {}             # fitted LabelEncoders keyed by column name
        self.feature_importances = {}  # feature-importance dicts keyed by model

    def load_and_enhanced_preprocessing(self):
        """Load train/test CSVs and run the enhanced feature engineering.

        Side effects: sets self.train_data, self.test_data, self.y,
        self.X_train, self.X_test and fits the LabelEncoders in
        self.encoders.  Reads 'train.csv' and 'testA.csv' from the CWD.
        """
        print("Loading and preprocessing data...")

        self.train_data = pd.read_csv('train.csv')
        self.test_data = pd.read_csv('testA.csv')

        print(f"Train shape: {self.train_data.shape}, Test shape: {self.test_data.shape}")

        # Target variable
        self.y = self.train_data['isDefault'].values
        print(f"Default rate: {self.y.mean():.4f}")

        # Concatenate train + test so engineered features and encoders are
        # consistent across both splits.
        # NOTE(review): medians/quantiles below are computed on the combined
        # frame, so test-set statistics leak into training features — common
        # in competitions, but worth confirming this is intended.
        all_data = pd.concat([
            self.train_data.drop('isDefault', axis=1),
            self.test_data
        ], ignore_index=True)

        # 1. Temporal feature engineering
        all_data['issueDate'] = pd.to_datetime(all_data['issueDate'])
        all_data['issue_year'] = all_data['issueDate'].dt.year
        all_data['issue_month'] = all_data['issueDate'].dt.month
        all_data['issue_quarter'] = all_data['issueDate'].dt.quarter
        all_data['issue_weekday'] = all_data['issueDate'].dt.weekday
        all_data['issue_day'] = all_data['issueDate'].dt.day

        # Economic-cycle flags (2008 financial crisis as the reference event)
        all_data['is_recession_period'] = (all_data['issue_year'] == 2008).astype(int)
        all_data['post_recession'] = (all_data['issue_year'] > 2008).astype(int)

        # 2. Employment length -> numeric years
        def parse_employment_length(emp_length):
            """Map strings like '< 1 year', '10+ years', '3 years' to floats.

            Missing / unparseable values map to 0.
            """
            if pd.isna(emp_length) or emp_length == 'n/a':
                return 0
            elif '< 1' in str(emp_length):
                return 0.5
            elif '10+' in str(emp_length):
                return 10
            else:
                try:
                    return int(str(emp_length).split()[0])
                except (ValueError, IndexError):
                    # Narrowed from a bare `except`: only parsing failures
                    # should fall back to 0.
                    return 0

        all_data['employment_length_numeric'] = all_data['employmentLength'].apply(parse_employment_length)

        # 3. Derived financial ratios (+1 / +1e-6 terms guard against
        #    division by zero)
        all_data['debt_to_income_ratio'] = all_data['loanAmnt'] / (all_data['annualIncome'] + 1)
        all_data['monthly_payment_ratio'] = all_data['installment'] / (all_data['annualIncome']/12 + 1)
        all_data['utilization_rate'] = all_data['revolBal'] / (all_data['annualIncome']/12 + 1)
        all_data['credit_length_ratio'] = all_data['openAcc'] / (all_data['totalAcc'] + 1)
        all_data['fico_range_width'] = all_data['ficoRangeHigh'] - all_data['ficoRangeLow']
        all_data['avg_fico'] = (all_data['ficoRangeHigh'] + all_data['ficoRangeLow']) / 2

        # Hand-weighted composite risk score (weights are heuristic, not
        # fitted; 850 is the FICO scale maximum).
        all_data['risk_score'] = (
            all_data['debt_to_income_ratio'] * 0.3 +
            all_data['monthly_payment_ratio'] * 0.2 +
            all_data['delinquency_2years'] * 0.2 +
            all_data['pubRec'] * 0.1 +
            (1 - all_data['avg_fico']/850) * 0.2
        )

        # 4. Anonymous-feature mining (n0..n14)
        anonymous_cols = [f'n{i}' for i in range(15)]

        # Median-impute missing anonymous values
        for col in anonymous_cols:
            all_data[col] = all_data[col].fillna(all_data[col].median())

        # Behavior volatility index: coefficient of variation over three
        # key anonymous columns (choice of n0/n3/n12 is heuristic).
        key_anonymous = ['n0', 'n3', 'n12']
        all_data['behavior_volatility'] = (
            all_data[key_anonymous].std(axis=1) / (all_data[key_anonymous].mean(axis=1) + 1e-6)
        )

        # Row-wise summary statistics of the anonymous block
        all_data['anonymous_sum'] = all_data[anonymous_cols].sum(axis=1)
        all_data['anonymous_mean'] = all_data[anonymous_cols].mean(axis=1)
        all_data['anonymous_std'] = all_data[anonymous_cols].std(axis=1)
        all_data['anonymous_max'] = all_data[anonymous_cols].max(axis=1)
        all_data['anonymous_min'] = all_data[anonymous_cols].min(axis=1)
        all_data['anonymous_range'] = all_data['anonymous_max'] - all_data['anonymous_min']

        # Activity-level flags based on the 20th/80th percentiles
        all_data['high_activity'] = (all_data['anonymous_sum'] > all_data['anonymous_sum'].quantile(0.8)).astype(int)
        all_data['low_activity'] = (all_data['anonymous_sum'] < all_data['anonymous_sum'].quantile(0.2)).astype(int)

        # 5. Categorical encoding
        categorical_features = ['grade', 'subGrade', 'homeOwnership', 'verificationStatus',
                               'purpose', 'initialListStatus', 'applicationType']

        for col in categorical_features:
            if col in all_data.columns:
                le = LabelEncoder()
                # BUGFIX: fillna must run BEFORE astype(str) — after the
                # cast, NaN has already become the literal string 'nan' and
                # fillna('Unknown') was a no-op.
                all_data[col] = le.fit_transform(all_data[col].fillna('Unknown').astype(str))
                self.encoders[col] = le

        # 6. Median-impute all remaining numeric columns
        numeric_cols = all_data.select_dtypes(include=[np.number]).columns
        all_data[numeric_cols] = all_data[numeric_cols].fillna(all_data[numeric_cols].median())

        # Split back into train / test partitions
        self.X_train = all_data.iloc[:len(self.train_data)].copy()
        self.X_test = all_data.iloc[len(self.train_data):].copy()

        print("Enhanced preprocessing completed!")
        print(f"Total features: {self.X_train.shape[1]}")

    def create_autoencoder_for_anonymous(self, input_dim, encoding_dim=8):
        """Build an autoencoder for the anonymous-feature block.

        Args:
            input_dim: number of input features.
            encoding_dim: size of the bottleneck representation.

        Returns:
            (autoencoder, encoder) Keras models, or (None, None) when
            TensorFlow is unavailable.
        """
        if not TENSORFLOW_AVAILABLE:
            return None, None

        # Input layer
        input_layer = Input(shape=(input_dim,))

        # Encoder: 16 -> encoding_dim, with BN/dropout/L2 regularization
        encoded = Dense(16, activation='relu', kernel_regularizer=l2(0.001))(input_layer)
        encoded = BatchNormalization()(encoded)
        encoded = Dropout(0.2)(encoded)
        encoded = Dense(encoding_dim, activation='relu', kernel_regularizer=l2(0.001))(encoded)

        # Decoder: mirror of the encoder, linear output for reconstruction
        decoded = Dense(16, activation='relu', kernel_regularizer=l2(0.001))(encoded)
        decoded = BatchNormalization()(decoded)
        decoded = Dropout(0.2)(decoded)
        decoded = Dense(input_dim, activation='linear')(decoded)

        # Full autoencoder plus a separate encoder-only model that shares
        # the same layers/weights.
        autoencoder = Model(input_layer, decoded)
        encoder = Model(input_layer, encoded)

        autoencoder.compile(
            optimizer=Adam(learning_rate=0.001),
            loss='mse',
            metrics=['mae']
        )

        return autoencoder, encoder

    def create_attention_fusion_model(self, num_models=4):
        """Build the attention fusion network over sub-model probabilities.

        Args:
            num_models: number of sub-model probability inputs.

        Returns:
            A compiled Keras model, or None when TensorFlow is unavailable.
        """
        if not TENSORFLOW_AVAILABLE:
            return None

        # Input: one predicted probability per sub-model
        input_layer = Input(shape=(num_models,), name='model_predictions')

        # MLP feature extraction
        hidden1 = Dense(16, activation='relu')(input_layer)
        hidden1 = BatchNormalization()(hidden1)
        hidden1 = Dropout(0.3)(hidden1)

        hidden2 = Dense(8, activation='relu')(hidden1)
        hidden2 = BatchNormalization()(hidden2)
        hidden2 = Dropout(0.2)(hidden2)

        # Softmax attention weights, one per sub-model
        attention_weights = Dense(num_models, activation='softmax', name='attention_weights')(hidden2)

        # Element-wise re-weighting of the input probabilities
        weighted_features = Multiply()([input_layer, attention_weights])

        # Residual connection back to the raw probabilities
        enhanced_features = Add()([weighted_features, input_layer])

        # Final sigmoid head
        final_hidden = Dense(4, activation='relu')(enhanced_features)
        final_hidden = Dropout(0.1)(final_hidden)
        output = Dense(1, activation='sigmoid', name='final_prediction')(final_hidden)

        model = Model(inputs=input_layer, outputs=output)
        model.compile(
            optimizer=Adam(learning_rate=0.001),
            loss='binary_crossentropy',
            metrics=['accuracy', 'AUC']
        )

        return model

    def train_anonymous_autoencoder_model(self):
        """Train the anonymous-behavior model (autoencoder + classifier).

        Returns in-sample default probabilities for the training set.
        NOTE(review): sub-model probabilities fed to the fusion stage are
        in-sample fits, not out-of-fold predictions — confirm this is the
        intended stacking scheme.
        """
        print("Training anonymous behavior autoencoder model...")

        # Raw anonymous columns plus their engineered aggregates
        anonymous_features = self.feature_groups['anonymous'] + [
            'behavior_volatility', 'anonymous_sum', 'anonymous_mean', 'anonymous_std',
            'anonymous_range', 'high_activity', 'low_activity'
        ]

        X_anonymous = self.X_train[anonymous_features].values

        # Standardize; scaler is saved for use at test time
        scaler = StandardScaler()
        X_anonymous_scaled = scaler.fit_transform(X_anonymous)
        self.scalers['anonymous'] = scaler

        if TENSORFLOW_AVAILABLE:
            # Build and train the autoencoder, then classify on the
            # learned bottleneck representation.
            autoencoder, encoder = self.create_autoencoder_for_anonymous(X_anonymous_scaled.shape[1], encoding_dim=8)

            if autoencoder is not None:
                autoencoder.fit(
                    X_anonymous_scaled, X_anonymous_scaled,
                    epochs=100,
                    batch_size=512,
                    validation_split=0.2,
                    callbacks=[
                        EarlyStopping(patience=10, restore_best_weights=True),
                        ReduceLROnPlateau(patience=5, factor=0.5)
                    ],
                    verbose=0
                )

                # Encode, then train a LightGBM classifier on the codes
                encoded_features = encoder.predict(X_anonymous_scaled)

                classifier = lgb.LGBMClassifier(
                    objective='binary',
                    num_leaves=15,
                    learning_rate=0.05,
                    random_state=42,
                    verbose=-1
                )

                classifier.fit(encoded_features, self.y)

                self.models['anonymous'] = {
                    'autoencoder': autoencoder,
                    'encoder': encoder,
                    'classifier': classifier
                }

                return classifier.predict_proba(encoded_features)[:, 1]

        # Fallback: LightGBM directly on the scaled features
        print("Using fallback LightGBM model for anonymous features...")
        fallback_model = lgb.LGBMClassifier(
            objective='binary',
            num_leaves=15,
            learning_rate=0.05,
            random_state=42,
            verbose=-1
        )

        fallback_model.fit(X_anonymous_scaled, self.y)
        self.models['anonymous'] = {'classifier': fallback_model}

        return fallback_model.predict_proba(X_anonymous_scaled)[:, 1]

    def train_enhanced_financial_model(self):
        """Train the financial-indicator sub-model (XGBoost + LightGBM).

        Returns the in-sample average of both models' probabilities.
        """
        print("Training enhanced financial model...")

        financial_features = self.feature_groups['financial'] + [
            'debt_to_income_ratio', 'monthly_payment_ratio', 'utilization_rate',
            'credit_length_ratio', 'fico_range_width', 'avg_fico', 'risk_score'
        ]

        X_financial = self.X_train[financial_features]

        # Two gradient-boosting learners, averaged
        xgb_model = xgb.XGBClassifier(
            objective='binary:logistic',
            n_estimators=300,
            max_depth=6,
            learning_rate=0.05,
            subsample=0.8,
            colsample_bytree=0.8,
            random_state=42,
            eval_metric='logloss'
        )

        lgb_model = lgb.LGBMClassifier(
            objective='binary',
            num_leaves=31,
            learning_rate=0.05,
            feature_fraction=0.9,
            bagging_fraction=0.8,
            bagging_freq=5,
            verbose=-1,
            random_state=42
        )

        xgb_model.fit(X_financial, self.y)
        lgb_model.fit(X_financial, self.y)

        # In-sample probabilities from each learner
        xgb_probs = xgb_model.predict_proba(X_financial)[:, 1]
        lgb_probs = lgb_model.predict_proba(X_financial)[:, 1]

        # Simple mean ensemble
        ensemble_probs = (xgb_probs + lgb_probs) / 2

        self.models['financial'] = {
            'xgb': xgb_model,
            'lgb': lgb_model
        }

        # Persist feature importances for the report
        self.feature_importances['financial'] = {
            'xgb': dict(zip(financial_features, xgb_model.feature_importances_)),
            'lgb': dict(zip(financial_features, lgb_model.feature_importances_))
        }

        return ensemble_probs

    def train_enhanced_credit_model(self):
        """Train the credit-history sub-model (bagged random forests).

        Returns the in-sample mean probability across the three forests.
        """
        print("Training enhanced credit history model...")

        credit_features = ['openAcc', 'totalAcc', 'ficoRangeLow', 'ficoRangeHigh',
                          'delinquency_2years', 'pubRec', 'pubRecBankruptcies',
                          'employment_length_numeric', 'grade', 'subGrade',
                          'homeOwnership', 'verificationStatus', 'purpose',
                          'credit_length_ratio', 'avg_fico']

        X_credit = self.X_train[credit_features]

        # Three random forests with different seeds (simplified
        # deep-forest idea: diversity via seeds, averaged below).
        rf_models = []
        for i in range(3):
            rf = RandomForestClassifier(
                n_estimators=150,
                max_depth=12,
                min_samples_split=20,
                min_samples_leaf=10,
                random_state=42+i,
                n_jobs=-1
            )
            rf.fit(X_credit, self.y)
            rf_models.append(rf)

        # Average the forests' probabilities
        ensemble_probs = np.mean([
            rf.predict_proba(X_credit)[:, 1] for rf in rf_models
        ], axis=0)

        self.models['credit_history'] = rf_models

        return ensemble_probs

    def train_enhanced_spatiotemporal_model(self):
        """Train the spatiotemporal sub-model (LightGBM).

        Returns in-sample default probabilities.
        """
        print("Training enhanced spatiotemporal model...")

        spatial_features = ['postCode', 'regionCode', 'term', 'issue_year',
                           'issue_month', 'issue_quarter', 'issue_weekday',
                           'applicationType', 'is_recession_period', 'post_recession']

        X_spatial = self.X_train[spatial_features]

        # Gradient boosting handles the mixed ordinal/categorical codes here
        model = lgb.LGBMClassifier(
            objective='binary',
            num_leaves=50,
            learning_rate=0.05,
            feature_fraction=0.8,
            bagging_fraction=0.7,
            bagging_freq=3,
            verbose=-1,
            random_state=42
        )

        model.fit(X_spatial, self.y)
        self.models['spatiotemporal'] = model

        return model.predict_proba(X_spatial)[:, 1]

    def train_all_enhanced_models(self):
        """Run the full pipeline: preprocess, train sub-models, fuse, evaluate.

        Returns the populated self.models registry.
        """
        print("=== Starting Advanced Multi-Model Training Pipeline ===")

        # Data loading + feature engineering
        self.load_and_enhanced_preprocessing()

        # Train each sub-model; each returns in-sample probabilities
        financial_probs = self.train_enhanced_financial_model()
        credit_probs = self.train_enhanced_credit_model()
        spatial_probs = self.train_enhanced_spatiotemporal_model()
        anonymous_probs = self.train_anonymous_autoencoder_model()

        # Train the attention fusion stage on the stacked probabilities
        fusion_probs = self.train_attention_fusion_model(
            financial_probs, credit_probs, spatial_probs, anonymous_probs
        )

        # Report (in-sample) performance of every stage
        self.evaluate_all_models(financial_probs, credit_probs, spatial_probs,
                                anonymous_probs, fusion_probs)

        print("All enhanced models trained successfully!")
        return self.models

    def train_attention_fusion_model(self, fin_probs, credit_probs, spatial_probs, anon_probs):
        """Train the fusion model over the four sub-model probability vectors.

        Returns in-sample fused probabilities for the training set.
        """
        print("Training attention fusion model...")

        X_fusion = np.column_stack([fin_probs, credit_probs, spatial_probs, anon_probs])

        if TENSORFLOW_AVAILABLE:
            # Deep attention fusion
            fusion_model = self.create_attention_fusion_model(num_models=4)

            if fusion_model is not None:
                fusion_model.fit(
                    X_fusion, self.y,
                    epochs=150,
                    batch_size=2048,
                    validation_split=0.2,
                    callbacks=[
                        EarlyStopping(patience=15, restore_best_weights=True),
                        ReduceLROnPlateau(patience=7, factor=0.5, min_lr=1e-6)
                    ],
                    verbose=1
                )

                self.models['fusion'] = fusion_model
                return fusion_model.predict(X_fusion).flatten()

        # Fallback: shallow LightGBM as the meta-learner
        print("Using fallback LightGBM for fusion...")
        fallback_fusion = lgb.LGBMClassifier(
            objective='binary',
            num_leaves=10,
            learning_rate=0.05,
            random_state=42,
            verbose=-1
        )

        fallback_fusion.fit(X_fusion, self.y)
        self.models['fusion'] = fallback_fusion

        return fallback_fusion.predict_proba(X_fusion)[:, 1]

    def evaluate_all_models(self, fin_probs, credit_probs, spatial_probs, anon_probs, fusion_probs):
        """Print AUC/LogLoss for every stage against the training labels.

        NOTE(review): these are in-sample metrics (models were fitted on the
        same rows), so they are optimistic relative to held-out performance.
        """
        print("\n=== Enhanced Model Performance Evaluation ===")

        models_probs = {
            'Enhanced Financial Model': fin_probs,
            'Enhanced Credit History Model': credit_probs,
            'Enhanced Spatiotemporal Model': spatial_probs,
            'Anonymous Autoencoder Model': anon_probs,
            'Attention Fusion Model': fusion_probs
        }

        for model_name, probs in models_probs.items():
            auc = roc_auc_score(self.y, probs)
            logloss = log_loss(self.y, probs)
            print(f"{model_name:<30} AUC: {auc:.4f}, LogLoss: {logloss:.4f}")

    def predict_test_enhanced(self):
        """Predict default probabilities for the test set.

        Replays each sub-model on its feature slice of self.X_test, then
        feeds the stacked probabilities through the fusion model.

        Returns a 1-D array of probabilities aligned with self.test_data.
        """
        print("Predicting test data with enhanced models...")

        # Financial sub-model (XGB + LGBM average) — feature list must
        # match train_enhanced_financial_model
        financial_features = self.feature_groups['financial'] + [
            'debt_to_income_ratio', 'monthly_payment_ratio', 'utilization_rate',
            'credit_length_ratio', 'fico_range_width', 'avg_fico', 'risk_score'
        ]
        X_test_financial = self.X_test[financial_features]

        fin_xgb_probs = self.models['financial']['xgb'].predict_proba(X_test_financial)[:, 1]
        fin_lgb_probs = self.models['financial']['lgb'].predict_proba(X_test_financial)[:, 1]
        fin_test_probs = (fin_xgb_probs + fin_lgb_probs) / 2

        # Credit-history sub-model (mean over the three forests)
        credit_features = ['openAcc', 'totalAcc', 'ficoRangeLow', 'ficoRangeHigh',
                          'delinquency_2years', 'pubRec', 'pubRecBankruptcies',
                          'employment_length_numeric', 'grade', 'subGrade',
                          'homeOwnership', 'verificationStatus', 'purpose',
                          'credit_length_ratio', 'avg_fico']
        X_test_credit = self.X_test[credit_features]

        credit_test_probs = np.mean([
            rf.predict_proba(X_test_credit)[:, 1] for rf in self.models['credit_history']
        ], axis=0)

        # Spatiotemporal sub-model
        spatial_features = ['postCode', 'regionCode', 'term', 'issue_year',
                           'issue_month', 'issue_quarter', 'issue_weekday',
                           'applicationType', 'is_recession_period', 'post_recession']
        X_test_spatial = self.X_test[spatial_features]

        spatial_test_probs = self.models['spatiotemporal'].predict_proba(X_test_spatial)[:, 1]

        # Anonymous sub-model: re-use the scaler fitted at train time
        anonymous_features = self.feature_groups['anonymous'] + [
            'behavior_volatility', 'anonymous_sum', 'anonymous_mean', 'anonymous_std',
            'anonymous_range', 'high_activity', 'low_activity'
        ]
        X_test_anonymous = self.X_test[anonymous_features].values
        X_test_anonymous_scaled = self.scalers['anonymous'].transform(X_test_anonymous)

        if 'encoder' in self.models['anonymous']:
            # Autoencoder path: classify on the encoded representation
            encoded_test = self.models['anonymous']['encoder'].predict(X_test_anonymous_scaled)
            anon_test_probs = self.models['anonymous']['classifier'].predict_proba(encoded_test)[:, 1]
        else:
            # Fallback path: classify on the scaled raw features
            anon_test_probs = self.models['anonymous']['classifier'].predict_proba(X_test_anonymous_scaled)[:, 1]

        # Fusion stage
        X_test_fusion = np.column_stack([fin_test_probs, credit_test_probs, spatial_test_probs, anon_test_probs])

        # BUGFIX: dispatch on predict_proba rather than
        # `TENSORFLOW_AVAILABLE and hasattr(..., 'predict')` — the LightGBM
        # fallback ALSO has .predict (returning class labels, not
        # probabilities), so the old check was fragile.  Only the sklearn
        # fallback exposes predict_proba; the Keras model does not.
        fusion_model = self.models['fusion']
        if hasattr(fusion_model, 'predict_proba'):
            final_predictions = fusion_model.predict_proba(X_test_fusion)[:, 1]
        else:
            final_predictions = fusion_model.predict(X_test_fusion).flatten()

        return final_predictions

    def generate_feature_importance_report(self):
        """Print the top-10 XGBoost feature importances for the financial model."""
        print("\n=== Feature Importance Analysis ===")

        if 'financial' in self.feature_importances:
            print("\nTop 10 Financial Features (XGBoost):")
            fin_importance = self.feature_importances['financial']['xgb']
            sorted_features = sorted(fin_importance.items(), key=lambda x: x[1], reverse=True)[:10]
            for feature, importance in sorted_features:
                print(f"  {feature:<25}: {importance:.4f}")

def main():
    """Train the full pipeline, write the submission file, and print reports."""
    predictor = AdvancedLoanPredictor()

    # Fit every sub-model plus the fusion stage
    predictor.train_all_enhanced_models()

    # Score the test set
    test_probs = predictor.predict_test_enhanced()

    # Persist the submission (id + predicted default probability)
    submission_frame = pd.DataFrame({
        'id': predictor.test_data['id'],
        'isDefault': test_probs,
    })
    submission_frame.to_csv('advanced_submission.csv', index=False)
    print("\nAdvanced predictions saved to advanced_submission.csv")

    # Feature-importance summary for the financial sub-model
    predictor.generate_feature_importance_report()

    # Summary statistics of the predicted probabilities
    print(f"\n=== Advanced Prediction Statistics ===")
    summary_rows = [
        ('Mean prediction', test_probs.mean()),
        ('Std prediction', test_probs.std()),
        ('Min prediction', test_probs.min()),
        ('Max prediction', test_probs.max()),
    ]
    for label, value in summary_rows:
        print(f"{label}: {value:.4f}")
    print(f"Samples predicted as high risk (>0.5): {(test_probs > 0.5).sum()}")
    print(f"Samples predicted as very high risk (>0.8): {(test_probs > 0.8).sum()}")

# Script entry point: run the end-to-end training + prediction pipeline.
if __name__ == "__main__":
    main() 