import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import matplotlib
import shap

# Configure matplotlib so CJK labels (SimHei) render correctly and the
# minus sign is not replaced by a Unicode glyph the CJK font lacks.
matplotlib.rcParams.update({
    'font.sans-serif': ['SimHei', 'DejaVu Sans'],
    'axes.unicode_minus': False,
})

# 数据预处理
def load_data():
    """Load customer data, derive a growth feature, and build the target label.

    Reads ``customer_base.csv`` and ``customer_behavior_assets.csv`` from the
    working directory, keeps each customer's latest behaviour record, estimates
    a per-customer average month-over-month asset growth rate, and projects
    total assets three months ahead under compound growth.

    Returns:
        pd.DataFrame: one row per customer with all base/behaviour columns plus
        ``avg_growth_rate``, ``predicted_assets_3m`` and the binary ``target``
        (1 if projected assets reach 1,000,000+ within 3 months).
    """
    base = pd.read_csv('customer_base.csv')
    behavior = pd.read_csv('customer_behavior_assets.csv')

    # Latest snapshot per customer: sort by statistics month, keep last record.
    latest_data = behavior.sort_values('stat_month').groupby('customer_id').tail(1)

    # Join static base attributes with the latest behaviour/asset snapshot.
    merged = pd.merge(base, latest_data, on='customer_id', how='inner')

    # Month-over-month asset growth rate per behaviour record (first record of
    # each customer gets NaN from the shift).
    customer_sorted = behavior.sort_values(['customer_id', 'stat_month'])
    customer_sorted['prev_assets'] = customer_sorted.groupby('customer_id')['total_assets'].shift(1)
    customer_sorted['asset_growth'] = customer_sorted['total_assets'] - customer_sorted['prev_assets']
    customer_sorted['growth_rate'] = customer_sorted['asset_growth'] / customer_sorted['prev_assets']
    # BUGFIX: a previous balance of 0 makes the division yield +/-inf, which
    # survives .mean() and .fillna(0) and poisons the compounded projection
    # below. Treat such rates as missing instead.
    customer_sorted['growth_rate'] = customer_sorted['growth_rate'].replace([np.inf, -np.inf], np.nan)

    # Average growth rate per customer (NaNs are skipped by mean()).
    growth_rates = customer_sorted.groupby('customer_id')['growth_rate'].mean().reset_index()
    growth_rates.columns = ['customer_id', 'avg_growth_rate']

    merged = pd.merge(merged, growth_rates, on='customer_id', how='left')

    # Customers with a single record (or only invalid rates) get zero growth.
    merged['avg_growth_rate'] = merged['avg_growth_rate'].fillna(0)

    # Compound the average monthly growth rate over a 3-month horizon.
    merged['predicted_assets_3m'] = merged['total_assets'] * (1 + merged['avg_growth_rate']) ** 3

    # Binary target: projected assets reach 1,000,000+ within 3 months.
    merged['target'] = (merged['predicted_assets_3m'] >= 1000000).astype(int)

    return merged

# 特征工程 - 处理分类变量
def feature_engineering(data):
    """Assemble the model feature matrix from the merged customer frame.

    Numeric columns are used as-is with missing values filled with 0;
    categorical columns are label-encoded into integer codes.

    Args:
        data: merged customer DataFrame (output of ``load_data``).

    Returns:
        tuple: (feature matrix ``X``, dict of fitted LabelEncoders keyed by
        column name, list of numeric feature names, list of categorical
        feature names).
    """
    numeric_features = ['age', 'monthly_income', 'total_assets', 'deposit_balance', 'financial_balance', 
                       'fund_balance', 'insurance_balance', 'product_count', 'financial_repurchase_count',
                       'credit_card_monthly_expense', 'investment_monthly_count', 'app_login_count',
                       'app_financial_view_time', 'app_product_compare_count', 'avg_growth_rate']

    categorical_features = ['gender', 'occupation', 'occupation_type', 'lifecycle_stage', 
                           'marriage_status', 'city_level', 'branch_name', 'asset_level',
                           'deposit_flag', 'financial_flag', 'fund_flag', 'insurance_flag']

    # Missing numeric values default to zero.
    X_numeric = data[numeric_features].fillna(0)

    # Integer-encode every categorical column; the fitted encoders are kept so
    # callers can reuse or invert the code mapping later.
    label_encoders = {col: LabelEncoder() for col in categorical_features}
    X_categorical = data[categorical_features].copy()
    for col, encoder in label_encoders.items():
        X_categorical[col] = encoder.fit_transform(X_categorical[col].astype(str))

    # Side-by-side concatenation: numeric block first, then encoded categoricals.
    X = pd.concat([X_numeric, X_categorical], axis=1)

    return X, label_encoders, numeric_features, categorical_features

# 模型训练
def train_model(X_train, y_train, X_test, y_test):
    """Train a LightGBM binary classifier on the prepared features.

    Args:
        X_train, y_train: training features and binary labels.
        X_test, y_test: hold-out features and labels used as the
            validation set during boosting.

    Returns:
        lgb.Booster: the trained model (100 boosting rounds).
    """
    # Wrap the splits as LightGBM datasets; the validation set references the
    # training set so bin boundaries stay consistent.
    dtrain = lgb.Dataset(X_train, label=y_train)
    dvalid = lgb.Dataset(X_test, label=y_test, reference=dtrain)

    # Conservative GBDT settings: moderate tree size, low learning rate,
    # feature/row subsampling for regularisation.
    params = dict(
        objective='binary',
        metric='binary_logloss',
        boosting_type='gbdt',
        num_leaves=31,
        learning_rate=0.05,
        feature_fraction=0.9,
        bagging_fraction=0.8,
        bagging_freq=5,
        verbose=0,
    )

    print("正在训练LightGBM模型...")
    return lgb.train(
        params,
        dtrain,
        valid_sets=[dvalid],
        num_boost_round=100,
        callbacks=[lgb.log_evaluation(0)],  # suppress per-round eval logging
    )

# 全局解释 - 识别最重要的特征
def global_explanation(model, X_train, feature_names):
    """Produce a global SHAP summary for the trained model.

    Computes SHAP values on (a sample of) the training data, saves a summary
    plot under ``image_show/`` and prints the top-10 features ranked by mean
    absolute SHAP value.

    Returns:
        tuple: (explainer, shap_values, sample_data) so callers can reuse the
        explainer for local explanations.
    """
    print("正在进行全局解释...")

    explainer = shap.TreeExplainer(model)

    # Cap the background sample at 1000 rows to keep SHAP computation fast.
    if len(X_train) > 1000:
        sample_data = shap.utils.sample(X_train, 1000)
    else:
        sample_data = X_train
    values = explainer.shap_values(sample_data)

    # Beeswarm-style summary plot, written to disk instead of shown.
    plt.figure(figsize=(10, 8))
    shap.summary_plot(values, sample_data, feature_names=feature_names, show=False)
    plt.title('SHAP特征重要性全局解释')
    plt.tight_layout()
    plt.savefig('image_show/shap_global_importance.png', dpi=300, bbox_inches='tight')
    plt.close()
    print("全局解释图表已保存到 shap_global_importance.png")

    # Rank features by mean |SHAP| across the sampled rows.
    importance_table = pd.DataFrame({
        'feature': feature_names,
        'importance': np.abs(values).mean(0),
    }).sort_values('importance', ascending=False)

    print("\n基于SHAP的特征重要性排序:")
    print(importance_table.head(10))

    return explainer, values, sample_data

# 局部解释 - 解释单个客户预测结果
def local_explanation(model, explainer, X_test, feature_names, customer_index=0):
    """Explain a single customer's prediction with a SHAP waterfall plot.

    Args:
        model: trained LightGBM booster.
        explainer: fitted ``shap.TreeExplainer`` for ``model``.
        X_test: test-set feature matrix.
        feature_names: ordered feature names matching ``X_test`` columns.
        customer_index: positional index of the customer row to explain.
    """
    print(f"\n正在进行局部解释（客户索引: {customer_index}）...")

    # Single-row slice keeps the DataFrame shape expected by predict/SHAP.
    row = X_test.iloc[customer_index:customer_index + 1]

    score = model.predict(row)[0]
    print(f"客户预测概率: {score:.4f}")
    print(f"预测结果: {'会' if score > 0.5 else '不会'}在3个月内达到100万资产")

    row_shap = explainer.shap_values(row)

    # Waterfall plot of this row's feature attributions, saved to disk.
    plt.figure(figsize=(12, 8))
    explanation = shap.Explanation(
        values=row_shap[0],
        base_values=explainer.expected_value,
        data=row.iloc[0],
        feature_names=feature_names,
    )
    shap.waterfall_plot(explanation, show=False)
    plt.title(f'客户 {customer_index} 的SHAP局部解释')
    plt.tight_layout()
    plt.savefig(f'image_show/shap_local_explanation_customer_{customer_index}.png', dpi=300, bbox_inches='tight')
    plt.close()
    print(f"局部解释图表已保存到 shap_local_explanation_customer_{customer_index}.png")

    # Per-feature contributions, largest magnitude first.
    contributions = pd.DataFrame({
        'feature': feature_names,
        'contribution': row_shap[0],
    }).sort_values('contribution', key=abs, ascending=False)

    print(f"\n客户 {customer_index} 的特征贡献度:")
    print(contributions.head(10))

def main():
    """Run the full pipeline: data prep, training, evaluation, SHAP analysis."""
    data = load_data()

    # Build features/labels and remember the column order for SHAP plots.
    X, label_encoders, numeric_features, categorical_features = feature_engineering(data)
    y = data['target']
    feature_names = list(X.columns)

    # Fixed seed keeps the split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = train_model(X_train, y_train, X_test, y_test)

    # Hold-out accuracy at the 0.5 probability threshold.
    probabilities = model.predict(X_test)
    accuracy = np.mean((probabilities > 0.5).astype(int) == y_test)
    print(f"模型准确率: {accuracy:.4f}")

    # Global view: which features matter most overall.
    explainer, shap_values, sample_data = global_explanation(model, X_train, feature_names)

    # Local view: explain two individual customers from the test set.
    local_explanation(model, explainer, X_test, feature_names, customer_index=0)
    local_explanation(model, explainer, X_test, feature_names, customer_index=10)

    print("\nSHAP分析完成！")


if __name__ == '__main__':
    main()