import os

import lightgbm as lgb
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Configure matplotlib for Chinese label support: prefer SimHei (falls back
# to DejaVu Sans), and disable the Unicode minus glyph so minus signs still
# render correctly with a CJK font.
matplotlib.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
matplotlib.rcParams['axes.unicode_minus'] = False

# 数据预处理
def load_data():
    """Load and merge customer data and build the binary target.

    Reads ``customer_base.csv`` and ``customer_behavior_assets.csv`` from the
    working directory, keeps each customer's most recent behavior/asset
    record, estimates a per-customer average month-over-month asset growth
    rate, projects assets three months ahead assuming compound growth, and
    labels customers whose projected assets reach 1,000,000+.

    Returns:
        pd.DataFrame: one row per customer containing the base columns, the
        latest behavior/asset columns, plus ``avg_growth_rate``,
        ``predicted_assets_3m`` and ``target`` (1 if the 3-month projection
        is >= 1,000,000, else 0).
    """
    base = pd.read_csv('customer_base.csv')
    behavior = pd.read_csv('customer_behavior_assets.csv')

    # Latest record per customer: sort by stat month, take the last row.
    latest_data = behavior.sort_values('stat_month').groupby('customer_id').tail(1)

    # Merge base info with the latest behavior/asset snapshot.
    merged = pd.merge(base, latest_data, on='customer_id', how='inner')

    # Estimate month-over-month growth from consecutive records.
    customer_sorted = behavior.sort_values(['customer_id', 'stat_month'])
    customer_sorted['prev_assets'] = customer_sorted.groupby('customer_id')['total_assets'].shift(1)
    customer_sorted['asset_growth'] = customer_sorted['total_assets'] - customer_sorted['prev_assets']
    # BUGFIX: a prev_assets of 0 produces +/-inf here, which would poison the
    # per-customer mean below and make the 3-month projection infinite
    # (forcing target=1). Treat such rows as "no usable growth observation".
    customer_sorted['growth_rate'] = (
        customer_sorted['asset_growth'] / customer_sorted['prev_assets']
    ).replace([np.inf, -np.inf], np.nan)

    # Average growth rate per customer (NaN when no valid observation).
    growth_rates = customer_sorted.groupby('customer_id')['growth_rate'].mean().reset_index()
    growth_rates.columns = ['customer_id', 'avg_growth_rate']

    merged = pd.merge(merged, growth_rates, on='customer_id', how='left')

    # Customers without growth history are assumed flat (rate 0).
    merged['avg_growth_rate'] = merged['avg_growth_rate'].fillna(0)

    # Project assets 3 months ahead with compound growth.
    merged['predicted_assets_3m'] = merged['total_assets'] * (1 + merged['avg_growth_rate']) ** 3

    # Target: projected assets reach 1,000,000+ within 3 months.
    merged['target'] = (merged['predicted_assets_3m'] >= 1000000).astype(int)

    return merged

# 特征工程 - 处理分类变量
def feature_engineering(data):
    """Build the model feature matrix from the merged customer frame.

    Numeric columns are used as-is with missing values filled with 0.
    Categorical columns are label-encoded (NaN is stringified to 'nan'
    first), and each fitted encoder is kept so the integer->label mapping
    can be inspected or inverted later.

    Args:
        data (pd.DataFrame): merged customer data, e.g. from ``load_data``.

    Returns:
        tuple: (X, label_encoders) where X is a DataFrame of numeric
        features followed by encoded categorical features, and
        label_encoders maps column name -> fitted LabelEncoder.
    """
    numeric_features = ['age', 'monthly_income', 'total_assets', 'deposit_balance', 'financial_balance',
                       'fund_balance', 'insurance_balance', 'product_count', 'financial_repurchase_count',
                       'credit_card_monthly_expense', 'investment_monthly_count', 'app_login_count',
                       'app_financial_view_time', 'app_product_compare_count', 'avg_growth_rate']

    categorical_features = ['gender', 'occupation', 'occupation_type', 'lifecycle_stage',
                           'marriage_status', 'city_level', 'branch_name', 'asset_level',
                           'deposit_flag', 'financial_flag', 'fund_flag', 'insurance_flag']

    # Numeric part: missing values default to 0.
    numeric_part = data[numeric_features].fillna(0)

    # One encoder per categorical column; build the encoded frame directly.
    label_encoders = {name: LabelEncoder() for name in categorical_features}
    encoded_part = pd.DataFrame(
        {
            name: label_encoders[name].fit_transform(data[name].astype(str))
            for name in categorical_features
        },
        index=data.index,
    )

    # Feature matrix: numeric columns first, then encoded categoricals.
    return pd.concat([numeric_part, encoded_part], axis=1), label_encoders

# 模型训练与可视化
def train_and_visualize():
    """Train a LightGBM binary classifier and export feature-importance artifacts.

    Loads and prepares the customer data, trains a GBDT on an 80/20 split,
    prints the test-set accuracy, and writes the gain-based feature
    importances to ``image_show/`` as a CSV plus two horizontal bar charts
    (top-15 features and all features).
    """
    # Load and prepare the data.
    data = load_data()

    # Feature engineering (encoders kept for potential later inspection).
    X, label_encoders = feature_engineering(data)
    y = data['target']

    # 80/20 train/test split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # LightGBM datasets; validation set references the training set's bins.
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_test, label=y_test, reference=train_data)

    # Training parameters for a binary GBDT.
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'boosting_type': 'gbdt',
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': 0
    }

    # Train the model (evaluation logging silenced).
    print("正在训练LightGBM模型...")
    model = lgb.train(
        params,
        train_data,
        valid_sets=[valid_data],
        num_boost_round=100,
        callbacks=[lgb.log_evaluation(0)]
    )

    # Predict probabilities on the test set and threshold at 0.5.
    y_pred = model.predict(X_test, num_iteration=model.best_iteration)
    y_pred_binary = (y_pred > 0.5).astype(int)

    # Test-set accuracy.
    accuracy = np.mean(y_pred_binary == y_test)
    print(f"模型准确率: {accuracy:.4f}")

    # Gain-based feature importance, most important first.
    feature_importance = pd.DataFrame({
        'feature': X.columns,
        'importance': model.feature_importance(importance_type='gain')
    }).sort_values('importance', ascending=False)

    print("\n特征重要性排序:")
    print(feature_importance)

    # BUGFIX: ensure the output directory exists before writing any artifact;
    # to_csv/savefig would otherwise raise FileNotFoundError on a fresh setup.
    os.makedirs('image_show', exist_ok=True)

    # Save feature importance table.
    feature_importance.to_csv('image_show/lightgbm_feature_importance.csv', index=False)
    print("\n特征重要性已保存到 lightgbm_feature_importance.csv 文件")

    # Bar chart of the top-15 features.
    _plot_importance(feature_importance.head(15),
                     'LightGBM特征重要性排序 (前15个特征)',
                     'image_show/lightgbm_feature_importance.png',
                     figsize=(10, 8))
    print("\n特征重要性图表已保存到 lightgbm_feature_importance.png 文件")

    # Bar chart of all features.
    _plot_importance(feature_importance,
                     'LightGBM全部特征重要性排序',
                     'image_show/lightgbm_feature_importance_full.png',
                     figsize=(12, 10))
    print("\n全部特征重要性图表已保存到 lightgbm_feature_importance_full.png 文件")

    # Show the top-20 features in the console as well.
    print("\n前20个最重要的特征:")
    print(feature_importance.head(20))


def _plot_importance(importance_df, title, path, figsize):
    """Save a horizontal bar chart of *importance_df* (feature/importance) to *path*."""
    plt.figure(figsize=figsize)
    colors = plt.cm.viridis(np.linspace(0, 1, len(importance_df)))
    plt.barh(range(len(importance_df)), importance_df['importance'], color=colors)
    plt.yticks(range(len(importance_df)), importance_df['feature'])
    plt.xlabel('重要性得分')
    plt.title(title)
    plt.gca().invert_yaxis()  # highest importance at the top
    plt.tight_layout()
    plt.savefig(path, dpi=300, bbox_inches='tight')
    plt.close()

# Script entry point: run the full train-and-visualize pipeline.
if __name__ == '__main__':
    train_and_visualize()