import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt

# Configure matplotlib so CJK (Chinese) text renders; fonts are tried in order.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
# Keep the ASCII minus sign renderable when a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False

def load_and_prepare_data():
    """
    Load the customer CSV files and build the modelling table.

    Returns
    -------
    pd.DataFrame
        One row per customer: the latest behaviour/assets record merged
        with static customer-base attributes, plus the binary target
        column ``is_high_value``.
    """
    # Read raw data (paths are relative to the current working directory).
    customer_base = pd.read_csv('customer_base.csv')
    customer_behavior = pd.read_csv('customer_behavior_assets.csv')
    
    # Take the last row per customer as the "current" snapshot.
    # NOTE(review): tail(1) picks the last row in *file order* — this assumes
    # the behaviour file is already sorted chronologically per customer; confirm.
    latest_data = customer_behavior.groupby('customer_id').tail(1).copy()
    
    # Target variable: will the customer reach 1M+ assets within 3 months?
    # No true "future" data exists in this dataset, so the label is proxied by
    # current status: 1 if total assets are already >= 1,000,000, else 0.
    latest_data['is_high_value'] = (latest_data['total_assets'] >= 1000000).astype(int)
    
    # Attach static customer attributes; the left join keeps every snapshot row.
    merged_data = pd.merge(latest_data, customer_base, on='customer_id', how='left')
    
    return merged_data

def feature_engineering(data):
    """
    Build the numeric feature matrix from the merged customer table.

    Parameters
    ----------
    data : pd.DataFrame
        Merged customer data; must contain every column in
        ``feature_columns`` below.

    Returns
    -------
    pd.DataFrame
        Feature matrix with gender integer-encoded and remaining NaNs
        imputed with each column's median.
    """
    # Columns used for modelling.
    feature_columns = [
        'age', 'gender', 'monthly_income', 'total_assets', 
        'deposit_balance', 'financial_balance', 'fund_balance', 
        'insurance_balance', 'product_count', 'app_login_count',
        'app_financial_view_time', 'investment_monthly_count'
    ]
    
    # Copy so the caller's DataFrame is never mutated.
    features = data[feature_columns].copy()
    
    # Encode gender as integer codes. Categories are sorted
    # lexicographically, so for non-null values the codes match what
    # sklearn's LabelEncoder produced. Unlike LabelEncoder, this does not
    # raise on missing gender (NaN becomes -1), and it removes an
    # unnecessary sklearn dependency from a pure-pandas transform.
    features['gender'] = features['gender'].astype('category').cat.codes
    
    # Impute remaining missing values with each column's median.
    features = features.fillna(features.median())
    
    return features

def train_lightgbm(X, y):
    """
    Train a LightGBM binary classifier on a stratified 80/20 split.

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix.
    y : pd.Series
        Binary target (0/1).

    Returns
    -------
    tuple
        (booster, X_train, X_test, y_train, y_test)
    """
    # Stratified split keeps the class ratio identical in both partitions.
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )
    
    # Wrap both partitions in LightGBM's native dataset format; the
    # validation set references the training set so binning is shared.
    dtrain = lgb.Dataset(X_tr, label=y_tr)
    dvalid = lgb.Dataset(X_te, label=y_te, reference=dtrain)
    
    # Booster hyper-parameters (binary log-loss objective, mild subsampling).
    booster_params = dict(
        objective='binary',
        metric='binary_logloss',
        boosting_type='gbdt',
        num_leaves=31,
        learning_rate=0.05,
        feature_fraction=0.9,
        bagging_fraction=0.8,
        bagging_freq=5,
        verbose=0,
    )
    
    # Fit for a fixed 100 rounds; log_evaluation(0) silences per-round output.
    booster = lgb.train(
        booster_params,
        dtrain,
        valid_sets=[dtrain, dvalid],
        num_boost_round=100,
        callbacks=[lgb.log_evaluation(0)]
    )
    
    return booster, X_tr, X_te, y_tr, y_te

def visualize_feature_importance(model, feature_names, top_n=12):
    """
    Plot, save, and return a horizontal bar chart of feature importances.

    Parameters
    ----------
    model : lgb.Booster
        Trained LightGBM booster.
    feature_names : list[str]
        Names aligned with the model's feature order.
    top_n : int, optional
        How many of the most important features to show (default 12,
        which equals the full feature set used by this script).

    Returns
    -------
    pd.DataFrame
        Importances sorted ascending, columns ['feature', 'importance'].
    """
    import os  # local import: only needed to ensure the output directory exists

    # Split-count importances from the booster.
    importance = model.feature_importance()
    
    # Sort ascending so barh draws the most important feature on top.
    # Bug fix: top_n was previously accepted but ignored; tail(top_n) keeps
    # only the top_n largest importances (identical output at the default).
    feature_importance = pd.DataFrame({
        'feature': feature_names,
        'importance': importance
    }).sort_values('importance', ascending=True).tail(top_n)
    
    plt.figure(figsize=(10, 8))
    plt.barh(range(len(feature_importance)), feature_importance['importance'], color='skyblue')
    plt.yticks(range(len(feature_importance)), feature_importance['feature'])
    plt.xlabel('Feature Importance')
    plt.title('LightGBM Feature Importance')
    
    # Annotate each bar with its numeric value.
    for i, v in enumerate(feature_importance['importance']):
        plt.text(v + 0.1, i, f'{v:.2f}', va='center')
    
    plt.tight_layout()
    # Bug fix: savefig raises if the target directory does not exist yet.
    os.makedirs('image_show', exist_ok=True)
    plt.savefig('image_show/lightgbm_feature_importance.png', dpi=300, bbox_inches='tight')
    plt.show()
    
    return feature_importance

def print_feature_importance(feature_importance):
    """
    Print the feature-importance ranking as text, best feature first.

    Expects ``feature_importance`` sorted ascending (as returned by
    ``visualize_feature_importance``) and walks it in reverse order.
    """
    print("特征重要性排序:")
    print("=" * 50)
    ranked = feature_importance.iloc[::-1]
    for rank, row in enumerate(ranked.itertuples(index=False), start=1):
        print(f"{rank:2d}. {row.feature:25s}: {row.importance:.2f}")

def main():
    """
    End-to-end pipeline: load data, build features, train the model,
    report test accuracy, and visualize / print feature importances.

    Returns
    -------
    tuple
        (trained lgb.Booster, feature-importance DataFrame)
    """
    print("加载并准备数据...")
    data = load_and_prepare_data()
    
    print("进行特征工程...")
    X = feature_engineering(data)
    y = data['is_high_value']
    
    print("训练LightGBM模型...")
    model, X_train, X_test, y_train, y_test = train_lightgbm(X, y)
    
    # Evaluate on the held-out split at a 0.5 probability threshold.
    # NOTE(review): train_lightgbm configures no early-stopping callback,
    # so best_iteration may not designate a "best" round here — confirm intent.
    y_pred = model.predict(X_test, num_iteration=model.best_iteration)
    y_pred_binary = (y_pred > 0.5).astype(int)
    accuracy = np.mean(y_pred_binary == y_test)
    print(f"\n模型准确率: {accuracy:.4f}")
    
    # Plot and save the importance chart; also returns the importance table.
    feature_importance = visualize_feature_importance(model, X.columns.tolist())
    
    # Text ranking of the same importances.
    print("\n生成特征重要性排序...")
    print_feature_importance(feature_importance)
    
    # Class-balance summary of the target variable.
    print(f"\n数据分布:")
    print("=" * 50)
    print(f"高价值客户数量: {y.sum()}")
    print(f"总客户数量: {len(y)}")
    print(f"高价值客户比例: {y.mean():.2%}")
    
    return model, feature_importance

# Script entry point: run the full pipeline and keep the results in module scope.
if __name__ == "__main__":
    model, feature_importance = main()