import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier, plot_tree

# Configure matplotlib fonts so CJK (Chinese) labels render correctly;
# with these fonts the unicode minus sign must be disabled to display "-".
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

def load_and_prepare_data():
    """
    Load the raw customer CSVs and assemble the modelling dataset.

    Reads ``customer_base.csv`` and ``customer_behavior_assets.csv``, keeps
    each customer's most recent behaviour record, derives the
    ``financial_diversity`` count and the binary ``is_high_potential``
    target, then merges in the static customer profile.

    Returns
    -------
    pd.DataFrame
        One row per customer with behaviour, derived, and profile columns.
    """
    base = pd.read_csv('customer_base.csv')
    behavior = pd.read_csv('customer_behavior_assets.csv')

    # Keep the last behaviour row per customer as their current snapshot.
    # NOTE(review): assumes the file is already ordered chronologically
    # within each customer — confirm against the data export.
    snapshot = behavior.groupby('customer_id').tail(1).copy()

    # financial_diversity = number of the four product categories in which
    # the customer holds a positive balance (0-4).
    balance_columns = [
        'deposit_balance', 'financial_balance',
        'fund_balance', 'insurance_balance',
    ]
    snapshot['financial_diversity'] = sum(
        (snapshot[col] > 0).astype(int) for col in balance_columns
    )

    # A "high potential" customer sits in the top asset quartile, holds at
    # least three product categories, and is in the top activity quartile.
    min_assets = snapshot['total_assets'].quantile(0.75)
    min_logins = snapshot['app_login_count'].quantile(0.75)
    snapshot['is_high_potential'] = (
        (snapshot['total_assets'] >= min_assets)
        & (snapshot['financial_diversity'] >= 3)
        & (snapshot['app_login_count'] >= min_logins)
    ).astype(int)

    # Attach the static profile attributes for each customer.
    return pd.merge(snapshot, base, on='customer_id', how='left')

def feature_engineering(data):
    """
    Build the numeric feature matrix used for model training.

    Parameters
    ----------
    data : pd.DataFrame
        Merged customer data; must contain every column listed in
        ``feature_columns`` below.

    Returns
    -------
    pd.DataFrame
        A copy of the selected feature columns with ``gender`` encoded as
        integer codes and remaining missing values imputed with each
        column's median.
    """
    # Columns fed to the model, taken from both behaviour and profile data.
    feature_columns = [
        'age', 'gender', 'monthly_income', 'total_assets',
        'deposit_balance', 'financial_balance', 'fund_balance',
        'insurance_balance', 'product_count', 'app_login_count',
        'app_financial_view_time', 'investment_monthly_count'
    ]

    # Copy so the caller's DataFrame is never mutated.
    features = data[feature_columns].copy()

    # Encode gender as integer codes in lexicographic category order —
    # identical codes to sklearn's LabelEncoder for string input — but
    # robust to missing values: NaN becomes -1 instead of raising.
    features['gender'] = features['gender'].astype('category').cat.codes

    # Impute remaining missing values with each column's median.
    features = features.fillna(features.median())

    return features

def train_decision_tree(X, y):
    """
    Fit a depth-limited decision tree on a stratified 80/20 split.

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix.
    y : pd.Series
        Binary target aligned with ``X``.

    Returns
    -------
    tuple
        ``(model, X_train, X_test, y_train, y_test)`` — the fitted
        classifier followed by the four split arrays.
    """
    # Stratify so both splits keep the original class balance; a fixed
    # seed makes the split reproducible.
    split = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
    X_train, X_test, y_train, y_test = split

    # Depth 4 keeps the tree small enough to read and plot.
    classifier = DecisionTreeClassifier(max_depth=4, random_state=42)
    classifier.fit(X_train, y_train)

    return classifier, X_train, X_test, y_train, y_test

def visualize_decision_tree(model, feature_names):
    """
    Render the fitted tree and save it to ``image_show/``.

    Parameters
    ----------
    model : DecisionTreeClassifier
        A fitted tree classifier.
    feature_names : list[str]
        Column names used for training, in order.
    """
    # savefig does not create intermediate directories and would raise
    # FileNotFoundError if 'image_show' is missing — ensure it exists.
    os.makedirs('image_show', exist_ok=True)

    plt.figure(figsize=(20, 10))
    plot_tree(model, feature_names=feature_names, class_names=['Not High Potential', 'High Potential'], 
              filled=True, rounded=True, fontsize=10)
    plt.title('Decision Tree for High Potential Customer Prediction (Max Depth = 4)')
    plt.savefig('image_show/decision_tree_visualization.png', dpi=300, bbox_inches='tight')
    plt.show()

def print_decision_tree_rules(tree, feature_names, class_names):
    """
    Print the fitted tree's decision logic as indented if/else pseudo-code.

    Parameters
    ----------
    tree : DecisionTreeClassifier
        Fitted classifier exposing a ``tree_`` structure.
    feature_names : list[str]
        Feature column names, indexed by the tree's feature ids.
    class_names : list[str]
        Display names for the target classes.
    """
    internal = tree.tree_
    LEAF = -2  # sklearn marks leaf nodes with feature index -2

    # Map each node to its feature's display name up front.
    names = [
        "undefined!" if idx == LEAF else feature_names[idx]
        for idx in internal.feature
    ]

    def walk(node, depth):
        pad = "  " * depth
        if internal.feature[node] == LEAF:
            # Leaf: report the majority class with its purity and size.
            best = np.argmax(internal.value[node])
            hits = internal.value[node][0][best]
            total = sum(internal.value[node][0])
            conf = hits / total if total > 0 else 0
            print(f"{pad}predict {class_names[best]} "
                  f"(confidence: {conf:.2f}, samples: {int(total)})")
        else:
            label = names[node]
            cut = internal.threshold[node]
            print(f"{pad}if {label} <= {cut:.2f}:")
            walk(internal.children_left[node], depth + 1)
            print(f"{pad}else:  # if {label} > {cut:.2f}")
            walk(internal.children_right[node], depth + 1)

    print("Decision Tree Rules:")
    print("=" * 50)
    walk(0, 0)

def main():
    """
    Entry point: build the dataset, train the tree, and report results.

    Returns
    -------
    tuple
        ``(model, feature_importance)`` — the fitted classifier and a
        DataFrame of feature importances sorted in descending order.
    """
    print("加载并准备数据...")
    data = load_and_prepare_data()

    print("进行特征工程...")
    X = feature_engineering(data)
    y = data['is_high_potential']

    print("训练决策树模型...")
    model, X_train, X_test, y_train, y_test = train_decision_tree(X, y)

    # Hold-out accuracy on the 20% test split.
    accuracy = model.score(X_test, y_test)
    print(f"\n模型准确率: {accuracy:.4f}")

    print("\n生成决策树规则文本...")
    print_decision_tree_rules(model, X.columns.tolist(), ['Not High Potential', 'High Potential'])

    print("\n生成决策树可视化图表...")
    visualize_decision_tree(model, X.columns.tolist())

    # Rank the features by how much the tree relied on each one.
    feature_importance = (
        pd.DataFrame({'feature': X.columns, 'importance': model.feature_importances_})
        .sort_values('importance', ascending=False)
    )

    print("\n特征重要性:")
    print("=" * 50)
    for name, weight in zip(feature_importance['feature'], feature_importance['importance']):
        print(f"{name:25s}: {weight:.4f}")

    # Summarise the class balance of the prediction target.
    print(f"\n数据分布:")
    print("=" * 50)
    print(f"高潜力客户数量: {y.sum()}")
    print(f"总客户数量: {len(y)}")
    print(f"高潜力客户比例: {y.mean():.2%}")

    return model, feature_importance

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    model, feature_importance = main()