import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV
import joblib
import matplotlib.pyplot as plt
import seaborn as sns

def load_features():
    """Load the engineered feature table from features.csv.

    Returns:
        A pandas DataFrame with the features, or None when the file
        has not been generated yet.
    """
    try:
        return pd.read_csv('features.csv')
    except FileNotFoundError:
        # Features are produced by the preprocessing script; tell the user.
        print("找不到特征文件，请先运行data_preprocessing.py生成特征")
        return None

def prepare_data(features_df):
    """Split the feature table into scaled train/test partitions.

    Args:
        features_df: DataFrame from the preprocessing step, or None.

    Returns:
        (X_train_scaled, X_test_scaled, y_train, y_test, feature_names, scaler),
        or six Nones when features_df is None.
    """
    if features_df is None:
        return None, None, None, None, None, None

    # Columns that must not leak into the model: identifiers, raw dates,
    # and the target plus its derived counter.
    non_feature_cols = ['customer_id', 'first_purchase_date', 'last_purchase_date',
                        'repurchase_count', 'is_repeat_buyer']

    y = features_df['is_repeat_buyer']
    X = features_df.drop(non_feature_cols, axis=1)
    feature_names = list(X.columns)

    # Stratified 80/20 split preserves the class balance in both partitions.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y)

    # Fit the scaler on the training split only to avoid test-set leakage.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    return X_train_scaled, X_test_scaled, y_train, y_test, feature_names, scaler

def train_model(X_train, y_train, feature_names):
    """Grid-search two tree ensembles and return the best by CV AUC.

    Args:
        X_train: scaled training feature matrix.
        y_train: training labels.
        feature_names: column names aligned with X_train, used for the
            feature-importance plot.

    Returns:
        The best fitted estimator, or None when X_train is None.
    """
    if X_train is None:
        return None

    # Candidate estimators paired with their hyper-parameter grids.
    candidates = {
        'Random Forest': (
            RandomForestClassifier(random_state=42),
            {'n_estimators': [100, 200], 'max_depth': [None, 10, 20]},
        ),
        'Gradient Boosting': (
            GradientBoostingClassifier(random_state=42),
            {'n_estimators': [100, 200], 'learning_rate': [0.01, 0.1],
             'max_depth': [3, 5]},
        ),
    }

    best_model, best_auc, best_model_name = None, 0, ''

    for model_name, (estimator, grid) in candidates.items():
        print(f"训练 {model_name}...")
        search = GridSearchCV(estimator, grid, cv=5, scoring='roc_auc', n_jobs=-1)
        search.fit(X_train, y_train)

        print(f"{model_name} 最佳参数: {search.best_params_}")
        print(f"{model_name} 交叉验证 AUC: {search.best_score_:.4f}")

        # Keep whichever model achieves the highest cross-validated AUC.
        if search.best_score_ > best_auc:
            best_auc = search.best_score_
            best_model = search.best_estimator_
            best_model_name = model_name

    print(f"最佳模型: {best_model_name}")

    # Tree ensembles expose feature_importances_; plot them for the winner.
    if hasattr(best_model, 'feature_importances_'):
        plot_feature_importance(best_model.feature_importances_,
                                feature_names, best_model_name)

    return best_model

def plot_feature_importance(importances, feature_names, model_name):
    """Save a bar chart of the top feature importances to feature_importance.png.

    Args:
        importances: 1-D array of importance scores, one per feature.
        feature_names: feature names aligned with `importances`.
        model_name: model label used in the chart title.
    """
    indices = np.argsort(importances)[::-1]
    # Show at most 10 features, but never more than actually exist —
    # the previous hard-coded 10 made plt.bar/xticks receive x positions
    # and heights of different lengths (and raised) for models with
    # fewer than 10 features.
    top_features = min(10, len(importances))

    plt.figure(figsize=(12, 8))
    plt.title(f'{model_name} 特征重要性')
    plt.bar(range(top_features), importances[indices][:top_features], align='center')
    plt.xticks(range(top_features), [feature_names[i] for i in indices[:top_features]], rotation=90)
    plt.tight_layout()
    plt.savefig('feature_importance.png')
    plt.close()

def evaluate_model(model, X_test, y_test):
    """Report test-set metrics and save a confusion-matrix plot.

    Args:
        model: fitted classifier with predict/predict_proba, or None.
        X_test: scaled test feature matrix.
        y_test: true test labels.

    Returns:
        Dict with accuracy/precision/recall/f1/auc, or None when model is None.
    """
    if model is None:
        return

    # Hard class predictions plus positive-class probabilities (for AUC).
    y_pred = model.predict(X_test)
    y_pred_proba = model.predict_proba(X_test)[:, 1]

    metrics = {
        'accuracy': accuracy_score(y_test, y_pred),
        'precision': precision_score(y_test, y_pred),
        'recall': recall_score(y_test, y_pred),
        'f1': f1_score(y_test, y_pred),
        'auc': roc_auc_score(y_test, y_pred_proba),
    }

    print(f"准确率: {metrics['accuracy']:.4f}")
    print(f"精确率: {metrics['precision']:.4f}")
    print(f"召回率: {metrics['recall']:.4f}")
    print(f"F1分数: {metrics['f1']:.4f}")
    print(f"AUC: {metrics['auc']:.4f}")

    print("\n分类报告:")
    print(classification_report(y_test, y_pred))

    # Persist the confusion matrix as a heatmap image.
    plot_confusion_matrix(confusion_matrix(y_test, y_pred))

    return metrics

def plot_confusion_matrix(cm):
    """Render the confusion matrix as a heatmap saved to confusion_matrix.png.

    Args:
        cm: 2-D confusion-matrix array (true labels as rows).
    """
    plt.figure(figsize=(10, 7))
    ax = sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    ax.set_xlabel('预测标签')
    ax.set_ylabel('真实标签')
    ax.set_title('混淆矩阵')
    plt.savefig('confusion_matrix.png')
    plt.close()

def save_model(model, scaler, feature_names):
    """Persist the trained model, scaler and feature names with joblib.

    Does nothing when model is None (no successful training run to save).
    """
    if model is None:
        return
    # One artifact per file so downstream scoring can load them independently.
    artifacts = {
        'repurchase_model.pkl': model,
        'scaler.pkl': scaler,
        'feature_names.pkl': feature_names,
    }
    for path, obj in artifacts.items():
        joblib.dump(obj, path)
    print("模型和相关对象已保存")

if __name__ == "__main__":
    # End-to-end pipeline: load features, split/scale, tune models,
    # evaluate on the held-out set, then persist the artifacts.
    features = load_features()
    (X_train, X_test, y_train, y_test,
     feature_names, scaler) = prepare_data(features)
    model = train_model(X_train, y_train, feature_names)
    metrics = evaluate_model(model, X_test, y_test)
    save_model(model, scaler, feature_names)