from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, f1_score, confusion_matrix


def model_predict(model, X_train, y_train, X_test, y_test):
    """Evaluate a fitted binary classifier on a test set and print a report.

    Computes accuracy, AUC, precision, recall and F1 on (X_test, y_test).
    If (X_train, y_train) are provided, also compares train vs. test
    accuracy to flag possible overfitting (gap > 0.1).

    Parameters
    ----------
    model : estimator with ``predict`` and ``predict_proba``
        A fitted scikit-learn-style classifier.
    X_train, y_train : array-like or None
        Optional training data; pass None to skip the overfitting check.
    X_test, y_test : array-like
        Held-out evaluation data.

    Returns
    -------
    dict
        Metrics keyed by ``accuracy``, ``auc``, ``precision``, ``recall``,
        ``f1`` (previously the function returned None; the dict is a
        backward-compatible addition).
    """
    # Test-set predictions; use predict_proba for AUC.
    y_pred = model.predict(X_test)
    # NOTE(review): [:, 1] assumes binary classification (positive class
    # in column 1) — confirm callers never pass multiclass models.
    y_pred_proba = model.predict_proba(X_test)[:, 1]
    final_acc = accuracy_score(y_test, y_pred)
    final_auc = roc_auc_score(y_test, y_pred_proba)
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)

    # If training data was supplied, check for overfitting via the
    # train/test accuracy gap.
    if X_train is not None and y_train is not None:
        train_pred = model.predict(X_train)
        train_accuracy = accuracy_score(y_train, train_pred)
        print(f"\n训练集准确率: {train_accuracy:.4f}")
        print(f"测试集准确率: {final_acc:.4f}")
        # A gap larger than 0.1 is treated as a sign of overfitting.
        if train_accuracy - final_acc > 0.1:
            print("⚠️ 模型可能存在过拟合")
        else:
            print("✅ 模型泛化能力良好")

    print("测试集性能指标:")
    print(f"精确率 (Precision): {precision:.4f}")
    print(f"召回率 (Recall): {recall:.4f}")
    print(f"F1 分数: {f1:.4f}")
    print("最终模型准确率: {:.4f}".format(final_acc))
    print("最终模型AUC值: {:.4f}".format(final_auc))

    # Return the computed metrics so callers don't have to re-derive them
    # from the printed output.
    return {
        "accuracy": final_acc,
        "auc": final_auc,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
