import warnings

import numpy as np
import pandas as pd
from imblearn.ensemble import BalancedBaggingClassifier, EasyEnsembleClassifier
from joblib import dump
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier  # base learner for the ensemble models

# Silence noisy library warnings (e.g. sklearn/imblearn deprecation chatter)
warnings.filterwarnings('ignore')

# 1. Load the labelled pairs and the engineered feature table
train_data = pd.read_csv('train.csv')
final_features = pd.read_csv('final_features.csv')

# Left-join the engineered features onto each (user, merchant) pair;
# pairs with no feature row fall back to 0 for every feature.
merged_train = train_data.merge(
    final_features, on=['user_id', 'merchant_id'], how='left'
).fillna(0)

# Feature matrix (identifiers and target removed) and target vector
X = merged_train.drop(columns=['user_id', 'merchant_id', 'label'])
y = merged_train['label']

# Inspect the class imbalance before doing anything about it
print("原始标签分布:\n", y.value_counts(normalize=True))

# 2. Stratified 80/20 split so both splits keep the original class ratio
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    stratify=y,
    random_state=42,
)

# 3. Build per-sample weights (used instead of class_weight for the GBM)
counts = y_train.value_counts()
class_counts = counts.to_dict()
majority_class = counts.idxmax()  # dominant label (0 in this dataset)
minority_class = counts.idxmin()  # rare label (1 in this dataset)
# Imbalance ratio, e.g. 93% / 7% ≈ 13.29 — used to up-weight the rare class
weight_ratio = class_counts[majority_class] / class_counts[minority_class]

# Rare-class rows carry weight_ratio, every other row carries 1.0
train_labels = y_train.to_numpy()
sample_weights = np.where(train_labels == minority_class, weight_ratio, 1.0)


# 4. Evaluation helper: fit a model and print imbalance-aware metrics
def evaluate_model(model, name, X_train, y_train, X_test, y_test, sample_weights=None):
    """Fit *model* on the training split and report metrics on the test split.

    Parameters
    ----------
    model : estimator exposing fit / predict / predict_proba
    name : str
        Display name used in the printed report header.
    X_train, y_train : training split.
    X_test, y_test : held-out split used for all reported metrics.
    sample_weights : array-like or None
        If given, forwarded to ``fit`` as ``sample_weight`` (used to
        up-weight the minority class for models without class_weight).

    Returns
    -------
    dict
        ``{'auc': ..., 'minority_recall': ...}`` so callers can compare
        models programmatically (additive; the original returned None).
    """
    if sample_weights is not None:
        model.fit(X_train, y_train, sample_weight=sample_weights)
    else:
        model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    y_prob = model.predict_proba(X_test)[:, 1]

    # Compute the confusion matrix once and derive minority recall from it.
    # The original called classification_report twice (text + dict) just to
    # read out the recall of class '1'.
    cm = confusion_matrix(y_test, y_pred)
    auc = roc_auc_score(y_test, y_prob)
    # Recall of the positive/minority class (label 1): TP / (TP + FN).
    # Guard against a degenerate split where only one class is present.
    if cm.shape == (2, 2) and (cm[1, 0] + cm[1, 1]) > 0:
        recall = cm[1, 1] / (cm[1, 0] + cm[1, 1])
    else:
        recall = 0.0

    print(f"\n==== {name} 模型评估 ====")
    print("混淆矩阵:\n", cm)
    print("分类报告:\n", classification_report(y_test, y_pred))
    print(f"ROC AUC: {auc:.4f}")
    print(f"少数类（1类）召回率: {recall:.4f}")

    return {'auc': auc, 'minority_recall': recall}


# 5. Model definitions with imbalance-aware weighting

# 5.1 EasyEnsemble: undersampling ensemble whose decision-tree base
# learner is additionally weighted toward the minority class.
ee_model = EasyEnsembleClassifier(
    estimator=DecisionTreeClassifier(
        class_weight={majority_class: 1, minority_class: weight_ratio},
        random_state=42,
    ),
    n_estimators=50,  # more base learners for a more robust ensemble
    random_state=42,
    n_jobs=-1,
)

# 5.2 Balanced bagging: each bag is resampled to balance, and the tree
# additionally uses sklearn's built-in "balanced" class weighting.
bb_model = BalancedBaggingClassifier(
    estimator=DecisionTreeClassifier(class_weight="balanced", random_state=42),
    n_estimators=50,
    random_state=42,
    n_jobs=-1,
)

# 5.3 Gradient boosting: imbalance is handled via sample_weight at fit
# time (see the training loop below), so no class_weight is set here.
gb_model = GradientBoostingClassifier(
    n_estimators=200,  # extra rounds to compensate for the low learning rate
    learning_rate=0.05,
    random_state=42,
)

# 6. Train and evaluate each model with its own weighting strategy
models = [
    (ee_model, "EasyEnsemble", None),               # weighting built into base tree
    (bb_model, "BalancedBagging", None),            # weighting built into base tree
    (gb_model, "GradientBoosting", sample_weights),  # weighted at fit time
]

for clf, display_name, weights in models:
    evaluate_model(clf, display_name, X_train, y_train, X_test, y_test, weights)

# 7. Persist the fitted models for later scoring/inspection.
# (The section comment previously said "8", skipping a number, and the
# joblib import sat mid-file; it now lives in the top import block.)
dump(ee_model, 'easy_ensemble_model.joblib')
dump(bb_model, 'balanced_bagging_model.joblib')
dump(gb_model, 'gradient_boosting_model.joblib')