# 库导入
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import warnings
from tqdm import tqdm

# Suppress library warnings to keep the console output readable
warnings.filterwarnings('ignore')

# Load the labelled training set, the round-B test set, the submission
# template, and the unlabelled pool.
# NOTE(review): data_nolabel is read here but never used later in this
# script — confirm whether a pseudo-labelling step was removed.
train_data = pd.read_csv('E:/Users/Wxw/Desktop/data/dataTrain.csv')
test_data = pd.read_csv('E:/Users/Wxw/Desktop/data/dataB.csv')
submission = pd.read_csv('E:/Users/Wxw/Desktop/data/submit_example_B.csv')
data_nolabel = pd.read_csv('E:/Users/Wxw/Desktop/data/dataNoLabel.csv')

# Report dataset sizes
print(f'train_data.shape = {train_data.shape}')
print(f'test_data.shape  = {test_data.shape}')

# Feature construction
# 'f47' fuses 'f1' and 'f2' into one composite code (f1 * 10 + f2)
train_data['f47'] = train_data['f1'] * 10 + train_data['f2']
test_data['f47'] = test_data['f1'] * 10 + test_data['f2']


def _add_pairwise_features(df, cols):
    """Add brute-force pairwise interaction features to ``df`` in place.

    For every unordered pair (a, b) taken from ``cols`` (a before b),
    four new columns are created: 'a+b', 'a-b', 'a*b' and 'a/b'.
    The quotient uses ``b + 1`` in the denominator to guard against
    division by zero.
    """
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            a, b = cols[i], cols[j]
            df[f'{a}+{b}'] = df[a] + df[b]
            df[f'{a}-{b}'] = df[a] - df[b]
            df[f'{a}*{b}'] = df[a] * df[b]
            df[f'{a}/{b}'] = df[a] / (df[b] + 1)


# Location-related and call-related raw feature groups.  The original
# script duplicated the same four-operation loop for each group; both now
# go through the single helper above (identical columns, same order).
loc_f = ['f1', 'f2', 'f4', 'f5', 'f6']
com_f = ['f43', 'f44', 'f45', 'f46']
for df in [train_data, test_data]:
    _add_pairwise_features(df, loc_f)
    _add_pairwise_features(df, com_f)

# Encode the categorical feature(s) with labels fitted on the union of
# train and test so unseen test categories cannot break the transform.
cat_columns = ['f3']
combined = pd.concat([train_data, test_data])

for col in cat_columns:
    encoder = LabelEncoder().fit(combined[col])
    train_data[col] = encoder.transform(train_data[col])
    test_data[col] = encoder.transform(test_data[col])

# Everything except the id, the target and the raw categorical column is
# treated as numeric; the encoded categorical columns are appended after.
num_columns = [col for col in train_data.columns if col not in ['id', 'label', 'f3']]
feature_columns = num_columns + cat_columns
target = 'label'

# Split the frames into model inputs and target
train = train_data[feature_columns]
label = train_data[target]
test = test_data[feature_columns]


# 模型训练函数，使用交叉验证框架
def model_train(model, model_name, kfold=5, X=None, y=None, X_pred=None):
    """Train ``model`` with stratified K-fold CV and return averaged test predictions.

    Parameters
    ----------
    model : estimator
        Any object implementing ``fit`` and ``predict_proba``.
    model_name : str
        Label used in the progress printouts.
    kfold : int, default 5
        Number of stratified folds.
    X, y, X_pred : optional
        Training features, training labels and prediction features.  When
        omitted, the module-level ``train``, ``label`` and ``test`` frames
        are used, which keeps the original call sites working unchanged.

    Returns
    -------
    numpy.ndarray
        Positive-class probabilities for ``X_pred``, averaged over folds.
    """
    # Fall back to the script-level data when no override is supplied
    X = train if X is None else X
    y = label if y is None else y
    X_pred = test if X_pred is None else X_pred

    oof_preds = np.zeros(X.shape[0])        # out-of-fold predictions on the training rows
    test_preds = np.zeros(X_pred.shape[0])  # accumulated test-set predictions
    skf = StratifiedKFold(n_splits=kfold)   # NOTE: no shuffle — folds follow row order
    print(f"Model = {model_name}")

    # Cross-validation loop
    for k, (train_index, valid_index) in enumerate(skf.split(X, y)):
        x_tr, x_val = X.iloc[train_index, :], X.iloc[valid_index, :]
        y_tr, y_val = y.iloc[train_index], y.iloc[valid_index]

        # Fit on this fold's training portion
        model.fit(x_tr, y_tr)

        # Score the held-out fold
        y_pred = model.predict_proba(x_val)[:, 1]
        oof_preds[valid_index] = y_pred.ravel()
        auc = roc_auc_score(y_val, y_pred)
        print("- KFold = %d, val_auc = %.4f" % (k, auc))

        # Accumulate this fold's predictions for the prediction set
        test_preds += model.predict_proba(X_pred)[:, 1].ravel()

    # Overall out-of-fold AUC across all folds
    print("Overall Model = %s, AUC = %.4f" % (model_name, roc_auc_score(y, oof_preds)))
    return test_preds / kfold


# 数据清洗
gbc = GradientBoostingClassifier()
gbc_test_preds = model_train(gbc, "GradientBoostingClassifier", 60)

# 为了加速计算，只使用前50000条训练数据
train = train[:50000]
label = label[:50000]

# 模型融合，定义多个基模型
gbc = GradientBoostingClassifier(
    n_estimators=50,
    learning_rate=0.1,
    max_depth=5
)
hgbc = HistGradientBoostingClassifier(
    max_iter=100,
    max_depth=5
)
xgbc = XGBClassifier(
    objective='binary:logistic',
    eval_metric='auc',
    n_estimators=100,
    max_depth=6,
    learning_rate=0.1
)
gbm = LGBMClassifier(
    objective='binary',
    boosting_type='gbdt',
    num_leaves=2 ** 6,
    max_depth=8,
    colsample_bytree=0.8,
    subsample_freq=1,
    max_bin=255,
    learning_rate=0.05,
    n_estimators=100,
    metrics='auc'
)
cbc = CatBoostClassifier(
    iterations=210,
    depth=6,
    learning_rate=0.03,
    l2_leaf_reg=1,
    loss_function='Logloss',
    verbose=0
)

# 定义Stacking模型
estimators = [
    ('gbc', gbc),
    ('hgbc', hgbc),
    ('xgbc', xgbc),
    ('gbm', gbm),
    ('cbc', cbc)
]
clf = StackingClassifier(
    estimators=estimators,
    final_estimator=LogisticRegression()
)

# 特征筛选
X_train, X_test, y_train, y_test = train_test_split(
    train, label, stratify=label, random_state=2022)

# 训练Stacking模型
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)[:, 1]
auc = roc_auc_score(y_test, y_pred)
print('auc = %.8f' % auc)

# 特征选择，找出对AUC影响较大的特征
ff = []
for col in feature_columns:
    x_test = X_test.copy()
    x_test[col] = 0  # 将特征置为0
    auc1 = roc_auc_score(y_test, clf.predict_proba(x_test)[:, 1])
    if auc1 < auc:
        ff.append(col)
    print('%5s | %.8f | %.8f' % (col, auc1, auc1 - auc))

# 使用选定的特征重新训练模型
clf.fit(X_train[ff], y_train)
y_pred = clf.predict_proba(X_test[ff])[:, 1]
auc = roc_auc_score(y_test, y_pred)
print('auc = %.8f' % auc)

# 使用筛选后的特征
train = train[ff]
test = test[ff]

# 重新训练模型并进行预测
clf_test_preds = model_train(clf, "StackingClassifier", 10)

# 保存预测结果到CSV文件，路径为E:/Users/Wxw/Desktop/data/submission.csv，编码为utf-8
submission['label'] = clf_test_preds
submission.to_csv('E:/Users/Wxw/Desktop/data/submission.csv', index=False, encoding='utf-8')
