import pandas as pd
import xgboost as xgb
from sklearn.model_selection import train_test_split
import joblib  # used to persist the trained booster via pickle
import logging

# ── Data loading and preparation ─────────────────────────────────────
# Kaggle "Give Me Some Credit" training data (cs-training.csv).
# NOTE(review): hard-coded absolute Windows path — parameterize if this
# script is ever run elsewhere. The original mixed an r'' raw prefix
# with doubled backslashes; a plain raw string is the consistent form.
data = pd.read_csv(
    r'D:\python\Binary classification\xgboost\xgboost_score_model-master'
    r'\xgboost_score_model-master\cs-training.csv'
)

# Drop the unnamed row-index column left over from the CSV export.
data = data.drop(columns=['Unnamed: 0'])

# Rename columns positionally: the first column is the binary target
# 'y'; the rest are borrower features. A single list avoids the
# duplicated feature enumeration the original carried.
feature_names = [
    'RevolvingUtilizationOfUnsecuredLines', 'age',
    'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
    'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
    'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
    'NumberOfDependents',
]
data.columns = ['y'] + feature_names

# Feature matrix and target vector.
data_x = data[feature_names]
data_y = data['y']

# 80/20 train/test split with a fixed seed for reproducibility.
train_x, test_x, train_y, test_y = train_test_split(
    data_x.values, data_y.values, test_size=0.2, random_state=1234
)

# ── XGBoost training setup ───────────────────────────────────────────
# Wrap the numpy splits in DMatrix, XGBoost's internal data structure.
d_train = xgb.DMatrix(train_x, label=train_y)
d_valid = xgb.DMatrix(test_x, label=test_y)

# Evaluation sets monitored during training. Order matters: the LAST
# entry ('valid') is the one early stopping is driven by.
watchlist = [(d_train, 'train'), (d_valid, 'valid')]

# Booster hyper-parameters.
params = {
    'eta': 0.2,                      # learning rate (shrinkage per tree)
    'max_depth': 3,                  # maximum tree depth
    'min_child_weight': 1,           # min sum of instance weight in a child
    'gamma': 0.3,                    # min loss reduction required to split
    'subsample': 0.8,                # row subsampling ratio per tree
    'colsample_bytree': 0.8,         # column subsampling ratio per tree
    'booster': 'gbtree',             # tree-based boosting
    'objective': 'binary:logistic',  # binary classification, probability output
    'scale_pos_weight': 1,           # class-imbalance weight (1 = no reweighting)
    'lambda': 1,                     # L2 regularization on leaf weights
    'seed': 1234,                    # RNG seed for reproducibility
    # 'silent' was removed in XGBoost >= 1.0; 'verbosity' replaces it.
    'verbosity': 0,                  # suppress library log chatter
    'eval_metric': 'auc',            # metric evaluated on the watchlist
}

# ── Model training ───────────────────────────────────────────────────
# An unconfigured root logger defaults to WARNING, so the logging.info
# call below would otherwise print nothing.
logging.basicConfig(level=logging.INFO)

# First pass: up to 1000 rounds, early-stopped on the 'valid' AUC.
bst = xgb.train(params, d_train, 1000, watchlist,
                early_stopping_rounds=500, verbose_eval=10)

# best_iteration is 0-indexed, so the optimal model contains
# best_iteration + 1 trees (retraining with best_iteration rounds
# would silently drop the last tree).
tree_nums = bst.best_iteration + 1
logging.info('最优模型树的数量：%s，auc：%s', bst.best_iteration, bst.best_score)

# Retrain for exactly the optimal number of rounds; early stopping is
# pointless here because the round count is already fixed.
bst = xgb.train(params, d_train, tree_nums, watchlist, verbose_eval=10)

# Persist the booster. joblib pickles the Python object; note that
# bst.save_model() is the version-stable alternative if the model must
# load under a different XGBoost release.
joblib.dump(bst, 'd:/xgboost.model')
