# -*- coding: utf-8 -*-
# @日期    : 2021/11/28 12:13
# @作者  : 万方名
# @FileName: main.py

import numpy as np
import pandas as pd
import xgboost as xgb

# Load the UCI seeds dataset: 7 numeric features (columns 0-6) plus a class
# label in column 7.  Labels in the file are 1-3; the converter shifts them
# to 0-2 as required by XGBoost's multi-class objective.
# Raw string for the regex separator avoids the invalid-escape warning.
data = pd.read_csv('../data/seeds_dataset.txt', header=None, sep=r'\s+',
                   converters={7: lambda x: int(x) - 1})

# Rename the last column to 'label'
data.rename(columns={7: 'label'}, inplace=True)

# Reproducible ~80/20 train/test split (the original split was unseeded,
# making results differ on every run).
np.random.seed(42)
mask = np.random.rand(len(data)) < 0.8

train = data[mask]
test = data[~mask]

# Build DMatrix objects.
# Bug fix: the original sliced `iloc[:, :6]`, which silently dropped the
# 7th feature (column 6, kernel-groove length); `:7` keeps all 7 features.
xgb_train = xgb.DMatrix(train.iloc[:, :7], label=train.label)
xgb_test = xgb.DMatrix(test.iloc[:, :7], label=test.label)

# Multi-class classification via softmax
params = {
    'objective': 'multi:softmax',  # learning objective: multi-class, hard labels
    'num_class': 3,                # seeds dataset has 3 wheat varieties
    'eta': 0.1,                    # learning rate
    'max_depth': 5                 # maximum depth of each tree
}

watchlist = [(xgb_train, 'train'), (xgb_test, 'test')]
num_round = 50
bst = xgb.train(params, xgb_train, num_round, watchlist)

# Predict on the held-out set.  With 'multi:softmax' the model returns hard
# class labels (not probabilities), so they compare directly to the truth.
pred = bst.predict(xgb_test)
error_rate = np.sum(pred != test.label) / test.shape[0]
print(f'测试集错误率（softmax）:{error_rate}')
