import xgboost as xgb
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

# NOTE: legacy XGBoost "agaricus" quick-start demo, disabled by wrapping it
# in a bare string literal (a no-op expression at module level).
# Kept for reference only; it is never executed.
'''

# read in data
dtrain = xgb.DMatrix('D:/lung_cancer/xgboost-master/xgboost-master/demo/data/agaricus.txt.train')
dtest = xgb.DMatrix('D:/lung_cancer/xgboost-master/xgboost-master/demo/data/agaricus.txt.test')

# specify parameters via map
param = {'max_depth':2, 'eta':1, 'objective':'binary:logistic'}
num_round = 2
bst = xgb.train(param, dtrain, num_round)

# make prediction
preds = bst.predict(dtest)
print(preds)

'''

# Load the lung-cancer nodule dataset and build feature/label arrays.
# Features per sample: nodule position (z, x, y), radius r, and patient
# metadata (weight, sex, age) — all columns must already be numeric,
# since they are cast to float below.
data = pd.read_csv('D:/lung_cancer/data/data.csv')

FEATURE_COLS = ['z', 'x', 'y', 'r', 'patientWeight', 'patientSex', 'patientAge']
TRAIN_SIZE = 800  # first 800 rows train, the remainder test

# Vectorized column extraction replaces the former per-row Python loop.
# FIX: np.float / np.int were deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float / int dtypes are exactly what those aliases meant.
features = data[FEATURE_COLS].to_numpy(dtype=float)
# Labels in the CSV are 1-based ('cancer_type'); shift to 0-based class ids
# as required by XGBoost's multi:softmax objective.
label = (data['cancer_type'] - 1).to_numpy(dtype=int)

train_features = features[:TRAIN_SIZE]
train_labels = label[:TRAIN_SIZE]

test_features = features[TRAIN_SIZE:]
test_labels = label[TRAIN_SIZE:]

# Per-sample training weights.
# NOTE(review): these are *random and unseeded*, so every run trains with
# different weights — presumably a leftover experiment; confirm before use.
w = np.random.rand(TRAIN_SIZE, 1)
dtrain = xgb.DMatrix(train_features, label=train_labels, missing=0, weight=w)
dtest = xgb.DMatrix(test_features, label=test_labels)

# Save the DMatrix to a binary file for faster reloading
# dtrain.save_binary('train.buffer')
# Load the binary file back
# dtrain2 = xgb.DMatrix('train.buffer')

# XGBoost training parameters for 5-class classification.
param = {
    'booster': 'gbtree',
    'objective': 'multi:softmax',  # multiclass objective; predicts hard class labels
    'num_class': 5,                # number of classes, required with multi:softmax
    'gamma': 0.1,                  # min loss reduction to make a split (pruning control); larger = more conservative, typically 0.1-0.2
    'max_depth': 12,               # tree depth; deeper trees overfit more easily
    'lambda': 2,                   # L2 regularization weight; larger = less prone to overfitting
    'subsample': 0.7,              # row subsampling ratio for each tree
    'colsample_bytree': 1,         # column subsampling ratio for each tree
    'min_child_weight': 3,         # minimum sum of instance weights in a child; splitting stops below this. Larger = more conservative
    'silent': 0,                   # 0 = print run info. NOTE(review): 'silent' is deprecated in newer XGBoost in favor of 'verbosity' — confirm the installed version
    'eta': 0.3,                    # shrinkage / learning-rate-like step size
    'seed': 1000,
    'nthread': 4,                  # number of CPU threads
}

# Train the booster for a fixed number of rounds, then score the test set.
num_round = 10
bst = xgb.train(param, dtrain, num_boost_round=num_round)
preds = bst.predict(dtest)
print(preds)

# Visualize which features the booster relied on most.
xgb.plot_importance(bst)
plt.show()

# Persist the trained model.
bst.save_model('test.model')
# Dump the model structure (and feature map) as plain text.
bst.dump_model('dump.raw.txt')


