'''
五分类xgboost
使用五折交叉验证方法
'''
import xgboost as xgb
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix


# 读取五折交叉验证数据集
def read_data2():
    sets_path = 'D:/lung_cancer/data/kfold_dataset/'

    train_sets = ['dataset1.csv',
                  'dataset2.csv',
                  'dataset4.csv',
                  'dataset3.csv']

    test_set = 'dataset0.csv'

    # 读取数据集
    train_features = []
    train_labels = []
    test_features = []
    test_labels = []

    # 读取训练集
    for set in train_sets:
        train_data = pd.read_csv(sets_path+set)
        for j in range(len(train_data)):
            one_feature = [train_data['z'][j], train_data['x'][j], train_data['y'][j], train_data['r'][j],
                           train_data['patientWeight'][j], train_data['patientSex'][j], train_data['patientAge'][j],
                           train_data['patientSize'][j], train_data['local_suvmax'][j], train_data['local_suvmin'][j],
                           train_data['local_suvavg'][j], train_data['local_suvstd'][j], train_data['local_suvvar'][j],
                           train_data['lungW'][j], train_data['lungH'][j]]


            train_features.append(one_feature)
            train_labels.append(train_data['cancer_type'][j] - 1)

    # 读取测试集
    test_data = pd.read_csv(sets_path+test_set)
    for i in range(len(test_data)):
        one_feature = [test_data['z'][i], test_data['x'][i], test_data['y'][i], test_data['r'][i],
                       test_data['patientWeight'][i], test_data['patientSex'][i], test_data['patientAge'][i],
                       test_data['patientSize'][i], test_data['local_suvmax'][i], test_data['local_suvmin'][i],
                       test_data['local_suvavg'][i], test_data['local_suvstd'][i], test_data['local_suvvar'][i],
                       test_data['lungW'][i], test_data['lungH'][i]]

        test_features.append(one_feature)
        test_labels.append(test_data['cancer_type'][i] - 1)

    # 转变成数组
    x_train = np.asarray(train_features, dtype=np.float)
    x_test = np.asarray(test_features, dtype=np.float)
    y_train = np.asarray(train_labels, dtype=np.int)
    y_test = np.asarray(test_labels, dtype=np.int)

    # 随机打乱
    index = np.arange(len(y_train))
    x_train = x_train[index]
    y_train = y_train[index]

    return x_train, y_train, x_test, y_test

# Load the fixed train/test split produced at training time
def read_data():
    """Load the pre-divided train/test CSVs (13 features, no lung size).

    Returns:
        X_train (np.ndarray): training feature matrix, shape (n_train, 13), float64.
        y_train (np.ndarray): training labels shifted to 0-based, int64.
        X_test (np.ndarray): test feature matrix, shape (n_test, 13), float64.
        y_test (np.ndarray): test labels shifted to 0-based, int64.
    """
    test_data = pd.read_csv('D:/lung_cancer/data/divide_csv/five/test.csv')
    train_data = pd.read_csv('D:/lung_cancer/data/divide_csv/five/train.csv')

    # Feature columns used by this split (13 features; lungW/lungH excluded).
    feature_cols = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar']

    # Vectorized extraction; np.float / np.int were removed from NumPy,
    # so use explicit float64/int64 dtypes.
    X_train = train_data[feature_cols].to_numpy(dtype=np.float64)
    X_test = test_data[feature_cols].to_numpy(dtype=np.float64)
    # Labels are 1-based in the CSV; shift to 0-based class ids for xgboost.
    y_train = (train_data['cancer_type'] - 1).to_numpy(dtype=np.int64)
    y_test = (test_data['cancer_type'] - 1).to_numpy(dtype=np.int64)

    return X_train, y_train, X_test, y_test


def train():
    """Train a 5-class XGBoost (DART) model on the k-fold split and report metrics.

    Loads folds via read_data2(), trains for a fixed number of rounds, then
    prints train/test accuracy, a per-class classification report, and the
    confusion matrix for the held-out test fold.
    """
    train_features, train_labels, test_features, test_labels = read_data2()
    dtrain = xgb.DMatrix(train_features, label=train_labels)
    dtest = xgb.DMatrix(test_features, label=test_labels)

    param = {
        'booster': 'dart',
        'objective': 'multi:softmax',  # multi-class classification
        'eval_metric': ['mlogloss', 'merror'],
        'num_class': 5,                # number of classes, required by multi:softmax
        'gamma': 0.3,                  # post-pruning control; larger is more conservative (typically 0.1-0.2)
        'max_depth': 6,                # tree depth; larger values overfit more easily
        'lambda': 3,                   # L2 regularization weight; larger values reduce overfitting
        'subsample': 1,                # row subsampling ratio
        'colsample_bytree': 0.75,      # column subsampling ratio per tree
        'min_child_weight': 2,         # minimum sum of instance weight in a child; splitting stops
                                       # below this. Larger values make the algorithm more conservative.

        'sampling_method': 'gradient_based',
        'tree_method': 'gpu_hist',
        'verbosity': 1,                # FIX: 'silent' is deprecated; 'verbosity' is the replacement
        'eta': 0.5,                    # learning rate
        'seed': 1000,
        'nthread': 4,                  # number of CPU threads
    }

    # Number of boosting rounds
    num_round = 20
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    model = xgb.train(param, dtrain, num_round, evallist)

    # Visualize feature importance
    # xgb.plot_importance(model)
    # plt.show()
    # plt.savefig('../output/features_importance.png')
    # Visualize the model
    # img1 = xgb.to_graphviz(model, num_trees=0)
    # img1.format = 'png'
    # img1.view('../output/img1')

    # Save the model
    # model.save_model('test.model')
    # Dump the model and feature map
    # model.dump_model('../output/model.txt')

    # Predict. BUG FIX: with booster='dart', predict() applies dropout unless
    # an explicit iteration range covering all trained rounds is supplied,
    # which made the original predictions nondeterministic and degraded.
    train_pred = model.predict(dtrain, iteration_range=(0, num_round))
    test_pred = model.predict(dtest, iteration_range=(0, num_round))

    # Evaluation metrics: confusion matrix + accuracy + precision/recall/F1
    train_accuracy = accuracy_score(train_labels, train_pred)
    test_accuracy = accuracy_score(test_labels, test_pred)
    print("train accuracy: %.2f %%" % (train_accuracy * 100))
    print("test accuracy: %.2f %%" % (test_accuracy * 100))

    target_names = ['1', '2', '3', '4', '5']
    result_statis = classification_report(test_labels, test_pred, target_names=target_names)
    print(result_statis)

    confusion = confusion_matrix(test_labels, test_pred)
    print(confusion)



# Script entry point: train and evaluate the model on the k-fold split.
if __name__ == '__main__':
    train()

