'''
Three-class XGBoost classification model (trained on pre-split lung-nodule feature CSVs).
'''

import xgboost as xgb
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from release_code.data_analysis.data_two_metrics import auc_curve, pr_curve
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix


# Columns used as model features, in the order expected by the trained model.
_FEATURE_COLUMNS = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge', 'patientSize',
                    'local_suvmax', 'local_suvmin', 'local_suvavg',
                    'local_suvstd', 'local_suvvar']


# Loads the train/test split produced at training time.
def read_data(train_csv='D:/lung_cancer/data/divide_csv/three/train.csv',
              test_csv='D:/lung_cancer/data/divide_csv/three/test.csv'):
    """Load the pre-split train/test CSVs and return feature/label arrays.

    Parameters
    ----------
    train_csv, test_csv : str
        Paths to the CSV files (defaults preserve the original hard-coded
        locations, so existing callers are unaffected).

    Returns
    -------
    (X_train, X_test, y_train, y_test) :
        float64 feature matrices of shape (n_samples, 13) and int64 label
        vectors. Labels in the CSV are 1-based (1..3) and are shifted to
        0-based (0..2) as required by XGBoost's multi-class objectives.
    """
    train_data = pd.read_csv(train_csv)
    test_data = pd.read_csv(test_csv)

    # Vectorized column selection replaces the original per-row Python loops.
    # NOTE: np.float / np.int were removed in NumPy 1.24; use explicit dtypes.
    X_train = train_data[_FEATURE_COLUMNS].to_numpy(dtype=np.float64)
    X_test = test_data[_FEATURE_COLUMNS].to_numpy(dtype=np.float64)
    y_train = (train_data['cancer_type'] - 1).to_numpy(dtype=np.int64)
    y_test = (test_data['cancer_type'] - 1).to_numpy(dtype=np.int64)

    return X_train, X_test, y_train, y_test

def train():
    """Train a 3-class XGBoost model (dart booster) and print evaluation metrics.

    Loads the pre-split train/test data via ``read_data``, trains for 50
    boosting rounds, then prints train/test accuracy, a per-class
    classification report, and the confusion matrix for the test set.
    """
    train_features, test_features, train_labels, test_labels = read_data()

    dtrain = xgb.DMatrix(train_features, label=train_labels)
    dtest = xgb.DMatrix(test_features, label=test_labels)

    param = {
        'booster': 'dart',
        'objective': 'multi:softmax',  # 3-class problem; predict() returns class ids
        'num_class': 3,
        'eval_metric': ['mlogloss'],
        'max_depth': 6,                # tree depth; larger values overfit more easily
        'lambda': 8,                   # L2 regularization weight; larger => less overfitting
        'subsample': 1,                # row subsampling ratio per boosting round
        'colsample_bytree': 0.7,       # column subsampling ratio per tree
        'min_child_weight': 5,         # min sum of instance weights in a child; larger => more conservative splits
        'gamma': 0,                    # min loss reduction required to make a split
        'eta': 0.1,                    # learning rate
        'seed': 0,
        # 'sampling_method': 'gradient_based',
        # 'tree_method': 'gpu_hist',
        'nthread': 4,                  # number of CPU threads
        'verbosity': 1,                # fixed: was the misspelled (and deprecated) 'slient', which XGBoost silently ignored
    }

    # Number of boosting rounds.
    num_round = 50
    evallist = [(dtest, 'eval'), (dtrain, 'train')]
    model = xgb.train(param, dtrain, num_boost_round=num_round, evals=evallist)

    # With multi:softmax, predict() yields class ids directly (0..2).
    train_preds = model.predict(dtrain)
    test_preds = model.predict(dtest)

    # Evaluation: accuracy on both splits.
    train_accuracy = accuracy_score(train_labels, train_preds)
    test_accuracy = accuracy_score(test_labels, test_preds)
    print("train accuracy: %.2f %%" % (train_accuracy * 100))
    print("test accuracy: %.2f %%" % (test_accuracy * 100))

    # Per-class precision/recall/F1. Labels were shifted to 0-based in
    # read_data; the display names keep the original 1-based class ids.
    target_names = ['1', '2', '3']
    result_statis = classification_report(test_labels, test_preds, target_names=target_names)
    print(result_statis)

    # Confusion matrix on the test split.
    confusion = confusion_matrix(test_labels, test_preds)
    print(confusion)




# Script entry point: train and evaluate the model when run directly.
if __name__ == '__main__':
    train()
