'''
xgboost分类器
# 只对第一类和第二类做分类（腺癌和鳞癌）,并绘制ROC曲线
# 每个分类器通过调整二分类阈值可生成一条ROC曲线
'''

import xgboost as xgb
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from release_code.data_analysis.data_two_metrics import auc_curve, pr_curve

# 自定义比例划分训练集和测试集
# Custom-ratio split of the raw data into train and test sets.
def read_data():
    """Load the raw CSV, keep only cancer types 1 and 2 (adenocarcinoma and
    squamous carcinoma), and return a random 80/20 train/test split.

    Returns:
        X_train, X_test, y_train, y_test: numpy float/int arrays;
        labels are remapped to 0 (type 1) and 1 (type 2).
    """
    data = pd.read_csv('D:/lung_cancer/data/data.csv')
    features = []
    labels = []
    for i in range(len(data)):
        # cancer_type is 1-based; shift to 0/1 and keep only the two classes.
        label = data['cancer_type'][i] - 1
        if label == 0 or label == 1:
            one_feature = [data['z'][i], data['x'][i], data['y'][i], data['r'][i],
                           data['patientWeight'][i], data['patientSex'][i], data['patientAge'][i],
                           data['part_suvmax'][i], data['suv_avg'][i], data['suv_std'][i]]
            features.append(one_feature)
            labels.append(label)

    # np.float / np.int were removed in NumPy 1.24 — use the builtin types.
    # random_state is fixed so the split is reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(np.array(features, dtype=float),
                                                        np.array(labels, dtype=int),
                                                        test_size=0.2, random_state=1234565)
    return X_train, X_test, y_train, y_test

# 按训练时划分的训练集和测试集
# Feature columns fed to the model, in the exact order expected downstream.
_FEATURE_COLUMNS = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar',
                    'lungW', 'lungH', 'newx', 'newy']

def _extract_features_labels(df):
    """Return (features, labels) numpy arrays from one pre-split dataframe.

    Labels are the 1-based ``cancer_type`` column shifted to 0/1.
    """
    # np.float / np.int were removed in NumPy 1.24 — use the builtin types.
    features = df[_FEATURE_COLUMNS].to_numpy(dtype=float)
    labels = (df['cancer_type'] - 1).to_numpy(dtype=int)
    return features, labels

# Use the train/test split that was fixed at model-training time.
def read_data2():
    """Load the pre-divided train/test CSVs and return feature/label arrays.

    Returns:
        X_train, X_test, y_train, y_test: numpy arrays; labels are 0/1.
    """
    test_data = pd.read_csv('D:/lung_cancer/data/divide_csv/two/test.csv')
    train_data = pd.read_csv('D:/lung_cancer/data/divide_csv/two/train.csv')

    X_train, y_train = _extract_features_labels(train_data)
    X_test, y_test = _extract_features_labels(test_data)

    return X_train, X_test, y_train, y_test

# 一般：无wh,无特殊坐标
# Baseline feature set: no lung width/height, no remapped coordinates.
def train():
    """Train an XGBoost binary classifier on the fixed train/test split and
    plot ROC and PR curves for the test set predictions.
    """
    train_features, test_features, train_labels, test_labels = read_data2()
    dtrain = xgb.DMatrix(train_features, label=train_labels)
    dtest = xgb.DMatrix(test_features, label=test_labels)

    param = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',  # binary classification
        'eval_metric': 'auc',
        'max_depth': 6,                # tree depth; larger values overfit more easily
        'lambda': 12,                  # L2 regularization weight; larger -> less overfitting
        'subsample': 1,                # row subsampling ratio per tree
        'colsample_bytree': 0.7,       # column subsampling ratio per tree
        'min_child_weight': 2,         # minimum sum of instance weight in a child; splitting stops below this. Larger -> more conservative
        'eta': 0.6,                    # learning rate
        'seed': 0,
        'nthread': 4,                  # number of CPU threads
        # The original key 'slient' was a typo for the long-deprecated 'silent'
        # and was silently ignored; 'verbosity': 0 is the modern equivalent.
        'verbosity': 0
    }

    # Number of boosting rounds.
    num_round = 12
    watchlist = [(dtrain, 'train')]
    model = xgb.train(param, dtrain, num_boost_round=num_round, evals=watchlist)

    # Visualize feature importance
    # xgb.plot_importance(model)
    # plt.savefig('../output/features_importance.png')
    # plt.show()
    # plt.close()

    # Visualize the model
    # img1 = xgb.to_graphviz(model, num_trees=0)
    # img1.format = 'png'
    # img1.view('../output/img1')

    # Save the model
    # model.save_model('../output/model.json')
    # Dump the model and feature map
    # model.dump_model('../output/model.txt')

    # Predict probabilities on both splits.
    train_preds = model.predict(dtrain)
    test_preds = model.predict(dtest)

    # Save test-set results (probabilities)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_labels.npy', test_labels)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_preds.npy', test_preds)

    # Plot ROC and PR curves for the test set.
    auc_curve(test_labels, test_preds)
    # auc_curve(train_labels, train_preds)
    pr_curve(test_labels, test_preds)
    # pr_curve(train_labels, train_preds)

# 一般+w+h
# Baseline features plus lung width/height.
def train2():
    """Train an XGBoost binary classifier (variant with lung W/H features) on
    the fixed train/test split and plot ROC and PR curves for the test set.
    """
    train_features, test_features, train_labels, test_labels = read_data2()
    dtrain = xgb.DMatrix(train_features, label=train_labels)
    dtest = xgb.DMatrix(test_features, label=test_labels)

    param = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',  # binary classification
        'eval_metric': 'auc',
        'max_depth': 4,                # tree depth; larger values overfit more easily
        'lambda': 12,                  # L2 regularization weight; larger -> less overfitting
        'subsample': 1,                # row subsampling ratio per tree
        'colsample_bytree': 0.7,       # column subsampling ratio per tree
        'min_child_weight': 1,         # minimum sum of instance weight in a child; splitting stops below this. Larger -> more conservative
        'eta': 0.4,                    # learning rate
        'seed': 0,
        'nthread': 4,                  # number of CPU threads
        # The original key 'slient' was a typo for the long-deprecated 'silent'
        # and was silently ignored; 'verbosity': 0 is the modern equivalent.
        'verbosity': 0
    }

    # Number of boosting rounds.
    num_round = 22
    watchlist = [(dtrain, 'train')]
    model = xgb.train(param, dtrain, num_boost_round=num_round, evals=watchlist)

    # Visualize feature importance
    # xgb.plot_importance(model)
    # plt.savefig('../output/features_importance.png')
    # plt.show()
    # plt.close()

    # Visualize the model
    # img1 = xgb.to_graphviz(model, num_trees=0)
    # img1.format = 'png'
    # img1.view('../output/img1')

    # Save the model
    # model.save_model('../output/model.json')
    # Dump the model and feature map
    # model.dump_model('../output/model.txt')

    # Predict probabilities on both splits.
    train_preds = model.predict(dtrain)
    test_preds = model.predict(dtest)

    # Save test-set results (probabilities)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_wh_labels.npy', test_labels)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_wh_preds.npy', test_preds)

    # Plot ROC and PR curves for the test set.
    auc_curve(test_labels, test_preds)
    # auc_curve(train_labels, train_preds)
    pr_curve(test_labels, test_preds)
    # pr_curve(train_labels, train_preds)

# 一般+w+h+newx+newy
# Baseline features plus lung width/height and remapped coordinates (newx/newy).
def train3():
    """Train an XGBoost binary classifier (variant with W/H and newx/newy
    features) on the fixed train/test split and plot ROC and PR curves.
    """
    train_features, test_features, train_labels, test_labels = read_data2()
    dtrain = xgb.DMatrix(train_features, label=train_labels)
    dtest = xgb.DMatrix(test_features, label=test_labels)

    param = {
        'booster': 'gbtree',
        'objective': 'binary:logistic',  # binary classification
        'eval_metric': 'auc',
        'max_depth': 4,                # tree depth; larger values overfit more easily
        'lambda': 12,                  # L2 regularization weight; larger -> less overfitting
        'subsample': 1,                # row subsampling ratio per tree
        'colsample_bytree': 0.7,       # column subsampling ratio per tree
        'min_child_weight': 2,         # minimum sum of instance weight in a child; splitting stops below this. Larger -> more conservative
        'eta': 0.3,                    # learning rate
        'seed': 0,
        'nthread': 4,                  # number of CPU threads
        # The original key 'slient' was a typo for the long-deprecated 'silent'
        # and was silently ignored; 'verbosity': 0 is the modern equivalent.
        'verbosity': 0
    }

    # Number of boosting rounds.
    num_round = 20
    watchlist = [(dtrain, 'train')]
    model = xgb.train(param, dtrain, num_boost_round=num_round, evals=watchlist)

    # Visualize feature importance
    # xgb.plot_importance(model)
    # plt.savefig('../output/features_importance.png')
    # plt.show()
    # plt.close()

    # Visualize the model
    # img1 = xgb.to_graphviz(model, num_trees=0)
    # img1.format = 'png'
    # img1.view('../output/img1')

    # Save the model
    # model.save_model('../output/model.json')
    # Dump the model and feature map
    # model.dump_model('../output/model.txt')

    # Predict probabilities on both splits.
    train_preds = model.predict(dtrain)
    test_preds = model.predict(dtest)

    # Save test-set results (probabilities)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_whxy_labels.npy', test_labels)
    # np.save('D:/lung_cancer/data/two_result/two_xgboost_whxy_preds.npy', test_preds)

    # Plot ROC and PR curves for the test set.
    auc_curve(test_labels, test_preds)
    # auc_curve(train_labels, train_preds)
    pr_curve(test_labels, test_preds)
    # pr_curve(train_labels, train_preds)

if __name__ == '__main__':
    # Run the baseline variant; switch to train2()/train3() for the
    # feature-set variants.
    train()