# -*- coding: utf-8 -*-

import logger.logger as logger
from sklearn.externals import joblib
import xgboost as xgb
from sklearn.metrics import accuracy_score


class XGB():
    """Thin wrapper around xgboost for multi-class classification.

    Collects booster parameters in ``self.parameter`` (the dict handed
    straight to ``xgb.train``) and offers train / grid-search / save /
    load / predict helpers.  The trained model is persisted with joblib.
    """

    def __init__(self, booster='gbtree',
                 objective='multi:softmax',
                 num_class=4,
                 gamma=0.1,
                 max_depth=6,
                 lambda_=2,
                 subsample=0.7,
                 colsample_bytree=0.7,
                 min_child_weight=3,
                 silent=1,
                 eta=0.3,
                 seed=1000,
                 nthread=4):
        '''
        :param booster: booster type ('gbtree' or 'gblinear')
        :param objective: learning objective; 'multi:softmax' = multi-class
        :param num_class: number of classes (used together with multi:softmax)
        :param gamma: minimum loss reduction for a further split (controls
            post-pruning); larger is more conservative, typically 0.1-0.2
        :param max_depth: tree depth; larger values overfit more easily
        :param lambda_: L2 regularisation weight; larger means less
            overfitting ('lambda' is a Python keyword, hence the trailing _)
        :param subsample: row subsampling ratio for each boosting round
        :param colsample_bytree: column subsampling ratio per tree
        :param min_child_weight: minimum sum of instance weight in a child
        :param silent: 1 suppresses run-time messages, 0 prints them
        :param eta: learning rate (shrinkage)
        :param seed: random seed
        :param nthread: number of CPU threads
        '''
        self.model = None
        # Parameter dict in the exact key names xgb.train() expects;
        # note the 'lambda_' argument maps to the 'lambda' key.
        self.parameter = {
            'booster': booster,
            'objective': objective,
            'num_class': num_class,
            'gamma': gamma,
            'max_depth': max_depth,
            'lambda': lambda_,
            'subsample': subsample,
            'colsample_bytree': colsample_bytree,
            'min_child_weight': min_child_weight,
            'silent': silent,
            'eta': eta,
            'seed': seed,
            'nthread': nthread,
        }

    def train(self, x_train, y_train, x_test, y_test, model_path,
              num_boost_round=10):
        """Train a booster, persist it to ``model_path`` and log test accuracy.

        :param x_train: training feature matrix
        :param y_train: training labels
        :param x_test: held-out feature matrix used for the accuracy log
        :param y_test: held-out labels
        :param model_path: filesystem path the fitted model is dumped to
        :param num_boost_round: boosting iterations (new optional parameter;
            defaults to xgboost's own default of 10, so existing callers
            are unaffected)
        """
        logger.info("training...")
        dtrain = xgb.DMatrix(x_train, label=y_train)
        dtest = xgb.DMatrix(x_test, label=y_test)

        # BUG FIX: xgb.train() takes the params dict as its FIRST positional
        # argument followed by the DMatrix.  The original passed the DMatrix
        # first and unpacked the params as keyword arguments, which raises
        # TypeError before any training happens.
        self.model = xgb.train(self.parameter, dtrain,
                               num_boost_round=num_boost_round)
        joblib.dump(self.model, model_path)
        y_pred = self.model.predict(dtest)
        # multi:softmax already yields class labels; round() keeps this
        # robust should the objective ever be switched to softprob margins.
        predictions = [round(value) for value in y_pred]
        acc = accuracy_score(y_test, predictions)
        logger.info("acc: {}".format(str(acc)))

    def cross_validation(self, train_x, train_y, test_vecs, y_test,
                         model_path, param_grid=None):
        """Grid-search xgboost hyper-parameters with 5-fold CV, keep the best
        estimator, persist it and log its test accuracy.

        BUG FIXES vs. the original:
        - the ``param_grid`` argument was accepted but unconditionally
          overwritten by a hard-coded grid of SVM parameters (kernel/C),
          which are meaningless for xgboost;
        - ``GridSearchCV`` was given the ``xgb`` *module* instead of an
          estimator instance, which crashes on ``fit``;
        - the final model was re-trained via ``xgb.train(train_x, train_y)``
          (wrong signature) instead of using the grid-search winner;
        - ``Booster`` has no ``.score``; the sklearn-API ``best_estimator_``
          does.

        :param param_grid: optional grid dict/list for GridSearchCV;
            a sensible xgboost default is used when None.
        """
        from sklearn.model_selection import GridSearchCV
        if param_grid is None:
            # Default search space over the parameters that matter most.
            param_grid = {
                'max_depth': [4, 6, 8],
                'learning_rate': [0.01, 0.1, 0.3],
                'n_estimators': [50, 100, 200],
                'gamma': [0, 0.1, 0.2],
            }
        # sklearn-compatible estimator so GridSearchCV can clone/score it.
        estimator = xgb.XGBClassifier(objective=self.parameter['objective'],
                                      nthread=self.parameter['nthread'])
        grid_search = GridSearchCV(estimator, param_grid,
                                   n_jobs=4, verbose=1, cv=5)
        grid_search.fit(train_x, train_y)
        best_parameters = grid_search.best_estimator_.get_params()

        for para, val in list(best_parameters.items()):
            print(para, val)

        # refit=True (GridSearchCV's default) means best_estimator_ was
        # already re-trained on the full training set — keep it.
        self.model = grid_search.best_estimator_
        joblib.dump(self.model, model_path)
        acc = self.model.score(test_vecs, y_test)
        logger.info("acc: {}".format(str(acc)))

    def load_model(self, model_path):
        """Load a previously joblib-dumped model from ``model_path``."""
        self.model = joblib.load(model_path)

    def predict(self, x_vecs):
        """Predict labels for ``x_vecs`` with the loaded/trained model.

        Returns None (after logging) when no model has been loaded yet.
        Note: a Booster from train() expects a DMatrix here, while a
        grid-search winner expects a raw feature matrix — presumably
        callers pass the matching type; TODO confirm at the call sites.
        """
        if not self.model:
            logger.info("please load xgb model first!!!")
            return
        y_pred = self.model.predict(x_vecs)
        return y_pred


if __name__ == '__main__':
    # Script entry-point placeholder: this module is meant to be imported
    # for its XGB class; nothing runs when executed directly.
    pass
