# coding:utf-8
# Author : hiicy redldw
# Date : 2019/01/14
import pymysql
from numpy import loadtxt
import xgboost
import numpy as np
from sklearn import preprocessing
from xgboost import XGBClassifier,XGBRegressor
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.metrics import accuracy_score,mean_squared_error
from xgboost import plot_importance
from matplotlib import pyplot as plt

def xgbClassifi():
    """
    Binary classification demo on the Pima Indians diabetes dataset.

    Grid-searches the learning rate with stratified 10-fold CV, then
    trains an XGBClassifier with early stopping monitored on the test
    split, prints accuracy and plots feature importance.
    """
    fpath = r'F:\Archive\files\pima-indians-diabetes.csv'
    dg = loadtxt(fpath, delimiter=',')

    X = dg[:, 0:8]   # feature columns
    Y = dg[:, -1]    # label column

    seed = 7
    test_size = 0.22
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size,
                                                        random_state=seed)

    model = XGBClassifier()

    # Tune only the learning rate; scoring uses negative log loss.
    learning_rate = [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3]
    param_grid = dict(learning_rate=learning_rate)
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)
    grid_search = GridSearchCV(model, param_grid, scoring="neg_log_loss", n_jobs=-1, cv=kfold)
    grid_result = grid_search.fit(X, Y)
    print(grid_result.cv_results_.keys())
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))

    # Monitor loss on the held-out split; stop after 10 rounds without improvement.
    eval_set = [(X_test, Y_test)]
    model.fit(X_train, Y_train, early_stopping_rounds=10, eval_metric="logloss",
              eval_set=eval_set, verbose=True)
    y_pred = model.predict(X_test)
    print('score:', model.score(X_test, Y_test))
    results = model.evals_result()
    # eval_metric above is "logloss", so the recorded curve is stored under
    # that key — the original looked up 'error', which raises KeyError here.
    epochs = len(results['validation_0']['logloss'])
    print('boosting rounds evaluated:', epochs)
    predictions = [round(value) for value in y_pred]
    accuracy = accuracy_score(Y_test, predictions)
    print("Accuracy: %.2f%%" % (accuracy * 100.0))
    plot_importance(model)
    plt.show()

def Regrees():
    """
    Regression demo on the Boston housing dataset.

    Fits an XGBRegressor on a train/test split and reports RMSE, then
    runs 3-fold xgboost.cv on the full data and plots the first tree
    (tree plotting requires graphviz).
    """
    from sklearn.datasets import load_boston
    boston = load_boston()
    data, target = boston.data, boston.target
    print(data.shape, boston.feature_names)
    import pandas as pd
    # NOTE: the original re-imported numpy here; the module-level `np` is used instead.
    data = pd.DataFrame(data)
    data.columns = boston.feature_names
    print(data.head())
    data['price'] = target
    print(data.info())
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
    xg_reg = xgboost.XGBRegressor(objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1,
                                  max_depth=5, alpha=10, n_estimators=10)
    xg_reg.fit(X_train, y_train)
    preds = xg_reg.predict(X_test)
    rmse = np.sqrt(mean_squared_error(y_test, preds))
    print('rmse 损失:', rmse)

    # Objective function reference:
    #   "reg:linear"       linear regression
    #   "reg:logistic"     logistic regression
    #   "binary:logistic"  logistic regression for binary classification; outputs a probability
    #   "binary:logitraw"  same, but outputs the raw score before the sigmoid transform
    #   "count:poisson"    Poisson regression for count data
    #   "multi:softmax"    multi-class classification with softmax loss; requires num_class
    #   "multi:softprob"   same, but outputs a probability for each of the n classes
    #   "rank:pairwise"    ranking with pairwise loss
    #   "reg:gamma"        gamma regression
    #
    # eval_metric reference:
    #   'rmse', 'mae'
    #   'logloss'    negative log-likelihood
    #   'error'      binary classification error rate (positive when output > 0.5)
    #   'error@t'    same, with custom threshold t
    #   'merror'     multi-class classification error rate
    #   'mlogloss'   multi-class logloss
    #   'auc'
    #   'ndcg'       Normalized Discounted Cumulative Gain
    #   'map'        Mean Average Precision
    data_dmatrix = xgboost.DMatrix(data=X, label=y)
    params = {"objective": "reg:linear", 'colsample_bytree': 0.3, 'learning_rate': 0.1,
              'max_depth': 5, 'alpha': 10}
    # Cross-validation
    cv_results = xgboost.cv(dtrain=data_dmatrix, params=params, nfold=3,
                            num_boost_round=50, early_stopping_rounds=10,
                            metrics="rmse", as_pandas=True, seed=123)
    print(cv_results.head())
    # Plot the first tree (requires graphviz)
    xg_reg = xgboost.train(params, data_dmatrix, num_boost_round=10)
    xgboost.plot_tree(xg_reg, num_trees=0)
    plt.rcParams['figure.figsize'] = [50, 10]
    plt.show()
# xgbClassifi()
# Regrees()
def classfy():
    """
    Multi-class classification demo on the iris dataset.

    Trains an XGBClassifier with the softmax objective, prints the test
    score/accuracy and plots feature importance.
    """
    from sklearn.datasets import load_iris
    import xgboost as xgb
    from xgboost import plot_importance
    from matplotlib import pyplot as plt
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score

    # Load the sample dataset
    iris = load_iris()
    X, y = iris.data, iris.target
    # Split into train/test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234565)
    print(X_train.shape, y_train)
    # Train the model
    model = xgb.XGBClassifier(max_depth=5, learning_rate=0.1, n_estimators=160,
                              silent=True, objective='multi:softmax')
    model.fit(X_train, y_train)

    # Predict on the test set
    score = model.score(X_test, y_test)
    print('score:', score)
    y_pred = model.predict(X_test)
    # Compute accuracy (the original output label misspelled it as "accuarcy")
    accuracy = accuracy_score(y_test, y_pred)
    print("accuracy: %.2f%%" % (accuracy * 100.0))

    # Plot feature importance
    plot_importance(model)
    plt.show()

def do_normalization(data):
    """Scale every feature of *data* into the [0, 1] range via min-max scaling."""
    scaler = preprocessing.MinMaxScaler()
    return scaler.fit_transform(data)

def getTrainData(normalize=False):
    """
    Fetch the ``boston`` table from MySQL and split it into features/labels.

    :param normalize: when True, min-max scale the feature columns.
        (The original kept this behind a dead ``if 0:`` branch; the default
        False preserves the original behavior.)
    :return: tuple ``(data, label)`` of numpy arrays — all columns but the
        last are features, the last column is the label.
    """
    # NOTE(review): credentials are hard-coded in source — move to config/env vars.
    config = {
        'host': "10.244.171.143",
        'port': 3306,
        'user': "cddba",
        'password': 'foxconn2019..',
        'db': "rds_test"
    }
    conn = pymysql.connect(connect_timeout=20, **config)
    try:
        # Cursor context manager guarantees the cursor is closed;
        # the finally guarantees the connection is closed even on error
        # (the original leaked both if the query raised).
        with conn.cursor() as cur:
            cur.execute(r'select * from boston')
            rows = cur.fetchall()
    finally:
        conn.close()
    datax = np.array(rows)
    data = datax[:, 0:-1]   # feature columns
    label = datax[:, -1]    # label column
    if normalize:
        data = do_normalization(data)
    return data, label
def xgRess():
    """
    Regression demo on data pulled from the MySQL ``boston`` table.

    Trains an XGBRegressor with the gamma objective, reports MAE and RMSE
    on the test split and plots feature importance.
    """
    import xgboost as xgb
    from xgboost import plot_importance
    from matplotlib import pyplot as plt
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    import numpy as np

    X, y = getTrainData()
    print(X)
    print('\n\n\n\n', y)
    # XGBoost training split (the original called train_test_split twice
    # back-to-back with identical arguments; the duplicate is removed).
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    model = xgb.XGBRegressor(max_depth=5, learning_rate=0.1, n_estimators=160,
                             silent=True, objective='reg:gamma')
    model.fit(X_train, y_train)

    # Predict on the test set
    ans = model.predict(X_test)

    preds = model.predict(X_train)
    print('利用函数预测得分', model.score(X_train, y_train))
    print('预测数据\n', preds, '\n')

    # The original computed mean_absolute_error but printed it as "mse";
    # report it under its correct name.
    mae = mean_absolute_error(y_test, ans)
    print(f'mae:{mae}')
    rmse = np.sqrt(mean_squared_error(y_test, ans))
    print(f'rmse:{rmse}')
    # Plot feature importance
    plot_importance(model)
    plt.show()
if __name__ == "__main__":
    # Run the regression demo only when executed as a script,
    # not as a side effect of importing this module.
    xgRess()