# coding: utf-8

from xgboost import XGBClassifier


def fit_xgb(X_train, y_train):
    """Train an XGBoost multiclass classifier with fixed hyperparameters.

    NOTE(review): n_estimators is deliberately large; pair with CV /
    early stopping elsewhere to pick the effective number of rounds.

    Args:
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        The fitted XGBClassifier instance.
    """
    model = XGBClassifier(
        learning_rate=0.1,
        n_estimators=1000,
        max_depth=5,
        min_child_weight=1,
        gamma=0,
        subsample=0.3,
        colsample_bytree=0.8,
        colsample_bylevel=0.7,
        objective='multi:softmax',
        seed=3,
    )
    model.fit(X_train, y_train)
    return model

def fit_svm(X_train, y_train):
    """Fit a linear support-vector classifier with default settings.

    Args:
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        The fitted LinearSVC instance.
    """
    from sklearn.svm import LinearSVC

    classifier = LinearSVC()
    classifier.fit(X_train, y_train)
    return classifier

def fit_LR(X_train, y_train):
    """Fit a logistic-regression classifier with default settings.

    Args:
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        The fitted LogisticRegression instance.
    """
    from sklearn.linear_model import LogisticRegression

    model = LogisticRegression()
    return model.fit(X_train, y_train)


def fit_LR_cv(X_train, y_train):
    """Grid-search a logistic regression over penalty type and strength C.

    Runs a 5-fold cross-validated grid search scored by negative
    log-loss, prints the best log-loss and best parameters, and returns
    the fitted grid (exposes ``best_estimator_`` etc. to callers).

    Args:
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        The fitted GridSearchCV object.
    """
    from sklearn.model_selection import GridSearchCV
    from sklearn.linear_model import LogisticRegression

    # Parameter grid: regularization type and inverse regularization strength.
    penaltys = ['l1', 'l2']
    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
    tuned_parameters = dict(penalty=penaltys, C=Cs)

    # BUG FIX: LogisticRegression's default solver ('lbfgs') supports only
    # the l2 penalty, so every l1 candidate in the grid would raise.
    # 'liblinear' handles both l1 and l2, as the original comment intended.
    lr_penalty = LogisticRegression(solver='liblinear')
    grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5, scoring='neg_log_loss')
    grid.fit(X_train, y_train)
    print(-grid.best_score_)  # negate: scoring is *negative* log-loss
    print(grid.best_params_)
    return grid

def printmetrics(model, X_val, y_val):
    """Print classification report, confusion matrix, and accuracy.

    Args:
        model: any fitted estimator with a ``predict`` method.
        X_val: validation feature matrix.
        y_val: true validation labels.
    """
    import sklearn.metrics as metrics

    predictions = model.predict(X_val)
    print("Classification report for classifier: ",
          metrics.classification_report(y_val, predictions))
    print("Confusion matrix: ",
          metrics.confusion_matrix(y_val, predictions))
    print("accuracy_score: ",
          metrics.accuracy_score(y_val, predictions))
