# xgboost
library(mlr3)
library(mlr3learners)
library(mlr3tuning)
library(paradox)

# Load the Pima Indians diabetes data; MASS ships it pre-split into a
# test set (Pima.te, 332 rows) and a training set (Pima.tr, 200 rows).
# Recombine the two pieces into one 532-row data frame.
data(Pima.te, Pima.tr, package = "MASS")
pima <- rbind(Pima.te, Pima.tr)

# Standardize the seven numeric predictors (column 8 is the factor
# target `type`), then reattach the untouched target column.
predictor_cols <- setdiff(names(pima), "type")
pima.scale <- data.frame(scale(pima[, predictor_cols]))
pima.scale$type <- pima$type


# Binary classification task predicting diabetes status (`type`).
# Use the standardized frame built above -- the original passed the raw
# `pima` frame, which left `pima.scale` as dead code. (Tree-based
# xgboost is largely insensitive to scaling, but this keeps the task
# consistent with the preprocessing step.)
pima.tsk <- TaskClassif$new(id = "pima",
                            backend = pima.scale,
                            target = "type")
# Gradient-boosted tree learner. Three hyperparameters are pinned here;
# everything else (incl. nrounds and eta) stays at the learner defaults
# until the tuning section below searches over them.
#   colsample_bytree = 1   : use every feature when building each tree
#   min_child_weight = 1   : xgboost's minimum summed instance weight per leaf
#   subsample        = 0.5 : each boosting round sees a random half of the rows
pima.xgb <- lrn("classif.xgboost", 
                colsample_bytree = 1,
                min_child_weight = 1,
                subsample = 0.5)

# Fit the untuned learner on the full task, then score it.
# $train() returns the learner invisibly, so the two calls chain.
# NOTE(review): this is resubstitution error (predicting on the training
# data), which is an optimistic estimate of generalization performance.
pima.pred <- pima.xgb$train(pima.tsk)$predict(pima.tsk)
pima.pred$score()

# Tuning ----
# Browse the learner's tunable hyperparameters and their ranges.
pst <- as.data.table(pima.xgb$param_set)
# View() only works in an interactive session (e.g. RStudio) and errors
# under Rscript; fall back to printing when run non-interactively.
if (interactive()) View(pst) else print(pst)

# Search space for the random search below. The original used the old
# paradox constructor API (ParamSet$new + ParamInt$new/ParamDbl$new),
# which was deprecated and removed in paradox 1.0; ps()/p_int()/p_dbl()
# is the supported replacement and builds the same ParamSet.
tune.ps <- ps(
  nrounds   = p_int(lower = 75, upper = 100),   # boosting rounds
  eta       = p_dbl(lower = 0.01, upper = 0.3), # learning rate
  gamma     = p_dbl(lower = 0.25, upper = 0.5), # min loss reduction to split
  max_depth = p_int(lower = 2, upper = 3)       # tree depth
)

# AutoTuner: wraps the learner so that $train() first runs a random
# search (15 evaluations, 5-fold CV, minimizing classification error)
# over tune.ps, then refits with the best configuration.
# Updated to the current mlr3tuning API: `measures` -> `measure`,
# `tune_ps` -> `search_space`, and the removed term() -> trm().
pima.at <- AutoTuner$new(
  learner = pima.xgb,
  resampling = rsmp("cv", folds = 5L),
  measure = msr("classif.ce"),
  search_space = tune.ps,
  terminator = trm("evals", n_evals = 15),
  tuner = tnr("random_search")
)

# Tune + refit (the search happens inside $train()), then predict.
# $train() returns the AutoTuner invisibly, so the calls chain.
# NOTE(review): scoring on the training task is optimistic; an outer
# resampling (nested CV) would give an honest performance estimate.
pima.pred.at <- pima.at$train(pima.tsk)$predict(pima.tsk)

pima.pred.at$score()
