# mlr3 template

library(mlr3)
library(mlr3learners)

# 1. Task ----
# A Task wraps the data, the task type, and (for supervised tasks) the target.
# NOTE: fill in `target` with the name of the target column in `data`.
task <- TaskClassif$new(id = "classif.tsk", backend = data, target = "")
regr.tsk <- TaskRegr$new(id = "regr.tsk", backend = data, target = "")

# Predefined tasks shipped with mlr3
as.data.table(mlr_tasks)
tsk("iris")  # Get predefined task 'iris'

# Inspect a task
task$data()
# Subset by row ids / column names; the argument is `cols` (plural), not `col`
task$data(rows = c(3, 10, 27), cols = "Species")
task$nrow
task$ncol
task$feature_names
task$target_names

# Modify task (R6: these mutate the task in place)
task$select(c("Sepal.Width", "Sepal.Length")) # keep only these features
task$filter(1:3) # keep only these rows
task$cbind(data.table(foo = letters[1:3])) # add column foo

# 2. Learner ----
mlr_learners  # dictionary of all available learners

# Construct a learner; hyperparameters can be passed directly via `...`,
# e.g. lrn("classif.svm", kernel = "radial").
# (The original line ended with a bare, undefined `hyperparamters` symbol.)
learner <- lrn("classif.svm", id = "svm.lrn")

learner$param_set  # Hyperparameters (range, default and current values)
# Change hyperparameters after construction.
# NOTE(review): `cp` and `xval` are rpart parameters, shown here only as an
# example -- use names valid for the current learner's param_set.
learner$param_set$values <- list(cp = 0.01, xval = 0)

# 3. Training ----
# Split into train and test sets; both are vectors of row ids.
all.rows <- seq_len(task$nrow)
train.set <- sample(all.rows, 0.7 * task$nrow)  # 70% of rows for training
test.set <- all.rows[!all.rows %in% train.set]  # the remaining 30%

# Fit the learner on the training rows only
learner$train(task, row_ids = train.set)

learner$model  # Access the fitted model object

# 4. Predicting ----
# Returns a Prediction object (here a PredictionClassif)
prediction <- learner$predict(task, row_ids = test.set)
prediction$confusion   # Show confusion matrix

# Predict probability ----
# Switch the learner to probability output; must be set before predicting
learner$predict_type = "prob"
pred2 <- learner$predict(task, row_ids = test.set)

# Plot a prediction object
library(mlr3viz)
autoplot(prediction)
autoplot(prediction, type = "roc")  # ROC curve; only for 'prob' predictions

# Performance assessment
mlr_measures  # dictionary of all available measures
# "classif.ce" and "regr.mse" are the defaults for classif and regr tasks,
# respectively.

measure <- msr("classif.acc")
prediction$score(measure)

# 5. Resampling ----
mlr_resamplings  # dictionary of all available resampling strategies

# Create resampling strategies
ho <- rsmp("holdout", ratio = 0.7)
cv <- rsmp("cv", folds = 5L)

# 'rs' is a ResampleResult obj that includes all iterations
rs <- resample(task, learner, ho, store_models = TRUE)

# Calculate the average performance across all resampling iterations
rs$aggregate(msr("classif.ce"))

# Return per-iteration performance as a data.frame
as.data.table(rs$score(msr("classif.ce")))

# Warnings / errors collected during resampling
rs$warnings
rs$errors

# The resampling strategy used and its number of iterations
rs$resampling
rs$resampling$iters

# Get the train and test row ids of the first iteration
rs$train_set(1)
rs$test_set(1)

# Return the 1st iteration learner in resampling
#  (fitted models are kept because store_models = TRUE above)
rs$learners[[1]]

# Return all predictions merged into a single object
rs$prediction()

# Return the prediction of the 1st resampling iteration
rs$predictions()[[1]]

# 6. Benchmarking ----

# benchmark_grid() creates an exhaustive design: every learner is run on every
# task, with the same resampling instantiation per task so all learners see
# identical train/test splits.
# Use the full argument names (tasks/learners/resamplings) -- the original
# relied on R's partial argument matching (`task =` for `tasks`).
bmd <- benchmark_grid(
  tasks = list(task1, task2, ...),  # placeholder: list the tasks to compare
  learners = list(lrn("classif.rpart"), lrn("classif.featureless")),
  resamplings = rsmp("holdout")
)  # bmd - benchmark design

bmr <- benchmark(bmd)

# NOTE(review): scoring on the "train" predict set requires the learners to be
# constructed with predict_sets including "train" -- confirm before use.
measures <- list(
  msr("classif.auc", id = "auc_train", predict_sets = "train"),
  msr("classif.auc", id = "auc_test")
)

bmr$aggregate(measures)

# 7. Tuning ----
learner$param_set  # inspect which hyperparameters can be tuned

# Define the hyperparameter search space
# NOTE(review): this uses the older paradox API (ParamSet$new + Param*$new);
#  newer paradox versions prefer ps(cp = p_dbl(...), ...) -- confirm the
#  installed version before modernizing.
library(paradox)
tune.ps <- ParamSet$new(list(
  ParamDbl$new("cp", lower = 0.001, upper = 0.1),
  ParamInt$new("minsplit", lower = 1, upper = 10)
))

# Examples of the available parameter classes:
# parA = ParamLgl$new(id = "A")
# parB = ParamInt$new(id = "B", lower = 0, upper = 10, tags = c("tag1", "tag2"))
# parC = ParamDbl$new(id = "C", lower = 0, upper = 4, special_vals = list(NULL))
# parD = ParamFct$new(id = "D", levels = c("x", "y", "z"), default = "y")
# parE = ParamUty$new(id = "E", custom_check = function(x) checkmate::checkFunction(x))

# Assemble the tuning instance: resampling strategy, measure, and budget
ho <- rsmp("holdout")
msr.ce <- msr("classif.ce")
evals20 <- term("evals", n_evals = 20)  # terminator: stop after 20 evaluations

library(mlr3tuning)
# NOTE(review): TuningInstance and term() belong to the old mlr3tuning API;
#  recent versions use TuningInstanceSingleCrit / trm() -- verify against the
#  installed version.
tuning.method <- TuningInstance$new(
  task = task,
  learner = learner,
  resampling = ho,
  measures = msr.ce,
  param_set = tune.ps,
  terminator = evals20
)

# Trigger the tuning
tuner <- tnr("grid_search", resolution = 5)  # alternatives: RandomSearch, TunerGenSA

tuning.rst <- tuner$tune(tuning.method)

# Re-train the learner on the full task with the tuned hyperparameters
learner$param_set$values = tuning.method$result$params
learner$train(task)

# Auto tuning
#  AutoTuner is a Learner that wraps a base learner together with its search
#  space, tuner, and terminator, and runs the tuning inside $train().
learner = lrn("classif.rpart")
resampling = rsmp("holdout")
measures = msr("classif.ce")
tune_ps = ParamSet$new(list(
  ParamDbl$new("cp", lower = 0.001, upper = 0.1),
  ParamInt$new("minsplit", lower = 1, upper = 10)
))
terminator = term("evals", n_evals = 10)
tuner = tnr("random_search")

# NOTE(review): this constructor signature matches the old mlr3tuning API;
#  newer versions take different argument names -- verify before updating.
at = AutoTuner$new(
  learner = learner,
  resampling = resampling,
  measures = measures,
  tune_ps = tune_ps,
  terminator = terminator,
  tuner = tuner
)
at$train(task)  # tunes via the inner resampling, then fits on the whole task

# An AutoTuner obj can be used like any other learner: $train(), $predict(),
#  and it can be passed to benchmark() -- here compared against the untuned
#  base learner.
# Use the full argument names (tasks/learners/resamplings); the original
# relied on R's partial argument matching (`task =` for `tasks`, etc.).
grid <- benchmark_grid(
  tasks = tsk("pima"),
  learners = list(at, lrn("classif.rpart")),
  resamplings = rsmp("cv", folds = 5)
)

bmr <- benchmark(grid)
bmr$aggregate(measures)
