library(mlr3)
library(mlr3learners)
library(mlr3tuning)
library(mlr3viz)

# Single learner ----
# NOTE(review): `backend = data.frame` passes the *function* `data.frame`,
# not a dataset -- replace with an actual data.frame (or DataBackend) that
# contains a column named "target" before running.
task <- TaskClassif$new(id = "task_name", backend = data.frame, target = "target")
# Polynomial-kernel SVM; probability predictions enable ROC plotting later.
learner <- lrn("classif.svm", id = "learner.svm",
               predict_type = "prob",  # "response" is default, 
               #  but "prob" maybe better because 'roc' can be used in autoplot()
               type = "C-classification", kernel = "polynomial"
               )

# Tuning setup: search space, inner resampling, measure, budget, and tuner.
tune.ps <- ParamSet$new(list(
  ParamInt$new("degree", lower = 3, upper = 5),
  ParamDbl$new("coef0", lower = 0.1, upper = 4)
))
resampling <- rsmp("holdout")    # inner resampling used during tuning
measure <- msr("classif.ce")     # classification error (minimized)
# `term()` was deprecated and later removed from mlr3tuning;
# the terminator constructor is `trm()`.
terminator <- trm("evals", n_evals = 10)
tuner <- tnr("random_search")

# Wrap the learner and tuning strategy into a self-tuning learner.
# NOTE(review): current mlr3tuning renamed the constructor arguments
# `measures` -> `measure` (singular) and `tune_ps` -> `search_space`;
# the old names error on recent versions.
at1 <- AutoTuner$new(
  learner = learner,
  resampling = resampling,
  measure = measure,
  search_space = tune.ps,
  terminator = terminator,
  tuner = tuner
)

# Train the auto-tuner (runs the inner tuning loop, then fits the final model).
at1$train(task)

# Prediction
pred <- at1$predict(task)

# Learner objects have no `$score()` method; scoring is done on the
# Prediction object, optionally with an explicit measure.
pred$score(measure)

autoplot(pred)
autoplot(pred, type = "roc")  # ROC available because predict_type = "prob"

# Benchmarking ----
# NOTE(review): `at2` and `at3` were never defined in this script, so the
# original `list(at1, at2, at3)` would error. Benchmark only the defined
# learner; add further (auto-tuned) learners to the list to compare them.
bmd <- benchmark_grid(
  tasks = task,  # list(task1, task2, ...)
  learners = list(at1),
  resamplings = resampling
)

bmr <- benchmark(bmd)

# Aggregate the chosen measure over all resampling iterations.
bmr$aggregate(measure)
