library(tidyverse)

# Load data ----
# NOTE(review): hard-coded absolute Windows path — adjust for your machine.
load("D:/Code/RCode/MachLearn/prostate.RData")
str(prostate)
table(prostate$gleason)

# Recode Gleason score as binary: 0 when the score is 6, 1 otherwise.
prostate <- prostate %>%
  mutate(gleason = if_else(gleason == 6, 0, 1))
table(prostate$gleason)

# Split train and test set ----
# The logical 'train' indicator column is dropped after splitting.
prostate.train <- subset(prostate, train, select = -train)
prostate.test <- subset(prostate, !train, select = -train)

# Create model (Regr tree) ----
library(mlr3)
library(mlr3viz)

# Target 'lpsa' is numeric, so a classification task is not available;
# build a regression task on the training data instead.
pst.tsk <- TaskRegr$new(
  id = "prostate",
  target = "lpsa",
  backend = prostate.train
)

autoplot(pst.tsk)                  # distribution of the target variable only
autoplot(pst.tsk, type = "pairs")  # pairwise distributions and correlations

# List all available learners, then pick the rpart regression tree.
mlr_learners
pst.lrn <- lrn("regr.rpart", id = "rpart")

# 5-fold cross-validation to estimate the generalization error.
resampling.cv <- rsmp("cv", folds = 5L)
resample.result <- resample(pst.tsk, pst.lrn, resampling.cv)

# Mean score across the 5 folds (default regression measure).
resample.result$aggregate()

# Inspect the learner fitted on the fifth fold.
resample.result$learners[[5]]

# Fit the tree on the full training task.
pst.lrn$train(pst.tsk)

# In-sample prediction, score (default regression measure), and plot.
pst.pred <- pst.lrn$predict(pst.tsk)
pst.pred$score()
autoplot(pst.pred)

# Hold-out prediction on the test set, scored against the same task spec.
pst.prednew <- pst.lrn$predict_newdata(prostate.test, task = pst.tsk)
pst.prednew$score()
autoplot(pst.prednew)

# Plot model ----
# 'learner$model' is the fitted rpart object, i.e. the result of
#   tree.pros <- rpart(lpsa ~ ., data = prostate.train)

# plot() only draws the structure (branches) of the tree
plot(pst.lrn$model, uniform = TRUE, compress = TRUE, lty = 3, branch = 0.7)
# text() adds the node labels (fitted values, and counts via use.n) to the tree
text(pst.lrn$model, all = TRUE, digits = 5, use.n = TRUE, cex = 0.9, xpd = TRUE)

# rpart.plot::prp() draws a nicer regression tree.
library(rpart.plot)
prp(pst.lrn$model)

# Auto tuning ----
library(paradox)
library(mlr3tuning)

# Inspect the tunable hyperparameters of the rpart learner.
pst.lrn$param_set

# Search space: tune only the complexity parameter 'cp'.
tune.ps <- ParamSet$new(list(
  ParamDbl$new("cp", lower = 0.001, upper = 1)
))

# NOTE(review): argument names updated to the current mlr3tuning API:
#   'measure' (was 'measures'), 'search_space' (was 'tune_ps'),
#   and trm() — the old term() helper has been removed.
at <- AutoTuner$new(
  learner = pst.lrn,
  resampling = rsmp("holdout"),
  measure = msr("regr.mse"),
  search_space = tune.ps,
  terminator = trm("evals", n_evals = 10),
  tuner = tnr("random_search")
)

# Run the random search (10 evaluations on a holdout split) and
# refit the learner with the best 'cp' on the full training task.
at$train(pst.tsk)

# Tuning result and the hyperparameters of the final learner.
at$model
at$learner$param_set

# In this situation, the tuned model scores worse than the un-tuned model
#   on the training data, but scores better on the test data.
pst.pred.at <- at$predict(pst.tsk)
pst.pred.at$score()   # tuned model, training data
pst.pred$score()      # un-tuned model, training data

pst.prednew.at <- at$predict_newdata(prostate.test)
pst.prednew.at$score()  # tuned model, test data
pst.prednew$score()     # un-tuned model, test data
