library(tidymodels) # tidymodels framework
# ✖ purrr::discard() masks scales::discard()
# ✖ dplyr::filter()  masks stats::filter()
# ✖ dplyr::lag()     masks stats::lag()
# ✖ recipes::step()  masks stats::step()
# To use any of the masked functions above, qualify them with the
# package name, e.g. purrr::discard().

library(naniar)   # handling and visualising missing data
library(readxl)   # reading Excel files
library(themis)   # handling class imbalance (duplicate call removed)
library(finetune) # hyper-parameter tuning
library(discrim)  # naive Bayes engine
# NOTE: doMC (fork-based parallel backend) is deliberately NOT loaded
# here; it is unavailable on Windows and is loaded conditionally in
# the OS-specific parallel-setup section further down.

# Load the Excel data; every column is read as numeric.
# read_excel() warns while parsing INR|TG|HDL, but the values convert
# correctly once loaded. TODO: confirm why the decimal values in the
# Excel sheet are not type-guessed correctly on read.
mydata <- read_excel(file.path(getwd(), "data", "aaaalldata.xlsx"),
                     col_types = c("numeric"))
# The outcome must be a factor for classification modelling.
mydata$diagnosis <- as.factor(mydata$diagnosis)

# Stratified 70/30 train/test split, reproducible via the seed.
set.seed(1)
data_split <- initial_split(mydata,
                            prop = 0.7,
                            strata = diagnosis)
# Restrict both partitions to the modelling variables. The column
# list is defined once so the train and test selections can never
# drift apart (the original duplicated the hard-coded vector).
model_cols <- c("diagnosis", "sex", "ALL", "N", "INR", "Cr",
                "bleeding", "time", "Cervical")
train_data <- training(data_split)[, model_cols]
test_data  <- testing(data_split)[, model_cols]

# Preprocessing recipes ----
# All three recipes share the same [0, 1] range scaling of the
# numeric predictors; they differ only in how (or whether) class
# imbalance is addressed, so the two resampling recipes are derived
# from the plain base recipe.

# Plain recipe: range scaling only, no resampling.
base_recp <- recipe(diagnosis ~ ., data = train_data) |>
  step_range(INR, Cr, bleeding, time)

# SMOTE-NC oversampling of the minority class; the oversampling
# ratio and neighbour count are tuned.
smote_recp <- base_recp |>
  step_smotenc(diagnosis,
               over_ratio = tune(),
               neighbors = tune(id = "smote_neighbors"),
               seed = 123)

# Random downsampling of the majority class; the ratio is tuned.
downsample_recp <- base_recp |>
  step_downsample(diagnosis,
                  under_ratio = tune(id = "under_ratio"),
                  seed = 123)


# Model specifications ----

# Gradient-boosted trees; counts = FALSE makes mtry a proportion of
# predictors instead of a count (matched by mtry_prop() below).
xgboost_spec <- boost_tree(
  mtry = tune(),
  trees = tune(),
  min_n = tune(),
  tree_depth = tune(),
  learn_rate = tune(),
  loss_reduction = tune(),
  sample_size = tune(),
  stop_iter = tune()
) |>
  set_engine("xgboost", counts = FALSE) |>
  set_mode("classification")

# Single CART decision tree.
decision_tree_spec <- decision_tree(
  cost_complexity = tune(),
  tree_depth = tune(),
  min_n = tune()
) |>
  set_engine("rpart") |>
  set_mode("classification")

# K-nearest neighbours.
kknn_spec <- nearest_neighbor(
  neighbors = tune(),
  weight_func = tune(),
  dist_power = tune()
) |>
  set_engine("kknn") |>
  set_mode("classification")

# Plain logistic regression (no tuning parameters).
logistic_spec <- logistic_reg() |>
  set_engine("glm") |>
  set_mode("classification")

# Naive Bayes.
naive_bayes_spec <- naive_Bayes(
  smoothness = tune(),
  Laplace = tune()
) |>
  set_engine("naivebayes") |>
  set_mode("classification")

# Random forest; counts = FALSE as for xgboost above.
random_forest_spec <- rand_forest(
  mtry = tune(),
  trees = tune(),
  min_n = tune()
) |>
  set_engine("ranger", counts = FALSE) |>
  set_mode("classification")

# RBF-kernel support vector machine.
svm_spec <- svm_rbf(
  cost = tune(),
  rbf_sigma = tune(),
  margin = tune()
) |>
  set_engine("kernlab") |>
  set_mode("classification")

# Cross every preprocessing recipe with every model specification
# (3 recipes x 7 models = 21 workflows).
# NOTE(review): the downsampling recipe is passed WITHOUT a name, so
# workflow_set() auto-generates the id prefix "recipe_3"; every
# "recipe_3_*" id used below depends on this accident. Naming it
# (e.g. downsample = downsample_recp) would be clearer, but would
# require updating all downstream workflow ids in lockstep.
all_workflows = workflow_set(preproc = list(smote = smote_recp,
                                            base = base_recp,
                                            downsample_recp),
                            models = list(xgboost = xgboost_spec,
                                          decision_tree = decision_tree_spec,
                                          kknn = kknn_spec,
                                          logistic = logistic_spec,
                                          naive_bayes = naive_bayes_spec,
                                          random_forest = random_forest_spec,
                                          svm = svm_spec),
                            cross = TRUE)


# Workflow-specific tuning parameter ranges ----
# mtry is expressed as a proportion (both tree engines were created
# with counts = FALSE), and every downsampling workflow tunes
# under_ratio over the same range. Three small helpers replace the
# original copy-pasted pipelines; the resulting objects are identical.

# Parameter set with mtry re-scoped to a proportion in [0.01, 1].
prop_mtry <- function(wf_id) {
  all_workflows |>
    extract_parameter_set_dials(id = wf_id) |>
    update(mtry = mtry_prop(c(0.01, 1)))
}

# Parameter set with only under_ratio re-scoped to [0.1, 5].
ratio_only <- function(wf_id) {
  all_workflows |>
    extract_parameter_set_dials(id = wf_id) |>
    update(under_ratio = under_ratio(c(0.1, 5)))
}

# Parameter set with both adjustments (downsampled tree models).
prop_and_ratio <- function(wf_id) {
  all_workflows |>
    extract_parameter_set_dials(id = wf_id) |>
    update(mtry = mtry_prop(c(0.01, 1)),
           under_ratio = under_ratio(c(0.1, 5)))
}

smote_rand_param      <- prop_mtry("smote_random_forest")
downsample_rand_param <- prop_and_ratio("recipe_3_random_forest")
base_rand_param       <- prop_mtry("base_random_forest")

smote_xgb_param      <- prop_mtry("smote_xgboost")
downsample_xgb_param <- prop_and_ratio("recipe_3_xgboost")
base_xgb_param       <- prop_mtry("base_xgboost")

downsample_svm_param           <- ratio_only("recipe_3_svm")
downsample_decision_tree_param <- ratio_only("recipe_3_decision_tree")
downsample_kknn_param          <- ratio_only("recipe_3_kknn")
downsample_logistic_param      <- ratio_only("recipe_3_logistic")
downsample_naive_bayes_param   <- ratio_only("recipe_3_naive_bayes")

# Attach each customised parameter set to its workflow. A named list
# plus a loop replaces the original eleven-step option_add() chain;
# the resulting workflow set is identical.
param_overrides <- list(
  smote_random_forest    = smote_rand_param,
  recipe_3_random_forest = downsample_rand_param,
  base_random_forest     = base_rand_param,
  smote_xgboost          = smote_xgb_param,
  recipe_3_xgboost       = downsample_xgb_param,
  base_xgboost           = base_xgb_param,
  recipe_3_svm           = downsample_svm_param,
  recipe_3_decision_tree = downsample_decision_tree_param,
  recipe_3_kknn          = downsample_kknn_param,
  recipe_3_logistic      = downsample_logistic_param,
  recipe_3_naive_bayes   = downsample_naive_bayes_param
)
for (wf_id in names(param_overrides)) {
  all_workflows <- option_add(all_workflows,
                              param_info = param_overrides[[wf_id]],
                              id = wf_id)
}


# Resampling scheme: 5-fold CV repeated 5 times, stratified on the
# outcome so every fold keeps the class balance.
# (Fixes the `resmaples` spelling of the original.)
set.seed(123)
resamples <- vfold_cv(data = train_data,
                      v = 5,
                      repeats = 5,
                      strata = diagnosis)

# Register a parallel backend for the current OS, using ~80% of the
# available cores (at least 1). Sys.info()[["sysname"]] returns
# "Darwin" on macOS — the original's extra "macOS" comparison could
# never match and has been removed.
set.seed(123)
system_name <- Sys.info()[["sysname"]]
n_cores <- max(1L, round(parallel::detectCores() * 0.8))
if (system_name == "Darwin") {
  library(doMC)
  registerDoMC(cores = n_cores)
} else if (system_name == "Windows") {
  library(doParallel)
  # Keep a handle on the cluster so it can be shut down with
  # parallel::stopCluster(cl) once tuning has finished (the original
  # discarded the handle, leaking the worker processes).
  cl <- makeCluster(n_cores)
  registerDoParallel(cl)
} else {
  stop("Unsupported operating system: ", system_name)
}

# Bayesian hyper-parameter search over every workflow in the set;
# event_level = "second" treats the second factor level as the event.
tune_result <- workflow_map(object = all_workflows,
                            fn = "tune_bayes",
                            verbose = TRUE,
                            seed = 123,
                            resamples = resamples,
                            initial = 10,
                            iter = 50,
                            metrics = metric_set(roc_auc, f_meas, sens, spec),
                            control = control_bayes(verbose = TRUE,
                                                    verbose_iter = TRUE,
                                                    no_improve = 30,
                                                    parallel_over = "everything",
                                                    event_level = "second"))



# Rank all workflows by ROC AUC (best configuration of each) and
# label each point with its workflow id.
# Fixes `hjuest` -> `hjust`: ggplot2 ignored the misspelled
# parameter, so the labels were never right-justified.
autoplot(tune_result,
         rank_metric = 'roc_auc',
         metric = 'roc_auc',
         select_best = TRUE) +
  geom_text(aes(y = mean - 0.2, label = wflow_id), angle = 90, hjust = 1) +
  lims(y = c(0.1, 1)) +
  theme_bw() +
  theme(legend.position = 'none')


# Rank all workflows by sensitivity (best configuration of each).
# Fixes `hjuest` -> `hjust`, as in the ROC AUC plot.
autoplot(tune_result,
         rank_metric = 'sens',
         metric = 'sens',
         select_best = TRUE) +
  geom_text(aes(y = mean + 0.5, label = wflow_id), angle = 90, hjust = 1) +
  lims(y = c(0, 1.5)) +
  theme_bw() +
  theme(legend.position = 'none')


# Inspect the tuning results for the downsample + xgboost workflow
# (auto-generated id "recipe_3_xgboost"); extract the result object
# once instead of four times.
downsample_xgb_result <- tune_result |>
  extract_workflow_set_result(id = "recipe_3_xgboost")

downsample_xgb_result |> show_best(metric = 'roc_auc')
downsample_xgb_result |> show_best(metric = 'sens')

# The original assigned select_best(metric = 'sens') to
# optim_best_param and then immediately overwrote it with the ROC AUC
# selection; only the ROC AUC winner was ever used downstream, so the
# dead assignment has been dropped.
optim_best_param <- downsample_xgb_result |>
  select_best(metric = 'roc_auc')

# Finalise the chosen workflow with the best parameters, refit it on
# the full training set, and evaluate on the held-out test set.
set.seed(123)
final_best_model <- tune_result |>
  extract_workflow(id = "recipe_3_xgboost") |>
  finalize_workflow(parameters = optim_best_param) |>
  fit(data = train_data)

# Attach class and probability predictions to the test data.
final_prediction <- augment(final_best_model, new_data = test_data)

# Test-set metrics; event_level = "second" treats the second factor
# level of `diagnosis` as the positive class.
roc_auc(final_prediction, truth = diagnosis, .pred_1, event_level = "second")
accuracy(data = final_prediction, truth = diagnosis, .pred_class)
sensitivity(data = final_prediction, truth = diagnosis, .pred_class,
            event_level = "second")
specificity(data = final_prediction, truth = diagnosis, .pred_class,
            event_level = "second")
ppv(data = final_prediction, truth = diagnosis, .pred_class,
    event_level = "second")
conf_mat(data = final_prediction, truth = diagnosis, .pred_class)
f_meas(data = final_prediction, truth = diagnosis, .pred_class,
       event_level = 'second')

# objects <- ls()
# 
# # Iterate over every object and save each to its own .RData file
# for (obj in objects) {
#   # Build the file name
#   filename <- paste0(obj, ".RData")
#   
#   # Save the single object to its own file
#   save(list=obj, file=filename)
#   
#   cat("Saved object", obj, "to file", filename, "\n")
# }
