# Load the two ICU cohorts from Stata files ----
library(haven)      # read_dta(): import Stata .dta files
library(tidyverse)  # data-manipulation toolkit (dplyr/tidyr/ggplot2, pipes)
# NOTE(review): paths are relative — assumes the .dta files sit in the
# working directory; confirm before running.
mimic_data <- read_dta("MIMIC-IV.dta")
eICU_data <- read_dta("eICU-CRD.dta")


# Multiple imputation of missing values ----
library(mice)  # multiple imputation by chained equations

# Shared imputation settings for both cohorts (was duplicated inline):
# 5 imputed datasets, 10 iterations, predictive mean matching, fixed seed,
# no iteration log printed.
impute_pmm <- function(df) {
  mice(df, m = 5, maxit = 10, method = "pmm", seed = 123, print = FALSE)
}

# Drop the first column (patient ID / record number) — identifier columns
# must not enter the imputation model.
mimic_data <- mimic_data[, -1]
eICU_data <- eICU_data[, -1]
mimic_imp <- impute_pmm(mimic_data)
mimic_com <- mice::complete(mimic_imp)  # first completed dataset
eICU_imp <- impute_pmm(eICU_data)
eICU_com <- mice::complete(eICU_imp)    # first completed dataset

# Restructure for LASSO: in-hospital mortality as the outcome (first column),
# dropping clinical features not used for modelling ----
# Single shared drop list (was duplicated verbatim for both cohorts).
drop_cols <- c("race", "first_careunit", "icustay", "hospstay",
               "deliriumtime", "sepsistime", "icu28dmort")
mimic_com_LASSO <- mimic_com %>%
  select(-all_of(drop_cols)) %>%
  select(hosp_mort, everything())
eICU_com_LASSO <- eICU_com %>%
  select(-all_of(drop_cols)) %>%
  select(hosp_mort, everything())

# LASSO regression for feature selection, with visualisation ----
set.seed(123)
library(glmnet)
colnames(mimic_com_LASSO)  # sanity check: outcome first, predictors after

# Generalized from hard-coded column indices (was [, 1] / [, 2:42]):
# the outcome sits in column 1, every remaining column is a predictor,
# so this works regardless of how many features survive the prep step.
y <- as.matrix(mimic_com_LASSO[, 1])   # outcome: hosp_mort
x <- as.matrix(mimic_com_LASSO[, -1])  # all predictors

# alpha = 1 selects the L1 penalty, i.e. LASSO
lasso_model <- glmnet(x, y, family = "binomial", alpha = 1)
max(lasso_model$lambda)
print(lasso_model)
# Coefficient paths as a function of log(lambda)
plot(lasso_model, xvar = "lambda")

# 10-fold cross-validation to choose the penalty, with visualisation ----
# Seed set immediately before the CV call so fold assignment is reproducible
# on its own and does not silently depend on the distant set.seed above.
# (glmnet itself is deterministic, so the RNG state here is identical to the
# original script's — the folds do not change.)
set.seed(123)
cv_model <- cv.glmnet(x, y, family = "binomial", alpha = 1, nfolds = 10)
plot(cv_model)

# Candidate penalties: lambda.min (best CV deviance) or lambda.1se
# (sparsest model within one SE of the best)
lambda_min <- cv_model$lambda.min
lambda_min
lambda_1se <- cv_model$lambda.1se
lambda_1se

# Extract coefficients at the chosen penalty. `s` is the lambda value:
# larger lambda means stronger regularisation and fewer selected variables.
# lambda.1se (from the cross-validation above) is used here; variables shown
# with a non-zero coefficient are the selected features.
coef_lasso <- coef(lasso_model, s = lambda_1se)
print(coef_lasso)

# Keep only the LASSO-selected features (non-zero coefficients above) ----
# Single shared feature list (was duplicated verbatim for both cohorts,
# risking silent divergence if one copy were edited).
lasso_features <- c("hosp_mort", "age", "weight", "temperature", "heart_rate",
                    "resp_rate", "spo2", "sbp", "wbc",
                    "hemoglobin", "bun", "cr", "glu",
                    "Cl", "P", "inr", "ptt",
                    "aniongap", "gcs", "vent", "crrt",
                    "seda", "sofa_score", "sad", "aki", "stroke")
mimic_ML <- mimic_com_LASSO %>% select(all_of(lasso_features))
eICU_ML <- eICU_com_LASSO %>% select(all_of(lasso_features))

# Machine-learning models on the MIMIC-IV data ----
library(caret)
set.seed(123)
# 70/30 split into training and internal validation sets, stratified on
# the outcome. `list = FALSE` (was `F`, which is reassignable) returns a
# plain index matrix.
index <- createDataPartition(mimic_ML$hosp_mort, p = 0.7, list = FALSE)
train <- mimic_ML[index, ]
test <- mimic_ML[-index, ]

# Convert the outcome and binary predictors in the training set to factors ----
# (was six copy-pasted as.factor() statements)
binary_cols <- c("vent", "crrt", "seda", "sad", "aki", "stroke")
# Outcome goes via as.character() first, matching the original conversion.
train$hosp_mort <- as.factor(as.character(train$hosp_mort))
train[binary_cols] <- lapply(train[binary_cols], as.factor)
str(train)

# Convert the same columns in the test set to factors ----
binary_cols <- c("vent", "crrt", "seda", "sad", "aki", "stroke")
test$hosp_mort <- as.factor(as.character(test$hosp_mort))
test[binary_cols] <- lapply(test[binary_cols], as.factor)
# Fixed: the original inspected `train` again here instead of `test`.
str(test)

# LR model (logistic regression) ----
lm_model <- glm(hosp_mort ~ ., data = train, family = binomial(link = "logit"))
summary(lm_model)
lm_pred <- predict(lm_model, test, type = "response")  # predicted probabilities
threshold <- 0.5
predictions_binary <- ifelse(lm_pred > threshold, 1, 0)
# Fixed: the reference must be the outcome (hosp_mort) — the original
# compared predictions against the `sad` covariate, making this confusion
# matrix meaningless.
confusionMatrix(as.factor(predictions_binary), as.factor(test$hosp_mort))
LR_pred <- predictions_binary

# SVM model (support vector machine) ----
library(e1071)
svm_model <- svm(hosp_mort ~ ., data = train, probability = TRUE)
svm_pred <- predict(svm_model, test, probability = TRUE)
a <- data.frame(svm_pred)
ab <- as.factor(as.character(a$svm_pred))
svm_hosp_mort <- as.factor(test$hosp_mort)
# Select the positive-class probability by name: column order of the
# "probabilities" attribute follows the training factor levels and is not
# guaranteed to put class "1" second (positional [, 2] was fragile).
svm_pred_prob <- attr(svm_pred, "probabilities")[, "1"]
confusionMatrix(data = ab, reference = svm_hosp_mort)
# Back to numeric 0/1 for the XGBoost step below
train$hosp_mort <- as.numeric(as.character(train$hosp_mort))
test$hosp_mort <- as.numeric(as.character(test$hosp_mort))
# Fixed: as.numeric() on a factor returns level codes (1/2), not the 0/1
# labels; go through as.character() to recover the actual class labels.
SVM_pred <- as.numeric(as.character(a$svm_pred))

# XGBoost model ----
library(xgboost)
# hosp_mort is numeric 0/1 again at this point; data.matrix() encodes the
# factor predictors as their integer level codes.
train_matrix <- xgb.DMatrix(data.matrix(train[, -1]), label = train$hosp_mort)
test_matrix <- xgb.DMatrix(data.matrix(test[, -1]), label = test$hosp_mort)
params <- list(
  objective = "binary:logistic",
  eval_metric = "logloss",
  max_depth = 3,
  eta = 0.1,
  gamma = 0.5,
  colsample_bytree = 1,
  min_child_weight = 1,
  subsample = 0.5
)
# Validation loss is monitored on the test split; training stops once it
# fails to improve for 10 rounds.
watchlist <- list(train = train_matrix, val = test_matrix)
xgb_model <- xgb.train(
  params = params,
  data = train_matrix,
  nrounds = 125,
  watchlist = watchlist,
  early_stopping_rounds = 10,
  print_every_n = 10,
  maximize = FALSE
)
xgb_pred_prob <- predict(xgb_model, test_matrix)  # predicted probabilities
xgb_pred <- ifelse(xgb_pred_prob > 0.5, 1, 0)     # hard labels at 0.5 cutoff
xgb_pred_factor <- factor(xgb_pred, levels = c(0, 1))
test_hosp_mort_factor <- factor(test$hosp_mort, levels = c(0, 1))
confusionMatrix(data = xgb_pred_factor, reference = test_hosp_mort_factor)

# RF model (random forest) ----
library(randomForest)
# Classification needs a factor outcome, so convert back from numeric.
train$hosp_mort <- as.factor(train$hosp_mort)
test$hosp_mort <- as.factor(test$hosp_mort)
# 500 trees, 6 candidate variables tried at each split
rf_model <- randomForest(hosp_mort ~ ., data = train, ntree = 500, mtry = 6)
rf_pred <- predict(rf_model, newdata = test)  # predicted class labels
confusionMatrix(data = rf_pred, reference = test$hosp_mort)

# DT model (decision tree) ----
library(rpart)
dt_model <- rpart(hosp_mort ~ ., data = train, method = "class")
# Second probability column is the positive class; harden at a 0.5 cutoff.
dt_pred_prob <- predict(dt_model, newdata = test, type = "prob")[, 2]
dt_pred <- as.numeric(dt_pred_prob > 0.5)
confusionMatrix(factor(dt_pred, levels = c("0", "1")), test$hosp_mort)

# NB model (naive Bayes) ----
library(e1071)
nb_model <- naiveBayes(hosp_mort ~ ., data = train)
# type = "raw" yields class probabilities; second column is the positive class.
nb_pred_prob <- predict(nb_model, newdata = test, type = "raw")[, 2]
nb_pred <- as.numeric(nb_pred_prob > 0.5)
confusionMatrix(factor(nb_pred, levels = c("0", "1")), test$hosp_mort)

# KNN model (k-nearest neighbours) ----
library(kknn)
# k = 10 neighbours, Euclidean distance (distance = 2), unweighted votes
knn_model <- kknn(hosp_mort ~ ., train, test, k = 10, distance = 2,
                  kernel = "rectangular")
# Probability of the positive class ("1") for each test row
knn_pred_prob <- as.numeric(predict(knn_model, newdata = test,
                                    type = "prob")[, "1"])
threshold <- 0.5
knn_pred <- ifelse(knn_pred_prob > threshold, 1, 0)
confusionMatrix(factor(knn_pred, levels = c("0", "1")), test$hosp_mort)

# Assemble all predictions for ROC plotting (pROC) ----
library(pROC)
library(ggplot2)
test_hosp_mort <- test$hosp_mort
ML_ROC <- data.frame(test_hosp_mort,
                     lm_pred, LR_pred,
                     svm_pred, svm_pred_prob,
                     xgb_pred_prob, xgb_pred,
                     rf_pred,
                     dt_pred_prob, dt_pred,
                     nb_pred_prob, nb_pred,
                     knn_pred, knn_pred_prob)
# roc() needs numeric 0/1; the factor columns are converted via
# as.character() so factor level codes are never used by accident.
factor_cols <- c("test_hosp_mort", "svm_pred", "rf_pred")
ML_ROC[factor_cols] <- lapply(ML_ROC[factor_cols],
                              function(v) as.numeric(as.character(v)))
str(ML_ROC)
# Build one ROC object per model (response first, predictor second) ----
LR_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$lm_pred); print(LR_ROC)
SVM_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$svm_pred_prob); print(SVM_ROC)
XGB_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$xgb_pred_prob); print(XGB_ROC)
RF_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$rf_pred); print(RF_ROC)
DT_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$dt_pred_prob); print(DT_ROC)
NB_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$nb_pred_prob); print(NB_ROC)
KNN_ROC <- roc(ML_ROC$test_hosp_mort, ML_ROC$knn_pred_prob); print(KNN_ROC)

# AUC, then the 95% confidence interval, for every model (same output
# order as before: all AUCs first, then all CIs)
roc_objects <- list(LR_ROC, SVM_ROC, XGB_ROC, RF_ROC,
                    DT_ROC, NB_ROC, KNN_ROC)
for (r in roc_objects) print(auc(r))
for (r in roc_objects) print(ci.auc(r))

# Overlay all ROC curves in a single plot ----
# Optional: write to file instead of the active device
# png("ML ROC curves.png",width = 800,height = 800)

# Base curve: logistic regression
plot.roc(LR_ROC,
         max.auc.polygon = FALSE,  # no shaded polygon over the plot area
         smooth = FALSE,           # empirical (unsmoothed) curve
         main = "Comparison of different ML models of ROC curves",
         col = "red",
         legacy.axes = TRUE,       # x axis runs 0 -> 1 as 1 - specificity
         lwd = 2,
         print.auc = TRUE, print.auc.x = 0.2, print.auc.y = 0.8)

# Remaining curves added to the same axes; AUC labels stacked downwards
# in 0.05 steps. Each entry: ROC object, colour, label y position.
overlays <- list(list(SVM_ROC, "orange", 0.75),
                 list(XGB_ROC, "black", 0.70),
                 list(RF_ROC, "pink", 0.65),
                 list(DT_ROC, "skyblue", 0.60),
                 list(NB_ROC, "blue", 0.55),
                 list(KNN_ROC, "purple", 0.50))
for (ov in overlays) {
  plot.roc(ov[[1]],
           add = TRUE,
           col = ov[[2]],
           smooth = FALSE,
           lwd = 2,
           print.auc = TRUE, print.auc.x = 0.2, print.auc.y = ov[[3]])
}
# Add a legend mapping each model to its curve colour
# (labels: LR, SVM, XGBoost, RF, DT, NB, KNN — kept verbatim as they are
# rendered into the plot)
legend("bottomright",
       legend = c("逻辑回归模型（LR）","支持向量机模型（SVM）","极限梯度提升模型（XGBoost）","随机森林模型（RF）","决策树模型（DT）","朴素贝叶斯模型（NB）","K最近邻模型（KNN）"),
       col = c("red","orange","black","pink","skyblue","blue","purple"),
       lwd = 1,
       cex=0.7
)