# Install dependencies. Fixed: the original unconditionally reinstalled every
# package on every run; install only the ones that are actually missing.
required_pkgs <- c("ggpubr", "corrplot",
                   "glmnet", "caret", "CBCgrps",
                   "tidyverse", "rms", "UBL", "dplyr",
                   "pROC", "openxlsx", "data.table",
                   "xgboost", "Matrix", "caTools", "plyr")
missing_pkgs <- setdiff(required_pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
library(corrplot)
library(glmnet)
library(caret)
library(CBCgrps)
library(nortest)
library(tidyverse)
library(ggpubr)
library(rms)
library(pROC)
library(viridis)
library(UBL)
library(dplyr)
library(openxlsx)
library(plyr)
library(data.table)
library(xgboost)
library(Matrix)
library(caTools)
library(isotone)

# Project root; the data file and helper script live directly under it.
base_path <- "/Users/xuxusheng/opt/program/project/git/R/machine-study"

# Full path to the raw Excel dataset (file_name carries the leading "/").
file_name <- "/aaaalldata.xlsx"
data_path <- paste0(base_path, file_name)

# Shared helpers used below: data_packet, dummy_vars, min_max_scale,
# set_column_factor, set_column_num.
base_script_name <- "/baseScript.R"
base_script_path <- paste0(base_path, base_script_name)
source(base_script_path)

# NOTE(review): setwd() in a script is fragile; kept because relative-path
# output (e.g. the commented-out write.csv calls below) relies on it.
setwd(base_path)
getwd()

cat("初始化包完成，开始加载数据:",data_path)
# Load the raw dataset from Excel.
mydata <- read.xlsx(data_path)
# Drop every row containing a missing value.
mydata <- na.omit(mydata)
# Remove variables with an extremely low positive rate.
mydata <- mydata %>%
  select(-cirrhosis, -Graves, -heparin)
# Coerce lab values that were read as text to numeric.
# Fixed: one across() call replaces four copy-pasted conversions.
mydata <- mydata %>%
  mutate(across(all_of(c("INR", "TG", "HDL", "plt")),
                ~ as.numeric(as.character(.x))))
#设置变量类型为Factor
# mydata <- set_column_factor(mydata,names(mydata)[1:30])

#TODO 基线标，暂不实现，后续调用baselineTable中的函数实现
#source(baselineTablePath)
#baselineTable(mydata)

# Split into training and test sets (70/30) via the baseScript.R helper.
all_data <- data_packet(mydata, proportion = 0.7)
train_data <- all_data$train_data
test_data <- all_data$test_data

diag_table <- table(train_data$diagnosis)
cat("分组后的训练集阴性/阳性比例为：", diag_table / sum(diag_table))

# One-hot encode the multi-level categorical predictors.
onehot_cols <- c("Cervical", "pathology")
train_data <- dummy_vars(train_data, onehot_cols)
test_data <- dummy_vars(test_data, onehot_cols)

# LASSO: min-max scale the numeric predictors, fit the full regularization
# path, then plot the coefficient trajectories.
data2 <- train_data %>%
  mutate_if(.predicate = is.numeric,
            .funs = min_max_scale) %>%
  as.data.frame()

set.seed(123)
x <- data.matrix(data2[, -1])        # predictors (column 1 is the outcome)
y <- as.numeric(unlist(data2[, 1]))  # binary outcome as a numeric vector
lasso <- glmnet(x, y, family = "binomial", nlambda = 1000, alpha = 1)
print(lasso)
plot(lasso, xvar = "lambda", label = TRUE)

# Long-format coefficient path for a custom ggplot version of the plot;
# `variable` is the 0-based step index parsed from the s0, s1, ... columns.
tmp <- as_tibble(as.matrix(coef(lasso)), rownames = "coef") %>%
  pivot_longer(cols = -coef,
               names_to = "variable",
               names_transform = list(variable = parse_number),
               values_to = "value") %>%
  group_by(variable) %>%
  mutate(lambda = lasso$lambda[variable + 1],
         norm = sum(if_else(coef == "(Intercept)", 0, abs(value))))
# NOTE(review): the magic index 557 drops early-path rows; re-check it if
# nlambda or the predictor set changes.
# Fixed: `acute` is not a ggplot2 aesthetic (it was silently ignored with a
# warning); grouping by coef is what was intended.
ggplot(tmp[-c(1:557), ], aes(log(lambda), value, color = coef, group = coef)) +
  geom_line(linewidth = 1.2) +
  labs(x = "Log Lambda", y = "Coefficients") +
  theme_bw()

# Cross-validated selection of the regularization strength.
lasso.cv <- cv.glmnet(x, y, alpha = 1, nfolds = 50, family = "binomial")
plot(lasso.cv)
lasso.cv$lambda.min  # lambda minimizing CV error
lasso.cv$lambda.1se  # largest lambda within one standard error of the minimum

# Variables with non-zero coefficients at lambda.1se.
coefficients <- coef(lasso.cv, s = "lambda.1se")
coefficients_matrix <- as.matrix(coefficients)
variable_names <- rownames(coefficients_matrix)
# Kept for interactive inspection of the surviving coefficient values.
non_zero_coeffs <- coefficients_matrix[coefficients_matrix != 0 & !is.na(coefficients_matrix)]

# Fixed: drop the intercept by name instead of with [-1], which assumed the
# intercept is always the first non-zero entry and would silently discard a
# real predictor whenever the intercept shrank to zero.
non_zero_names <- variable_names[coefficients_matrix != 0 & !is.na(coefficients_matrix)]
selected_variables <- setdiff(non_zero_names, "(Intercept)")
columns <- c(colnames(train_data)[1], selected_variables)
train_data <- train_data[, columns]
test_data <- test_data[, columns]
# NOTE(review): these factor columns are hard-coded to one particular LASSO
# run; re-check them whenever the selected variable set changes.
train_data <- set_column_factor(train_data, c("diagnosis", "sex", "size", "Blood", "N", "Cervical.5"))
test_data <- set_column_factor(test_data, c("diagnosis", "sex", "size", "Blood", "N", "Cervical.5"))
train_data$time <- as.integer(as.character(train_data$time))
test_data$time <- as.integer(as.character(test_data$time))
# LASSO result: lambda.1se selected 6 variables.

# uni_glm_model <- function(x){
#   FML <- as.formula(paste0("diagnosis==1~",x))
#   glm1 <- glm(FML,data = train_data,family = binomial)
#   glm2 <- summary(glm1)
#   OR <- round(exp(coef(glm1)),2)
#   SE <- round(glm2$coefficients[,2],3)
#   CI2.5<- round(exp(coef(glm1)-1.96*SE),2)
#   CI97.5<- round(exp(coef(glm1)+1.96*SE),2)
#   CI <- paste0( CI2.5,'-',CI97.5)
#   B <- round(glm2$coefficients[,1],3)
#   Z<- round(glm2$coefficients[,3],3)
#   P <- round(glm2$coefficients[,4],3)
#   uni_glm_model <-data.frame('characteristics'=x,
#                              'B'=B,
#                              'SE'=SE,
#                              'OR'=OR,
#                              'CI'=CI,
#                              'Z'=Z,
#                              'P'=P)[-1,]
#   return(uni_glm_model)
# }
# 
# all_column_names = colnames(train_data)[2:length(names(train_data))]
# uni_glm <- lapply(all_column_names,uni_glm_model)
# uni_glm <- ldply(uni_glm,data.frame)
# View(uni_glm)
# uni_glm1 <- uni_glm[uni_glm$P <= 0.05, ] #挑选有意义的结果
# uni_glm1 <- as.data.frame(uni_glm1)
# 
# 
# fml <- as.formula(paste0('diagnosis==1~ ',paste0(uni_glm$characteristics[uni_glm$P<0.05],collapse = '+')))
# fml
# #多因素enter回归
# modelA <- glm(fml,data=train_data,family=binomial)
# modelA
# summary(modelA)
# 
# modelX <- glm(diagnosis~1,data=train_data,family = binomial)
# summary(modelX)
# 
# fml1 <- as.formula(paste("~",paste0(uni_glm$characteristics[uni_glm$P<0.05],collapse = '+'),sep =" "))
# fml1
# modelB <- step(modelX,scope=list(upper=fml1,lower=~1)
#                ,data=train_data,family=binomial,direction = "forward")
# summary(modelB)
# 
# modelC <- step(modelA,direction = "backward")
# summary(modelC)
# #多因素双向（both）
# modelD <- step(modelA,direction = "both")
# summary(modelD)
# #
# # cbind(coef=coef(modelD),confint(modelD))
# # exp(cbind(OR=coef(modelD),confint(modelD)))
# 
# AIC(modelA,modelB,modelC,modelD)
# 
# anova(modelA,modelC,test="Chisq")
# anova(modelA,modelB,test="Chisq")
# 
# modelC <- step(modelA,direction = "both")
# modelC
# 
# glm3 <- summary(modelC)
# glm3
# glm3$coefficients
# #输出数据
# OR <- round(exp(glm3$coefficients[,1]),2)
# SE <- round(glm3$coefficients[,2],3)
# CI2.5<- round(exp(coef(modelD)-1.96*SE),2)
# CI97.5<- round(exp(coef(modelD)+1.96*SE),2)
# CI <- paste0( CI2.5,'-',CI97.5)
# B <- round(glm3$coefficients[,1],3)
# Z<- round(glm3$coefficients[,3],3)
# P <- round(glm3$coefficients[,4],3)
# mlogit <- data.frame(
#   'B'=B,
#   'SE'=SE,
#   'OR'=OR,
#   'CI'=CI,
#   'Z'=Z,
#   'P'=P)[-1,]
# columns<- rownames(mlogit)
# columns <-c("diagnosis",columns)
# 

# 
# # fml7 <- as.formula(dead==1~ CRP + Sodium + Potassium + 
# #                      BUN + Platelets + age + HR )
# fml7 <- as.formula(paste("diagnosis==1~",paste0(train_data,collapse = '+'),sep =" "))
# model7 <- glm(fml7,data=train_data,family = binomial(logit))
# train_data$predmodel7 <- predict(newdata=train_data,model7,"response")
# devmodelA <- roc(diagnosis ~ predmodel7, data = train_data, smooth = F)
# round(auc(devmodelA),3)
# round(ci(auc(devmodelA)),3)
# 
# test_data$predmodel7 <- predict(newdata=test_data,model7,"response")
# vadmodelA <- roc(dead~predmodel7,data=test_data,smooth=F)
# round(auc(vadmodelA),3)
# round(ci(auc(vadmodelA)),3)
# 
# install.packages(c("calibrate","rms","MASS"))
# library(calibrate)
# library(rms)
# library(MASS)
# #trainData$dead <- as.numeric(as.character(trainData$dead)) - 1  #假设 "dead" 是因子且水平为 "0" 和 "1"
# val.prob(train_data$predmodel7,train_data$diagnosis)
# #验证人群calibrate
# val.prob(test_data$predmodel7,test_data$diagnosis)
# 
# plot(devmodelA,print.auc=TRUE,print.thres=TRUE,main="ROC CORVE",
#      legacy.axes=T,col="blue",print.thres.col="blue",identity.col="blue",
#      identity.ity=1,identity.lwd=1)
########## Variable selection method 3: Boruta ##########
# Fixed: install only when missing instead of unconditionally mid-script.
if (!requireNamespace("Boruta", quietly = TRUE)) {
  install.packages("Boruta")
}
library(Boruta)

set.seed(1234)
Var.Selec <- Boruta(diagnosis ~ ., data = train_data,
                    pValue = 0.01,       # confidence level of the importance test
                    mcAdj = TRUE,        # multiple-comparison adjustment
                    maxRuns = 1000,
                    doTrace = 0,
                    holdHistory = TRUE,  # keep history for plotImpHistory below
                    getImp = getImpRfZ)  # random-forest Z-score importance
Boruta::plotImpHistory(Var.Selec)
print(Var.Selec)
plot(Var.Selec, las = 2, xlab = '', main = 'Variable Importance')
attStats(Var.Selec)
getConfirmedFormula(Var.Selec)

# NOTE(review): `columns` here is still the LASSO selection computed above,
# not the Boruta result — confirm this reuse is intentional.
train_data <- train_data[, columns]
test_data <- test_data[, columns]
train_data <- set_column_factor(train_data, c("diagnosis", "sex", "ALL", "N", "Cervical.5"))
test_data <- set_column_factor(test_data, c("diagnosis", "sex", "ALL", "N", "Cervical.5"))
train_data$time <- as.integer(as.character(train_data$time))
test_data$time <- as.integer(as.character(test_data$time))
train_data$INR <- as.double(as.character(train_data$INR))
test_data$INR <- as.double(as.character(test_data$INR))
train_data$Cr <- as.integer(as.character(train_data$Cr))
test_data$Cr <- as.integer(as.character(test_data$Cr))
train_data$bleeding <- as.integer(as.character(train_data$bleeding))
test_data$bleeding <- as.integer(as.character(test_data$bleeding))
# Boruta result: diagnosis ~ sex + ALL + N + INR + Cr + bleeding + time + Cervical.5

## Keep the intersection of the LASSO and Boruta selections.
columns <- c("diagnosis", "sex", "N", "time", "Cervical.5")
# Subset to the agreed columns, coerce categoricals to factor, time to integer.
prep_types <- function(df) {
  df <- df[, columns]
  df <- set_column_factor(df, c("diagnosis", "sex", "N", "Cervical.5"))
  df$time <- as.integer(as.character(df$time))
  df
}
train_data <- prep_types(train_data)
test_data <- prep_types(test_data)

# SMOTE: rebalance the training set.
# Fixed: the original computed per-class proportions and then immediately
# overwrote both entries (dead computation), and passed a factor as the
# names vector to setNames; build the resampling spec directly while still
# preserving any class levels other than "0"/"1".
category_counts <- table(train_data$diagnosis)
C_list <- as.list(category_counts / sum(category_counts))
C_list[["0"]] <- 0.7  # keep 70% of the negative (majority) class
C_list[["1"]] <- 6    # oversample the positive (minority) class 6x
print(C_list)

# SmoteClassif removes or synthesizes cases per the class ratios above.
train_new <- SmoteClassif(diagnosis ~ .,
                          C.perc = C_list,
                          as.data.frame(train_data),
                          k = 1,          # single nearest neighbour for synthesis
                          dist = "HEOM")  # heterogeneous distance (mixed types)

table(train_new$diagnosis)

# Min-max scale the numeric columns of the rebalanced train set and the
# test set. Fixed: superseded mutate_if replaced with mutate(across(...)).
dat.train <- train_new %>%
  mutate(across(where(is.numeric), min_max_scale)) %>%
  as.data.frame()
dat.test <- test_data %>%
  mutate(across(where(is.numeric), min_max_scale)) %>%
  as.data.frame()
# write.csv(dat.train, file = "dat.train.csv")
# write.csv(dat.test, file = "dat.test.csv")

# Coerce every column (including the factors) to numeric for xgboost.
dat_train <- set_column_num(dat.train, colnames(dat.train))
dat_test <- set_column_num(dat.test, colnames(dat.test))

# Split predictors/outcome; column 1 is the numeric-coded diagnosis.
x_train <- dat_train[,-1]
y_train <- dat_train[,1]
# Convert features and labels to xgboost's DMatrix format.
# NOTE(review): binary:logistic requires labels in {0, 1}; confirm that
# set_column_num encodes diagnosis as 0/1 rather than factor codes 1/2.
dtrain <- xgb.DMatrix(data=as.matrix(x_train),label=dat_train$diagnosis)
X_test <- dat_test[,-1]
y_test <- as.factor(dat_test$diagnosis)
dtest <- xgb.DMatrix(data=as.matrix(X_test),label=dat_test$diagnosis)
set.seed(123)
# Hyper-parameters for the gradient-boosted trees (binary log-loss objective).
params <- list(objective="binary:logistic",eval_metric="logloss",
               eta=0.25,max_depth=5,gamma=1,
               colsample_bytree=0.5,
               min_child_weight=1,
               subsample=0.5)

# Train the xgboost model (100 boosting rounds).
# xgb_model_final <- xgboost(params = params,data=dtrain,nrounds=3000)
xgb_model_final <- xgb.train(params = params,data=dtrain,nrounds=100)

# Predicted probabilities on the training and test sets.
train_predictions <- predict(xgb_model_final, newdata = dtrain, outputmargin = FALSE)
test_predictions <- predict(xgb_model_final, newdata = dtest, outputmargin = FALSE)

# Hard-classify at the 0.5 threshold and report training accuracy.
train_predictions1 <- as.numeric(train_predictions > 0.5)
accuracy <- mean(train_predictions1 == y_train)
print(paste("训练集准确率", accuracy))

# Same thresholding and accuracy on the test set.
test_predictions1 <- as.numeric(test_predictions > 0.5)
accuracy <- mean(test_predictions1 == y_test)
print(paste("测试集准确率", accuracy))

# index <- 1:length(test_predictions)
# 
# fit <- gpava(y = train_predictions, solver = weighted.mean, ties = "primary")
# print(fit$z)
# y_test_calibrated <- predict(fit, newdata = data.frame(x = y_test))
# 
# 
# y_train_pred_calibrated <- ifelse(fit$z > 0.5, 1, 0)
# accuracy <- mean(y_train_pred_calibrated==y_train)
# print(paste("训练集准确率",accuracy))
# y_test_pred_calibrated <- ifelse(y_test_calibrated > 0.5, 1, 0)

#### Training-set calibration curve (rms::val.prob); other plot styles exist.
# as.numeric(factor) - 1 remaps the factor codes to 0/1.
# NOTE(review): assumes diagnosis is still a factor with levels "0","1" here
# (it was set by set_column_factor above) — confirm if that step changes.
val.prob(train_predictions,                     
         as.numeric(dat.train$diagnosis)-1,
         logistic.cal = FALSE,
         statloc = F,
         riskdist = "calibrated",
         legendloc = c(0.8,0.25))


#### Test-set calibration curve (rms::val.prob).
val.prob(test_predictions,                     
         as.numeric(dat.test$diagnosis)-1,
         logistic.cal = FALSE,
         statloc = F,
         riskdist = "calibrated",
         legendloc = c(0.8,0.25))

###使用保序回归（Isotonic regression）进行概率校准

