# Package setup.
# NOTE: require() returns FALSE instead of erroring, so it is used here only
# to trigger a one-time install; the library() calls below fail loudly if a
# package still cannot be loaded.
if (!require(gbm)) install.packages("gbm")
if (!require(caret)) install.packages("caret")
if (!require(pROC)) install.packages("pROC")
if (!require(MatchIt)) install.packages("MatchIt")
# Bug fix: dplyr is used throughout (%>%, mutate, filter, group_by, summarise)
# but was never installed or attached.
if (!require(dplyr)) install.packages("dplyr")

library(gbm)      # gradient boosting: gbm(), gbm.perf(), predict.gbm()
library(caret)    # confusionMatrix(); attaches ggplot2 via its Depends
library(pROC)     # roc(), auc()
library(MatchIt)  # matchit(), match.data()
library(dplyr)    # %>%, mutate(), filter(), group_by(), summarise()

# Set the working directory and load the modelling data.
# NOTE(review): setwd() in a script is fragile — all subsequent read/write
# calls are relative to ${path}; confirm this is the intended project root.
setwd("${path}")
mydata <- read.csv("data.csv")
# Ensure ${hotspot} is a factor coded "event_0"/"event_1" (from raw "0"/"1").
# Rows whose raw value is neither "0" nor "1" become NA here.
mydata$${hotspot} <- factor(mydata$${hotspot}, levels = c("0", "1"), labels = c("event_0", "event_1"))


# Propensity-score matching: 1 case ("event_1") to 3 controls, nearest
# neighbour on the covariates in ${independent_and}.
m.out <- matchit(${hotspot} ~  ${independent_and}, 
                 data = mydata, 
                 method = "nearest", 
                 ratio = 3)

# Keep only the matched rows (adds weights/subclass columns).
matched_data <- match.data(m.out)
# Convert ${hotspot} back to numeric 0/1 — gbm's bernoulli distribution
# requires a numeric {0,1} response, not a factor.
matched_data$${hotspot} <- as.numeric(matched_data$${hotspot} == "event_1")
# Bernoulli deviance for a binary 0/1 response; gbm performs its own
# cross-validation via the cv.folds argument below (no caret trainControl
# is needed).
distribution <- "bernoulli"

# Fit the initial boosted model on the matched data.
model_gbm <- gbm(
  ${hotspot} ~  ${independent_and},
  distribution = distribution,
  data = matched_data,
  shrinkage = 0.1,          # learning rate
  n.trees = 100,            # boosting iterations
  interaction.depth = 3,    # max depth of each tree
  cv.folds = 5,             # 5-fold CV for gbm.perf() later
  verbose = FALSE
)
# Screen predictors by relative influence, then build a reduced formula.
# plotit = FALSE: we only want the importance table, not gbm's built-in plot.
var_imp_summary <- summary(model_gbm, plotit = FALSE)

# summary.gbm() returns a data frame with columns `var` and `rel.inf`;
# read the `var` column directly rather than relying on rownames.
imp_df <- data.frame(
  Variable = as.character(var_imp_summary$var),
  Importance = var_imp_summary$rel.inf
)

# Sort by importance, descending (summary.gbm already sorts; be explicit).
imp_df <- imp_df[order(-imp_df$Importance), ]

# Keep predictors whose relative influence (percent) meets the threshold.
threshold <- 5 # adjust to the data set

important_vars <- imp_df[imp_df$Importance >= threshold, ]

# Guard: if nothing clears the threshold the pasted formula would be
# "${hotspot} ~ " and as.formula() would fail — fall back to all predictors.
if (nrow(important_vars) == 0) {
  warning("No variable reached the importance threshold; keeping all variables.")
  important_vars <- imp_df
}

# Show the selected variables.
print(important_vars)

# Build the reduced formula string "${hotspot} ~ v1 + v2 + ...".
new_formula_string <- paste("${hotspot} ~", paste(important_vars$Variable, collapse = " + "))

# Convert the string to a formula object.
new_formula <- as.formula(new_formula_string)

# Refit the GBM using only the screened predictors; hyper-parameters are
# kept identical to the initial fit.
model_gbm <- gbm(
  new_formula,
  distribution = distribution,
  data = matched_data,
  shrinkage = 0.1,
  n.trees = 100,
  interaction.depth = 3,
  cv.folds = 5,
  verbose = FALSE
)

# Display the refitted model's variable importance (also draws gbm's plot).
summary(model_gbm)
# Predict on the full (unmatched) data set and evaluate.
# type = "response" applies the inverse logit for a bernoulli model, i.e. it
# is equivalent to plogis() on the link-scale predictions.
predicted_probs_gbm <- predict(model_gbm, newdata = mydata,
                               n.trees = model_gbm$n.trees, type = "response")
predicted_classes_gbm <- factor(ifelse(predicted_probs_gbm >= 0.5, "event_1", "event_0"),
                                levels = c("event_0", "event_1"))

# Attach predictions to the data set.
mydata_with_predictions_gbm <- mydata %>%
  mutate(
    ${hotspot}_probability = predicted_probs_gbm,
    ${hotspot}_predicted_class = predicted_classes_gbm,
    ${hotspot}_score = NA  # GBM has no scorecard
  )

# Count rows where the observed ${hotspot} equals the predicted class.
matching_rows <- mydata_with_predictions_gbm %>%
  filter(${hotspot} == ${hotspot}_predicted_class) %>%
  nrow()
cat("Number of rows where ${hotspot} and ${hotspot}_predicted_class match:", matching_rows, "\n")

# Accuracy = matching rows / total rows.
total_rows <- nrow(mydata_with_predictions_gbm)
accuracy <- matching_rows / total_rows
# Bug fix: this line previously reused the "Number of rows ..." message while
# actually printing the accuracy value.
cat("Accuracy (matching rows / total rows):", accuracy, "\n")

# Modelling-set results.
write.csv(mydata_with_predictions_gbm, file = "mydata_with_predictions_gbm.csv", row.names = FALSE)

#### Feature importance plot ####
# plotit = FALSE: suppress gbm's built-in base-graphics plot; we only need
# the importance table for the ggplot below.
importance <- summary(model_gbm, plotit = FALSE)
importance_df <- data.frame(
  feature = as.character(importance$var),  # `var` column holds the names
  importance = importance$rel.inf
)

# Horizontal bar chart of relative influence.
importance_plot <- ggplot(importance_df, aes(x=reorder(feature, -importance), y=importance)) +
  geom_bar(stat="identity", fill="#2C7BB6", width=0.7) +  # publication-friendly colour
  coord_flip() +  # horizontal bars improve label readability
  theme_minimal(base_family = "Arial") +
  theme(
    axis.text.y = element_text(size=10),
    axis.title.x = element_text(size=12, face="bold"),
    axis.title.y = element_blank(),  # hide the (flipped) feature-axis title
    plot.title = element_text(size=14, hjust=0.5, face="bold"),
    panel.grid.major.y = element_blank(),
    panel.grid.minor.y = element_blank()
  ) +
  labs(title="Feature Importance", x="Relative Influence")

# Bug fix: width/height were interpreted as 800 *inches* (units="in"),
# producing an absurdly large device; 800 pixels was intended.
Cairo::CairoTIFF(file="importanceFeatures_gbm.tiff", width=800, height=800, units="px", dpi=150)
print(importance_plot)
dev.off()
#### Learning curve ####
# gbm.perf() draws gbm's own CV plot on the current device and returns the
# CV-optimal number of trees.
learning_curve <- gbm.perf(model_gbm, method = "cv")

# Re-plot the cross-validation error per boosting iteration with ggplot2.
learning_curve_plot <- ggplot(data.frame(trees=seq_len(model_gbm$n.trees), error=model_gbm$cv.error), aes(x=trees, y=error)) +
  geom_line(color="#ABDDA4", size=1.2) +  # publication-friendly colour
  theme_minimal(base_family = "Arial") +
  theme(
    axis.text = element_text(size=10),
    axis.title = element_text(size=12, face="bold"),
    plot.title = element_text(size=14, hjust=0.5, face="bold")
  ) +
  labs(title="Learning Curve", x="Number of Trees", y="Cross-Validation Error")

# Bug fix: width/height were 800 *inches*; 800 pixels was intended.
Cairo::CairoTIFF(file="learningCurve_gbm.tiff", width=800, height=800, units="px", dpi=150)
print(learning_curve_plot)
dev.off()
# Optional alternative export:
#ggsave("learning_curve.png", plot = learning_curve_plot, dpi = 300, width = 8, height = 6)
#### ROC curve & AUC ####
# Ensure pROC is available (harmless if already attached above).
if (!require(pROC)) install.packages("pROC")
library(pROC)

# Compute the ROC curve and AUC. TRUE marks event_1 as the case class.
# levels/direction are pinned explicitly: pROC's default direction = "auto"
# can silently flip the comparison (always reporting AUC >= 0.5).
roc_curve <- roc(mydata_with_predictions_gbm$${hotspot} == "event_1",
                 mydata_with_predictions_gbm$${hotspot}_probability,
                 levels = c(FALSE, TRUE), direction = "<")
auc_value <- auc(roc_curve)

# Plot the ROC curve with the AUC annotated.
roc_plot <- ggplot(data.frame(fpr=1 - roc_curve$specificities, tpr=roc_curve$sensitivities), aes(x=fpr, y=tpr)) +
  geom_line(color="#D7191C", size=1.2) +  # publication-friendly colour
  geom_abline(intercept = 0, slope = 1, linetype="dashed", color="gray70") +  # chance line
  annotate("text", x=0.6, y=0.2, label=paste("AUC =", round(auc_value, 3)), size=4, color="black") +
  theme_minimal(base_family = "Arial") +
  theme(
    axis.text = element_text(size=10),
    axis.title = element_text(size=12, face="bold"),
    plot.title = element_text(size=14, hjust=0.5, face="bold"),
    legend.position = "none"
  ) +
  labs(title="ROC Curve", x="False Positive Rate (FPR)", y="True Positive Rate (TPR)")

# Bug fix: width/height were 800 *inches*; 800 pixels was intended.
Cairo::CairoTIFF(file="roc_gbm.tiff", width=800, height=800, units="px", dpi=150)
print(roc_plot)
dev.off()
# Optional alternative export:
#ggsave("roc_curve.png", plot = roc_plot, dpi = 300, width = 8, height = 6)
#### Calibration plot ####
# Ensure required packages are available (harmless if already attached).
if (!require(pROC)) install.packages("pROC")
library(pROC)
library(caret)

# Predicted probabilities and observed outcomes on the matched training data.
predicted_probs_gbm <- predict(model_gbm, newdata = matched_data, n.trees = model_gbm$n.trees, type = "response")
# matched_data$${hotspot} is numeric 0/1 here, so the factor levels are
# "0" and "1" in that order.
actual_outcomes <- factor(matched_data$${hotspot})

# Pair each prediction with its observed outcome.
calib_data <- data.frame(
  predicted_probs = predicted_probs_gbm,
  actual_outcomes = actual_outcomes
)

# Bin the predicted probabilities into equal-width intervals.
num_bins <- 10
calib_data$bin <- cut(calib_data$predicted_probs, breaks = num_bins, include.lowest = TRUE)

# Mean predicted probability vs. observed event frequency per bin.
calib_summary <- calib_data %>%
  group_by(bin) %>%
  summarise(
    avg_predicted_prob = mean(predicted_probs),
    actual_frequency = mean(as.numeric(actual_outcomes) - 1),  # factor codes 1/2 -> 0/1
    .groups = 'drop'
  )

# A well-calibrated model tracks the dashed identity line.
calibration_plot <- ggplot(calib_summary, aes(x=avg_predicted_prob, y=actual_frequency)) +
  geom_point(color="#D7191C", size=3) +  # publication-friendly colour
  geom_line(color="#D7191C", size=1.2) +
  geom_abline(intercept = 0, slope = 1, linetype="dashed", color="gray70") +
  theme_minimal(base_family = "Arial") +
  theme(
    axis.text = element_text(size=10),
    axis.title = element_text(size=12, face="bold"),
    plot.title = element_text(size=14, hjust=0.5, face="bold")
  ) +
  labs(title="Calibration Plot", x="Predicted Probability", y="Actual Frequency")

# Bug fix: width/height were 800 *inches*; 800 pixels was intended.
Cairo::CairoTIFF(file="cal_gbm.tiff", width=800, height=800, units="px", dpi=150)
print(calibration_plot)
dev.off()

#### Confusion matrix ####
# Re-derive the predicted class from the stored probabilities so this block
# is self-contained.
predicted_classes_gbm <- factor(ifelse(mydata_with_predictions_gbm$${hotspot}_probability >= 0.5, "event_1", "event_0"), 
                                levels = c("event_0", "event_1"))

# Bug fix: by default caret treats the *first* factor level ("event_0") as
# the positive class, which inverts sensitivity/specificity/PPV/NPV for an
# event-detection model. Declare event_1 as the positive class explicitly.
conf_matrix <- confusionMatrix(data = predicted_classes_gbm, 
                               reference = mydata_with_predictions_gbm$${hotspot},
                               positive = "event_1")

# Write the full report to a text file.
sink("confusionMatrix_gbm.txt")
print(conf_matrix)
sink()



#### Score the external result set ####
mydata1 <- read.csv("result.csv")

# Link-scale predictions, then transform to probabilities via the logistic
# function, then threshold at 0.5 for the class label.
predictions_gbm <- predict(model_gbm, newdata = mydata1, n.trees = model_gbm$n.trees)
predicted_probs_gbm <- plogis(predictions_gbm)
predicted_classes_gbm <- factor(
  ifelse(predicted_probs_gbm >= 0.5, "event_1", "event_0"),
  levels = c("event_0", "event_1")
)

# Append the scores to the validation data.
result_with_predictions_gbm <- mydata1 %>%
  mutate(
    ${hotspot}_probability = predicted_probs_gbm,
    ${hotspot}_predicted_class = predicted_classes_gbm,
    ${hotspot}_score = NA  # GBM has no scorecard
  )

# Outcome-set results.
write.csv(result_with_predictions_gbm, file = "result_with_predictions_gbm.csv", row.names = FALSE)