# Complete machine learning analysis pipeline for gene expression data
# Goal: distinguish normal tissue from plaque tissue using ML models

# 1. Load required packages
# Set the CRAN mirror BEFORE any installation so every install uses it
# (the original set it only after the first install attempts).
options(repos = "https://mirrors.tuna.tsinghua.edu.cn/CRAN/")

# Install each package only if it is missing, then attach it below.
# This replaces the original unconditional install.packages("caret")
# that re-installed caret on every run.  "lava" is a caret dependency
# that was missing on the original machine; keep it in the list.
pkgs <- c("caret", "randomForest", "e1071", "pROC", "ggplot2", "lava")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
  }
}

library(caret)
library(randomForest)
library(e1071)
library(pROC)
library(ggplot2)

# 2. Data preparation and preprocessing
# Requires earlier analysis results already in the workspace:
#   expr_final  - expression matrix (genes x samples)
#   de_result   - table of differentially expressed genes
#   sample_info - sample metadata with a Tissue_Type column
cat("数据基本信息：\n")
cat("表达矩阵维度：", dim(expr_final), "\n")
cat("差异基因数量：", nrow(de_result), "\n")
cat("样本数量：", nrow(sample_info), "\n")

# 2.1 Expression sub-matrix of the DE genes (rows: genes, cols: samples)
diff_expr <- expr_final[rownames(de_result), ]

# 2.2 Sample labels (0 = normal tissue, 1 = plaque tissue)
labels <- ifelse(sample_info$Tissue_Type == "Normal", 0, 1)
labels <- factor(labels, levels = c(0, 1), labels = c("Normal", "Plaque"))

# 2.3 Transpose (ML expects rows: samples, cols: features/genes)
ml_data <- as.data.frame(t(diff_expr))
ml_data$label <- labels  # append the label column

# 2.4 Check completeness; impute missing values with the column MEDIAN.
# (The original comment said "column mean" but the method used was
# medianImpute - the comment is corrected here, not the method.)
has_na <- any(is.na(ml_data))  # compute once instead of twice
cat("是否存在缺失值：", has_na, "\n")
if (has_na) {
  feature_cols <- ml_data[, -ncol(ml_data)]
  preProc <- preProcess(feature_cols, method = "medianImpute")
  ml_data[, -ncol(ml_data)] <- predict(preProc, feature_cols)
}

# 3. Split the data into training and test sets

# 3.1 Fix the RNG seed so the partition is reproducible
set.seed(123)
trainIndex <- createDataPartition(ml_data$label, p = 0.7, list = FALSE)
train_data <- ml_data[trainIndex, ]   # 70% for training
test_data  <- ml_data[-trainIndex, ]  # 30% for testing

# 3.2 Separate features from labels (the label is the last column)
label_col <- ncol(train_data)
train_features <- train_data[, -label_col]
train_labels   <- train_data$label
test_features  <- test_data[, -label_col]
test_labels    <- test_data$label

cat("训练集样本数：", nrow(train_data), "\n")
cat("测试集样本数：", nrow(test_data), "\n")

# 4. Feature selection

# 4.1 Rank features by random-forest importance
set.seed(123)
rf_imp <- randomForest(x = train_features, y = train_labels,
                       ntree = 500, importance = TRUE)

# 4.2 Build the importance table, sorted in decreasing order
imp_mat <- importance(rf_imp)  # call importance() once, not twice
imp_df <- data.frame(
  Gene = rownames(imp_mat),
  Importance = imp_mat[, "MeanDecreaseGini"]
)
imp_df <- imp_df[order(imp_df$Importance, decreasing = TRUE), ]

# 4.3 Keep the top 20% of genes (at least 5), capped at the number of
# genes actually available.  The original indexed imp_df$Gene[1:n_features]
# without the upper bound, which produced NA gene names whenever fewer
# than 5 genes existed - the NAs it then had to clean up afterwards.
n_features <- min(nrow(imp_df), max(5, ceiling(0.2 * nrow(imp_df))))
selected_genes <- imp_df$Gene[seq_len(n_features)]
cat("选择的特征基因数量：", n_features, "\n")

# 4.4 Defensive checks: drop any NA names and verify every selected gene
# is a column of train_features before it is used for subsetting.
selected_genes_clean <- selected_genes[!is.na(selected_genes)]
missing_genes <- setdiff(selected_genes_clean, colnames(train_features))
if (length(missing_genes) > 0) {
  cat("以下基因在train_features中不存在：\n")
  print(missing_genes)
} else {
  cat("所有selected_genes都在train_features中存在，可以正常筛选\n")
}
# 4.5 Subset BOTH feature sets to the cleaned gene list.
# NOTE(review): the original only built test_selected here and then
# called predict(svm_model, ...) BEFORE svm_model was created in
# section 5 (a guaranteed "object not found" error).  Those premature
# prediction lines are removed, and train_selected - used by every
# model below but never defined anywhere - is created here.
train_selected <- train_features[, selected_genes_clean]
test_selected  <- test_features[, selected_genes_clean]

# 4.6 Visualize the top-10 feature importances.
# print() is required: inside a sourced script a ggplot object is not
# auto-printed, which would otherwise leave the PNG device empty.
png("特征重要性.png", width = 1000, height = 800, res = 300)
print(
  ggplot(head(imp_df, 10),
         aes(x = reorder(Gene, Importance), y = Importance)) +
    geom_bar(stat = "identity", fill = "#4285F4") +
    coord_flip() +
    labs(title = "Top 10 重要特征基因", x = "基因", y = "重要性分数") +
    theme_minimal() +
    theme(plot.title = element_text(hjust = 0.5))
)
dev.off()

# 5. Build several machine learning models for comparison

# 5.1 Support vector machine (RBF kernel, probability model enabled)
set.seed(123)
svm_model <- svm(x = train_selected, y = train_labels,
                 kernel = "radial", probability = TRUE)
svm_pred <- predict(svm_model, test_selected)
svm_prob <- attr(predict(svm_model, test_selected, probability = TRUE),
                 "probabilities")[, "Plaque"]

# 5.2 Random forest
set.seed(123)
rf_model <- randomForest(x = train_selected, y = train_labels, ntree = 500)
rf_pred <- predict(rf_model, test_selected)
rf_prob <- predict(rf_model, test_selected, type = "prob")[, "Plaque"]

# 5.3 Logistic regression
set.seed(123)
lr_model <- glm(label ~ ., data = cbind(train_selected, label = train_labels),
                family = "binomial")
# Keep the fitted probabilities in their own variable: the original
# overwrote them with the thresholded class labels, leaving no
# probability available for the ROC/AUC computation in section 6.
lr_prob <- predict(lr_model, newdata = test_selected, type = "response")
lr_pred <- factor(ifelse(lr_prob > 0.5, "Plaque", "Normal"),
                  levels = c("Normal", "Plaque"))

# 6. Model evaluation

# 6.1 Evaluate one classifier on the test set.
#
# @param actual     factor of true labels (levels: Normal, Plaque)
# @param predicted  factor of predicted labels (same levels)
# @param prob       numeric vector of predicted P(Plaque)
# @param model_name display name used in the printed report
# @return list with the confusion matrix (cm), the pROC roc object
#         (roc) and the AUC (auc)
evaluate_model <- function(actual, predicted, prob, model_name) {
  # Report metrics for the class of interest ("Plaque"); caret's default
  # positive class is the FIRST factor level ("Normal"), which would make
  # Precision/Recall describe the wrong class.
  cm <- confusionMatrix(predicted, actual, positive = "Plaque")
  acc <- cm$overall["Accuracy"]
  precision <- cm$byClass["Precision"]
  recall <- cm$byClass["Recall"]
  f1 <- cm$byClass["F1"]
  # Fix level order and direction explicitly so the AUC cannot be
  # silently flipped by pROC's auto-detection, and silence its message.
  roc_obj <- roc(response = actual, predictor = prob,
                 levels = c("Normal", "Plaque"), direction = "<",
                 quiet = TRUE)
  auc <- auc(roc_obj)
  cat("\n", model_name, "模型评估结果：\n")
  cat("准确率(Accuracy)：", round(acc, 4), "\n")
  cat("精确率(Precision)：", round(precision, 4), "\n")
  cat("召回率(Recall)：", round(recall, 4), "\n")
  cat("F1分数：", round(f1, 4), "\n")
  cat("AUC：", round(auc, 4), "\n")

  return(list(cm = cm, roc = roc_obj, auc = auc))
}

# 6.2 Evaluate each model.
# The logistic regression must be scored with its fitted probabilities:
# the original passed as.numeric(lr_pred) - the 1/2 integer codes of the
# PREDICTED CLASSES - as the "probability", which makes the resulting
# ROC curve and AUC meaningless.  Recompute the probabilities here.
lr_prob <- predict(lr_model, newdata = test_selected, type = "response")
svm_eval <- evaluate_model(test_labels, svm_pred, svm_prob, "支持向量机(SVM)")
rf_eval <- evaluate_model(test_labels, rf_pred, rf_prob, "随机森林(RF)")
lr_eval <- evaluate_model(test_labels, lr_pred, lr_prob, "逻辑回归(LR)")

# 7. Visual model comparison
# 7.1 Overlaid ROC curves for the three models
model_cols <- c(SVM = "#4285F4", RF = "#EA4335", LR = "#34A853")

png("ROC曲线比较.png", width = 1000, height = 800, res = 300)
plot(svm_eval$roc, col = model_cols[["SVM"]], lwd = 2,
     main = "不同模型的ROC曲线比较")
lines(rf_eval$roc, col = model_cols[["RF"]], lwd = 2)
lines(lr_eval$roc, col = model_cols[["LR"]], lwd = 2)
abline(a = 0, b = 1, lty = 2, col = "gray")  # chance diagonal

# Legend labels carry each model's AUC rounded to 3 decimals
legend_labels <- c(
  paste0("SVM (AUC=", round(svm_eval$auc, 3), ")"),
  paste0("随机森林 (AUC=", round(rf_eval$auc, 3), ")"),
  paste0("逻辑回归 (AUC=", round(lr_eval$auc, 3), ")")
)
legend("bottomright", legend = legend_labels,
       col = unname(model_cols), lwd = 2)
dev.off()

# 7.2 Confusion-matrix heatmap for the best model
# 7.2.1 Pick the model with the highest AUC.  which.max returns the
# first maximum, so ties resolve SVM > RF > LR - the same order the
# original nested ifelse produced.
aucs <- c(SVM = svm_eval$auc, RF = rf_eval$auc, LR = lr_eval$auc)
best_model <- names(aucs)[which.max(aucs)]
cat("\n最佳模型：", best_model, "\n")

# 7.2.2 Plot the best model's confusion matrix
cm <- switch(best_model,
             SVM = svm_eval$cm,
             RF  = rf_eval$cm,
             lr_eval$cm)

cm_df <- as.data.frame(cm$table)
png("最佳模型混淆矩阵.png", width = 1000, height = 800, res = 300)
# print() is required: a ggplot object is not auto-printed in a sourced
# script, which would otherwise leave the PNG device empty.
print(
  ggplot(cm_df, aes(x = Reference, y = Prediction, fill = Freq)) +
    geom_tile(color = "white") +
    geom_text(aes(label = Freq), size = 12) +
    scale_fill_gradient(low = "#F1F9FF", high = "#4285F4") +
    labs(title = paste("最佳模型(", best_model, ")混淆矩阵"),
         x = "实际标签", y = "预测标签") +
    theme_minimal() +
    theme(plot.title = element_text(hjust = 0.5, size = 16))
)
dev.off()


# 8. 保存结果
#8.1保存模型
save(svm_model, rf_model, lr_model, file = "机器学习模型.RData")

#8.2保存评估指标
eval_results <- data.frame(
  模型 = c("SVM", "随机森林", "逻辑回归"),
  准确率 = c(svm_eval$cm$overall["Accuracy"], 
          rf_eval$cm$overall["Accuracy"], 
          lr_eval$cm$overall["Accuracy"]),
  精确率 = c(svm_eval$cm$byClass["Precision"], 
          rf_eval$cm$byClass["Precision"], 
          lr_eval$cm$byClass["Precision"]),
  召回率 = c(svm_eval$cm$byClass["Recall"], 
          rf_eval$cm$byClass["Recall"], 
          lr_eval$cm$byClass["Recall"]),
  F1分数 = c(svm_eval$cm$byClass["F1"], 
           rf_eval$cm$byClass["F1"], 
           lr_eval$cm$byClass["F1"]),
  AUC = c(svm_eval$auc, rf_eval$auc, lr_eval$auc)
)
write.csv(eval_results, "模型评估结果.csv", row.names = FALSE)

cat("\n机器学习分析完成！结果已保存为图片和CSV文件。\n")
