library(dplyr)
library(caret)
library(rpart)
library(rpart.plot)
library(e1071)
library(MASS)
library(ggplot2)

set.seed(123)

# Load the raw dataset; keep strings as character (factored explicitly later)
data <- read.csv("data/obesity_level.csv", stringsAsFactors = FALSE)

# Partition the columns into continuous, discrete, and text/categorical groups
cont_columns <- c("Height", "Weight", "CH2O", "FAF", "TUE")
disc_columns <- c("family_history_with_overweight", "FAVC", "FCVC", "NCP", "SMOKE", "SCC")
text_columns <- c("Age", "Gender", "CAEC", "CALC", "MTRANS", "level")

cont_data <- data[cont_columns]
disc_data <- data[disc_columns]
text_data <- data[text_columns]

# Five-number summary (min, Q1, median, Q3, max) for every numeric column
# of the continuous data; non-numeric columns map to NULL.
quantiles_all <- lapply(cont_data, function(col) {
  if (!is.numeric(col)) {
    return(NULL)
  }
  quantile(col, probs = c(0, 0.25, 0.5, 0.75, 1), na.rm = TRUE)
})

# Print the quantiles for each column
for (col_name in names(quantiles_all)) {
  cat("\nQuantiles for", col_name, ":\n")
  print(quantiles_all[[col_name]])
}

# Draw a boxplot for each numeric column of the continuous data.
# A for loop is used instead of a top-level lapply: this code is run for
# its plotting side effect only, and lapply's returned list of NULLs would
# be auto-printed when the script is run via Rscript.
for (column_name in names(cont_data)) {
  values <- cont_data[[column_name]]
  if (is.numeric(values)) {
    boxplot(
      values,
      main = paste("Boxplot of", column_name),
      ylab = column_name,
      col = "lightblue",
      border = "darkblue"
    )
  }
}

# Z-score standardization: center by the mean and scale by the standard
# deviation, ignoring missing values. NA entries stay NA in the output;
# a constant column yields NaN (sd == 0).
normalize <- function(x) {
  (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
}
# Standardize every continuous column
cont_data_normalized <- as.data.frame(lapply(cont_data, normalize))

# Recompute the five-number summary on the standardized columns to verify
# the transformation (medians near 0, symmetric spread)
quantiles_all <- lapply(cont_data_normalized, function(col) {
  if (!is.numeric(col)) {
    return(NULL)
  }
  quantile(col, probs = c(0, 0.25, 0.5, 0.75, 1), na.rm = TRUE)
})

# Print the quantile breakpoints
for (col_name in names(quantiles_all)) {
  cat("\nQuantiles for", col_name, ":\n")
  print(quantiles_all[[col_name]])
}

# Recombine the three column groups into a single modeling frame
processed_data <- cbind(text_data, cont_data_normalized, disc_data)

# Drop any row that still contains a missing value
processed_data <- na.omit(processed_data)

# Encode the categorical variables as factors
columns_to_factor <- c(
  "Gender", "level", "family_history_with_overweight",
  "FAVC", "SMOKE", "CALC", "MTRANS"
)
for (col in columns_to_factor) {
  processed_data[[col]] <- as.factor(processed_data[[col]])
}

# Principal component analysis on the standardized continuous variables
pca_result <- prcomp(cont_data_normalized, scale. = TRUE)
summary(pca_result)

# Proportion of variance explained (contribution rate) per component
sdevs <- pca_result$sdev
contributions <- sdevs^2 / sum(sdevs^2)

# Tabulate component names against their contribution rate in percent
pca_contributions <- data.frame(
  Principal_Component = paste0("PC", seq_along(contributions)),
  Contribution_Rate = round(contributions, 4) * 100
)
print(pca_contributions)

# Variable loadings (rotation matrix)
loadings <- pca_result$rotation
print(loadings)

# Scores on the first two principal components
pca_vision <- pca_result$x[, 1:2]

# Attach the class label for plotting. Use text_data$level rather than
# processed_data$level: the PCA was fitted on the unfiltered rows of
# cont_data_normalized, while processed_data had NA rows removed, so its
# label vector can be shorter than the score matrix and would error (or
# silently recycle) inside data.frame().
pca_vision_df <- data.frame(pca_vision, level = text_data$level)

# Scatter plot of the first two principal components, colored by class.
# NOTE(review): the manual palette has 5 colors — confirm the dataset has
# at most 5 distinct levels, otherwise scale_color_manual will error.
ggplot(pca_vision_df, aes(x = PC1, y = PC2, color = level)) +
  geom_point(alpha = 0.6) +
  labs(title = "PCA - First Two Principal Components", x = "PC1", y = "PC2") +
  theme_minimal() +
  scale_color_manual(values = c("lightblue", "orange", "green", "purple", "red"))

# Keep the scores on the first four principal components as model features
pca_scores <- pca_result$x[, 1:4]
colnames(pca_scores) <- paste0("PC", 1:4)

# Rebuild the modeling frame with PCA features in place of the raw
# continuous columns. The cbind below starts from the untouched text_data /
# disc_data, which silently discards the factor conversion applied to the
# previous processed_data — so re-apply it here to keep the encoding the
# downstream models (rpart / lda / svm) see consistent.
processed_data <- cbind(text_data, pca_scores, disc_data)
columns_to_factor <- c("Gender", "level", "family_history_with_overweight",
                       "FAVC", "SMOKE", "CALC", "MTRANS")
processed_data[columns_to_factor] <- lapply(processed_data[columns_to_factor], as.factor)

# Build the training (80%) and test (20%) sets, stratified by level
tree_data <- processed_data
train_indices <- createDataPartition(processed_data$level, p = 0.8, list = FALSE)

train_data <- processed_data[train_indices, ]
test_data <- processed_data[-train_indices, ]

# Make sure the target column is a factor in both partitions
train_data$level <- as.factor(train_data$level)
test_data$level <- as.factor(test_data$level)

# Report partition sizes
cat("Training set size:", nrow(train_data), "\n")
cat("Testing set size:", nrow(test_data))

# Fit a classification tree predicting level from every other column
tree_model <- rpart(
  level ~ .,
  data = train_data,
  method = "class",
  control = rpart.control(
    minsplit = 20,  # minimum observations in a node before a split is tried
    maxdepth = 30,  # maximum depth of the tree
    cp = 0.01,      # complexity-parameter threshold for pruning
    xval = 10       # number of cross-validation folds (rpart default)
  )
)

rpart.plot(tree_model)  # visualize the fitted tree
printcp(tree_model)     # cross-validated error vs. complexity table

# Predict class labels on the held-out test set
predictions <- predict(tree_model, newdata = test_data, type = "class")

# Ensure predictions and ground truth are factors
predictions <- factor(predictions)
true_labels <- factor(test_data$level)

# Confusion matrix
conf_matrix <- confusionMatrix(predictions, true_labels)
print(conf_matrix$table)

# Per-class precision. For multi-class problems conf_matrix$byClass is a
# matrix (one row per class), so "Precision" is a COLUMN; the original
# flat byClass["Precision"] indexed it as a vector and returned NA.
if (is.matrix(conf_matrix$byClass)) {
  precision_caret <- conf_matrix$byClass[, "Precision"]
} else {
  precision_caret <- conf_matrix$byClass["Precision"]  # binary case: named vector
}
# Extract and print precision, recall, and F1 score (per class when the
# problem is multi-class). byClass is a matrix for multi-class results and
# a named vector for binary ones; the original flat ["Precision"] lookup
# returns NA on a matrix, so index by column where appropriate.
if (!is.null(conf_matrix$byClass)) {
  if (is.matrix(conf_matrix$byClass)) {
    precision <- conf_matrix$byClass[, "Precision"]
    recall <- conf_matrix$byClass[, "Recall"]
    f1_score <- conf_matrix$byClass[, "F1"]
  } else {
    precision <- conf_matrix$byClass["Precision"]
    recall <- conf_matrix$byClass["Recall"]
    f1_score <- conf_matrix$byClass["F1"]
  }

  # ifelse is vectorized, so undefined per-class metrics print as "N/A"
  cat("Precision:", ifelse(is.na(precision), "N/A", precision), "\n")
  cat("Recall:", ifelse(is.na(recall), "N/A", recall), "\n")
  cat("F1 Score:", ifelse(is.na(f1_score), "N/A", f1_score), "\n")
} else {
  cat("Confusion matrix does not contain byClass information.\n")
}

# Overall performance
cat("Overall Accuracy:", conf_matrix$overall["Accuracy"], "\n")

# Macro averages: unweighted mean of the per-class metrics. byClass must be
# indexed by COLUMN for multi-class results; the original
# mean(conf_matrix$byClass["Precision"]) averaged a single NA, so every
# macro metric came out NaN.
per_class <- conf_matrix$byClass
if (!is.matrix(per_class)) {
  per_class <- t(as.matrix(per_class))  # binary case: promote vector to 1-row matrix
}
macro_precision <- mean(per_class[, "Precision"], na.rm = TRUE)
macro_recall <- mean(per_class[, "Recall"], na.rm = TRUE)
macro_f1 <- mean(per_class[, "F1"], na.rm = TRUE)

# Micro averages from the pooled confusion-matrix counts
# (rows = predicted, columns = actual)
micro_tp <- sum(diag(conf_matrix$table))
micro_fp <- sum(rowSums(conf_matrix$table) - diag(conf_matrix$table))
micro_fn <- sum(colSums(conf_matrix$table) - diag(conf_matrix$table))

micro_precision <- micro_tp / (micro_tp + micro_fp)
micro_recall <- micro_tp / (micro_tp + micro_fn)
micro_f1 <- 2 * (micro_precision * micro_recall) / (micro_precision + micro_recall)

cat("Macro Precision:", macro_precision, "\n")
cat("Macro Recall:", macro_recall, "\n")
cat("Macro F1 Score:", macro_f1, "\n")

cat("Micro Precision:", micro_precision, "\n")
cat("Micro Recall:", micro_recall, "\n")
cat("Micro F1 Score:", micro_f1, "\n")

# Inspect the class balance of each partition
cat("Training set class distribution:\n")
print(table(train_data$level))
cat("Testing set class distribution:\n")
print(table(test_data$level))

# Fit a linear discriminant analysis model
lda_model <- lda(level ~ ., data = train_data)

# Print the model summary (optional)
print(lda_model)

# Predicted classes on the test set
lda_predictions <- predict(lda_model, newdata = test_data)$class

# Align factor levels between predictions and ground truth. Use the union
# of the existing level sets: the original c(train_data$level,
# test_data$level) on two factors returns bare integer codes on R < 4.1,
# which would turn all_levels into "1", "2", ... and make every factor()
# call below produce NA.
all_levels <- union(levels(factor(train_data$level)), levels(factor(test_data$level)))
lda_predictions <- factor(lda_predictions, levels = all_levels)
true_labels <- factor(test_data$level, levels = all_levels)

# Abort early if the alignment produced missing values
if (any(is.na(lda_predictions)) || any(is.na(true_labels))) {
  stop("There are NA values in predictions or true labels.")
}

# Build the confusion matrix for the LDA predictions and report metrics
tryCatch({
  lda_conf_matrix <- confusionMatrix(lda_predictions, true_labels, mode = "everything")
  print(lda_conf_matrix$table)
  print(lda_conf_matrix$byClass)
  cat("Overall Accuracy:", lda_conf_matrix$overall["Accuracy"], "\n")
}, error = function(e) {
  cat("Error using confusionMatrix:", e$message, "\n")
  
  # Fallback: build a plain contingency table and compute the evaluation
  # metrics by hand (rows = predicted class, columns = actual class)
  lda_conf_matrix <- table(Predicted = lda_predictions, Actual = true_labels)
  print(lda_conf_matrix)
  
  precision_per_class <- numeric(length(levels(true_labels)))
  recall_per_class <- numeric(length(levels(true_labels)))
  f1_score_per_class <- numeric(length(levels(true_labels)))
  
  for (i in seq_along(levels(true_labels))) {
    TP <- lda_conf_matrix[i, i]
    FP <- sum(lda_conf_matrix[i, ]) - TP  # predicted as class i but actually another class
    FN <- sum(lda_conf_matrix[, i]) - TP  # actually class i but predicted otherwise
    
    # A zero denominator (empty class) yields NA rather than dividing by zero
    precision <- ifelse(TP + FP == 0, NA, TP / (TP + FP))
    recall <- ifelse(TP + FN == 0, NA, TP / (TP + FN))
    f1_score <- ifelse(is.na(precision) | is.na(recall), NA, 2 * (precision * recall) / (precision + recall))
    
    precision_per_class[i] <- precision
    recall_per_class[i] <- recall
    f1_score_per_class[i] <- f1_score
  }
  
  cat("Precision per class:\n")
  print(precision_per_class)
  cat("Recall per class:\n")
  print(recall_per_class)
  cat("F1 Score per class:\n")
  print(f1_score_per_class)
  
  # Macro averages (unweighted mean over classes, skipping undefined ones)
  # and micro averages (computed from the pooled counts)
  macro_precision <- mean(precision_per_class, na.rm = TRUE)
  macro_recall <- mean(recall_per_class, na.rm = TRUE)
  macro_f1 <- mean(f1_score_per_class, na.rm = TRUE)
  
  micro_tp <- sum(diag(lda_conf_matrix))
  micro_fp <- sum(rowSums(lda_conf_matrix) - diag(lda_conf_matrix))
  micro_fn <- sum(colSums(lda_conf_matrix) - diag(lda_conf_matrix))
  
  micro_precision <- micro_tp / (micro_tp + micro_fp)
  micro_recall <- micro_tp / (micro_tp + micro_fn)
  micro_f1 <- 2 * (micro_precision * micro_recall) / (micro_precision + micro_recall)
  
  cat("Macro Precision:", macro_precision, "\n")
  cat("Macro Recall:", macro_recall, "\n")
  cat("Macro F1 Score:", macro_f1, "\n")
  
  cat("Micro Precision:", micro_precision, "\n")
  cat("Micro Recall:", micro_recall, "\n")
  cat("Micro F1 Score:", micro_f1, "\n")
  
  # Overall accuracy straight from the contingency table
  overall_accuracy <- sum(diag(lda_conf_matrix)) / sum(lda_conf_matrix)
  cat("Overall Accuracy:", overall_accuracy, "\n")
})

# Fit an SVM classifier with the radial-basis (RBF) kernel
svm_model <- svm(level ~ ., data = train_data, kernel = "radial", cost = 1, scale = TRUE)

# Print the model summary (optional)
print(svm_model)

# Predicted classes on the test set
svm_predictions <- predict(svm_model, newdata = test_data)

# Align factor levels between predictions and ground truth. Use the union
# of the existing level sets: the original c(train_data$level,
# test_data$level) on two factors returns bare integer codes on R < 4.1,
# which would turn all_levels into "1", "2", ... and make every factor()
# call below produce NA.
all_levels <- union(levels(factor(train_data$level)), levels(factor(test_data$level)))
svm_predictions <- factor(svm_predictions, levels = all_levels)
true_labels <- factor(test_data$level, levels = all_levels)

# Abort early if the alignment produced missing values
if (any(is.na(svm_predictions)) || any(is.na(true_labels))) {
  stop("There are NA values in predictions or true labels.")
}
# Build the confusion matrix for the SVM predictions and report metrics
tryCatch({
  svm_conf_matrix <- confusionMatrix(svm_predictions, true_labels, mode = "everything")
  print(svm_conf_matrix$table)
  print(svm_conf_matrix$byClass)
  cat("Overall Accuracy:", svm_conf_matrix$overall["Accuracy"], "\n")
}, error = function(e) {
  cat("Error using confusionMatrix:", e$message, "\n")

  # Fallback: build a plain contingency table and compute the evaluation
  # metrics by hand (rows = predicted class, columns = actual class)
  svm_conf_matrix <- table(Predicted = svm_predictions, Actual = true_labels)
  print(svm_conf_matrix)

  precision_per_class <- numeric(length(levels(true_labels)))
  recall_per_class <- numeric(length(levels(true_labels)))
  f1_score_per_class <- numeric(length(levels(true_labels)))

  for (i in seq_along(levels(true_labels))) {
    TP <- svm_conf_matrix[i, i]
    FP <- sum(svm_conf_matrix[i, ]) - TP  # predicted as class i but actually another class
    FN <- sum(svm_conf_matrix[, i]) - TP  # actually class i but predicted otherwise

    # A zero denominator (empty class) yields NA rather than dividing by zero
    precision <- ifelse(TP + FP == 0, NA, TP / (TP + FP))
    recall <- ifelse(TP + FN == 0, NA, TP / (TP + FN))
    f1_score <- ifelse(is.na(precision) | is.na(recall), NA, 2 * (precision * recall) / (precision + recall))

    precision_per_class[i] <- precision
    recall_per_class[i] <- recall
    f1_score_per_class[i] <- f1_score
  }

  cat("Precision per class:\n")
  print(precision_per_class)
  cat("Recall per class:\n")
  print(recall_per_class)
  cat("F1 Score per class:\n")
  print(f1_score_per_class)

  # Macro averages (unweighted mean over classes) and micro averages
  # (computed from the pooled counts)
  macro_precision <- mean(precision_per_class, na.rm = TRUE)
  macro_recall <- mean(recall_per_class, na.rm = TRUE)
  macro_f1 <- mean(f1_score_per_class, na.rm = TRUE)

  micro_tp <- sum(diag(svm_conf_matrix))
  micro_fp <- sum(rowSums(svm_conf_matrix) - diag(svm_conf_matrix))
  micro_fn <- sum(colSums(svm_conf_matrix) - diag(svm_conf_matrix))

  micro_precision <- micro_tp / (micro_tp + micro_fp)
  micro_recall <- micro_tp / (micro_tp + micro_fn)
  micro_f1 <- 2 * (micro_precision * micro_recall) / (micro_precision + micro_recall)

  cat("Macro Precision:", macro_precision, "\n")
  cat("Macro Recall:", macro_recall, "\n")
  cat("Macro F1 Score:", macro_f1, "\n")

  cat("Micro Precision:", micro_precision, "\n")
  cat("Micro Recall:", micro_recall, "\n")
  cat("Micro F1 Score:", micro_f1, "\n")

  # Overall accuracy. In this branch svm_conf_matrix is a base table, not
  # a caret confusionMatrix object, so it has no $table component — the
  # original svm_conf_matrix$table raised "$ operator is invalid for
  # atomic vectors" and masked the result. Compute directly from the
  # table, mirroring the LDA fallback above.
  overall_accuracy <- sum(diag(svm_conf_matrix)) / sum(svm_conf_matrix)
  cat("Overall Accuracy:", overall_accuracy, "\n")
})

