# Load required packages -------------------------------------------------
# Pin a CRAN mirror so the script runs non-interactively.
# (The original chooseCRANmirror() plus a bare menu answer "15" only
# works in an interactive console and breaks source()/Rscript runs.)
options(repos = c(CRAN = "https://cloud.r-project.org"))

# Install anything missing; requireNamespace() checks availability
# without attaching, library() below does the attaching.
for (pkg in c("caret", "pROC", "MatchIt")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}

library(caret)
library(pROC)
library(MatchIt)

# Set working directory and load the bibliometric dataset
# (path is machine-specific — adjust as needed).
setwd("D:/rCode/关键词热点")
mydata <- read.csv("bibliometric_nuomotu.csv")

# Quick structural checks
dim(mydata)
str(mydata)
colnames(mydata)

# Recode the outcome as a factor with syntactically valid level names
# ("event_0"/"event_1") — caret's classProbs = TRUE requires valid
# R names for the class levels.
mydata$group_sum <- factor(mydata$group_sum, levels = c("0", "1"),
                           labels = c("event_0", "event_1"))
# 1:3 nearest-neighbour propensity-score matching on the covariates
match_formula <- group_sum ~ auth_sum + lan_sum + web_site_sum + jour_sum +
  pt_sum + country_sum + fund_sum + unit_sum + sum + total +
  impact_factor + fen_qu

m.out <- matchit(
  match_formula,
  data = mydata,
  method = "nearest",
  ratio = 3
)

# Extract the matched sample
matched_data <- match.data(m.out)

# Balance diagnostics for the match
summary(m.out)

# Split the matched sample into cases and controls
event_group <- matched_data[which(matched_data$group_sum == "event_1"), ]
control_group <- matched_data[which(matched_data$group_sum == "event_0"), ]

# Report group sizes after matching
cat("Event Group:", nrow(event_group), "\n")
cat("Control Group:", nrow(control_group), "\n")

# Repeated 5x5 cross-validation, scoring candidate models by ROC AUC
cv_control <- trainControl(
  method = "repeatedcv",
  number = 5,
  repeats = 5,
  classProbs = TRUE,
  summaryFunction = twoClassSummary
)

# Logistic regression fitted on the matched sample
model_cv <- train(
  group_sum ~ auth_sum + lan_sum + web_site_sum + jour_sum + pt_sum + country_sum +
    fund_sum + unit_sum + sum + total + impact_factor + fen_qu,
  data = matched_data,
  method = "glm",
  family = "binomial",
  trControl = cv_control,
  metric = "ROC"
)

# Cross-validated performance summary
print(model_cv)

# Build a per-row scorecard from logistic-regression coefficients.
#
# Args:
#   data:         data.frame holding the (numeric) predictor columns.
#   coefficients: named coefficient vector, as from coef(glm_fit).
#   base_score:   anchor score added via the intercept term (default 300).
#   PDO:          "points to double the odds" scaling constant (default 20).
#
# Returns a data.frame with one score column per coefficient plus a
# total_score column. Coefficients whose variable is absent from `data`
# (or non-numeric, e.g. dummy-coded factor levels) contribute 0.
create_score_card <- function(data, coefficients, base_score = 300, PDO = 20) {
  # Each unit of log-odds is worth -PDO / log(2) points.
  scaling_factor <- -PDO / log(2)
  n_rows <- nrow(data)

  # One score column per model coefficient.
  score_columns <- lapply(names(coefficients), function(var_name) {
    coef_value <- coefficients[[var_name]]
    if (var_name == "(Intercept)") {
      # The intercept column carries the base score for every row.
      rep(base_score + coef_value * scaling_factor, n_rows)
    } else if (var_name %in% colnames(data) && is.numeric(data[[var_name]])) {
      data[[var_name]] * coef_value * scaling_factor
    } else {
      # Column missing or not numeric: contributes no points.
      rep(0, n_rows)
    }
  })
  names(score_columns) <- names(coefficients)

  score_card <- as.data.frame(score_columns, check.names = FALSE)
  score_card$total_score <- rowSums(score_card, na.rm = TRUE)
  score_card
}

# Coefficients from the fitted logistic regression
coefficients <- coef(model_cv$finalModel)

# Score every matched record with the scorecard
score_card <- create_score_card(matched_data, coefficients)

# Attach the total score to the matched records
scored_data <- cbind(matched_data, total_score = score_card$total_score)

# Class probabilities from the cross-validated model
predictions <- predict(model_cv, newdata = matched_data, type = "prob")

# Probability of the positive class ("event_1")
predicted_probs <- predictions[["event_1"]]

# Threshold at 0.5; keep factor levels aligned with group_sum
predicted_classes <- factor(
  ifelse(predicted_probs >= 0.5, "event_1", "event_0"),
  levels = c("event_0", "event_1")
)

# ROC curve and AUC, treating "event_1" as the positive class
roc_obj <- roc(matched_data$group_sum == "event_1", predicted_probs)
plot(roc_obj, main="ROC Curve for Matched Data")
auc_value <- auc(roc_obj)
cat("AUC:", auc_value, "\n")

# Confusion matrix with "event_1" as the positive class so that
# precision/recall/F1 describe the event group, consistent with the
# ROC analysis above. Without `positive =`, caret uses the FIRST factor
# level ("event_0") as positive and silently reports metrics for the
# control class instead.
conf_matrix <- confusionMatrix(predicted_classes, matched_data$group_sum,
                               positive = "event_1")
print(conf_matrix)

# Additional evaluation metrics for the positive class
precision <- conf_matrix$byClass["Precision"]
recall <- conf_matrix$byClass["Recall"]
f1_score <- 2 * (precision * recall) / (precision + recall)
cat("Precision:", precision, "\n")
cat("Recall:", recall, "\n")
cat("F1 Score:", f1_score, "\n")

# Evaluate one record: model probability, scorecard score, and
# predicted class.
#
# Args:
#   record:       a one-row data.frame with the model's predictor columns.
#   model:        caret model producing class probabilities ("event_1").
#   coefficients: named coefficient vector for the scorecard.
#   base_score:   scorecard anchor score (default 300).
#   PDO:          points-to-double-the-odds constant (default 20).
# Returns a list with elements probability, score, predicted_class.
evaluate_single_record <- function(record, model, coefficients, base_score = 300, PDO = 20) {
  # Probability of belonging to the event group
  event_prob <- predict(model, newdata = record, type = "prob")[, "event_1"]

  # Scorecard points for the same record
  card <- create_score_card(record, coefficients, base_score, PDO)

  list(
    probability = event_prob,
    score = card$total_score,
    predicted_class = ifelse(event_prob >= 0.5, "event_1", "event_0")
  )
}

# Example: score one record and report its predicted diagnosis
single_record <- matched_data[1, , drop = FALSE]  # first record as a demo
evaluation_result <- evaluate_single_record(single_record, model_cv, coefficients)
colnames(mydata)
# Report the evaluation
cat("Probability of being in the event group:", evaluation_result$probability, "\n")
cat("Score:", evaluation_result$score, "\n")
cat("Predicted Class:", evaluation_result$predicted_class, "\n")



library(dplyr)

# Add model probability, scorecard score, and predicted class for every row.
#
# This replaces the rowwise implementation, which invoked predict() and
# create_score_card() once PER ROW (O(n) model calls) via the deprecated
# dplyr::cur_data(). Both predict() and create_score_card() already
# operate on whole data frames, so a single pass yields identical
# values with one model call.
#
# Args:
#   data:         data.frame/tibble with the model's predictor columns.
#   model:        caret model with class probabilities ("event_1" column).
#   coefficients: named coefficient vector for the scorecard.
#   base_score:   scorecard anchor score (default 300).
#   PDO:          points-to-double-the-odds constant (default 20).
# Returns `data` with group_sum_probability, group_sum_score and
#   group_sum_predicted_class columns appended (in that order).
add_predictions_and_scores_vectorized <- function(data, model, coefficients,
                                                  base_score = 300, PDO = 20) {
  probs <- predict(model, newdata = data, type = "prob")[, "event_1"]
  scores <- create_score_card(data, coefficients, base_score, PDO)

  data$group_sum_probability <- probs
  data$group_sum_score <- scores$total_score
  data$group_sum_predicted_class <- ifelse(probs >= 0.5, "event_1", "event_0")
  data
}

# Score the full (unmatched) dataset
mydata_with_predictions <- add_predictions_and_scores_vectorized(mydata, model_cv, coefficients)
colnames(mydata_with_predictions)
# Inspect the first rows
head(mydata_with_predictions)

# Count rows where the predicted class agrees with the observed group
matching_rows <- sum(
  mydata_with_predictions$group_sum == mydata_with_predictions$group_sum_predicted_class,
  na.rm = TRUE
)

# Report the agreement count
cat("Number of rows where group_sum and group_sum_predicted_class match:", matching_rows, "\n")

# Optional: accuracy = matching rows / total rows
total_rows <- nrow(mydata_with_predictions)
accuracy <- matching_rows / total_rows
# Persist the scored dataset to CSV
write.csv(mydata_with_predictions, file = "mydata_with_predictions.csv", row.names = FALSE)
