# Behavioral data linear mixed effects model (including word frequency)
# Behavioral Data Linear Mixed Effects Model with Word Frequency

# Clean workspace
# NOTE(review): clearing the global environment inside a script is fragile when
# the script is sourced from another session; kept because the workflow relies on it.
rm(list = ls())  # Remove all objects
graphics.off()    # Close all graphics devices
cat("\014")      # Clear console

# Install (if missing) and attach every package this script depends on.
# require() returns FALSE instead of erroring, so it is used only as the
# availability check; library() then attaches the package and fails loudly
# if installation did not succeed.
packages <- c(
  "tidyverse",  # data manipulation
  "readxl",     # reading Excel files
  "writexl",    # writing Excel files
  "moments",    # skewness and kurtosis calculations
  "nortest",    # Anderson-Darling normality test
  "lme4",       # mixed effects models
  "car",        # Levene test and additional diagnostics
  "lattice",    # plotting
  "ggplot2",    # visualization
  "DHARMa",     # residual diagnostics
  "emmeans"     # simple effects analysis
)
for (pkg in packages) {
  if (!require(pkg, character.only = TRUE, quietly = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}

# Load the behavioral data table from a CSV or Excel file.
#
# Normalizes column names (spaces -> underscores) and coerces the design
# variables to factors.
#
# Args:
#   file_path:    path to a .csv, .xlsx or .xls file.
#   sheet_number: Excel sheet to read (default 3; ignored for CSV input).
#
# Returns: a tibble with Subject, Semantic_Relatedness, Priming_Word_Type and
#   Group as factors. Errors on unsupported file formats or when a required
#   design column is missing after name normalization.
load_behavior_data <- function(file_path, sheet_number = 3) {
  # Determine file extension (lowercased so "data.XLSX" is also accepted)
  file_ext <- tolower(tools::file_ext(file_path))
  
  # Load data based on file type
  data <- if (file_ext == "csv") {
    read.csv(file_path)
  } else if (file_ext %in% c("xlsx", "xls")) {
    read_excel(file_path, sheet = sheet_number)  # Specify sheet
  } else {
    stop("Unsupported file format. Please provide a CSV or Excel file.")
  }
  
  # Convert to tibble for better handling
  data <- as_tibble(data)
  
  # Normalize variable names: replace spaces with underscores
  names(data) <- gsub(" ", "_", names(data))
  
  # Fail early with a clear message if a design column is absent,
  # instead of an opaque mutate() error below
  required_cols <- c("Subject", "Semantic_Relatedness", "Priming_Word_Type", "Group")
  missing_cols <- setdiff(required_cols, names(data))
  if (length(missing_cols) > 0) {
    stop("Missing required column(s): ", paste(missing_cols, collapse = ", "))
  }
  
  # Ensure proper column types
  data <- data %>%
    mutate(
      Subject = as.factor(Subject),
      Semantic_Relatedness = as.factor(Semantic_Relatedness),
      Priming_Word_Type = as.factor(Priming_Word_Type),
      Group = as.factor(Group)
    )
  
  return(data)
}

# Load one sheet of the word-frequency workbook.
#
# Normalizes column names to lowercase with underscores, strips non-numeric
# characters from `frequency`, and lowercases `word` for case-insensitive
# matching against the behavioral prime words.
#
# Args:
#   file_path: path to the Excel workbook.
#   sheet_num: sheet index to read.
#
# Returns: a tibble with (at least) `word` (lowercase character) and
#   `frequency` (numeric; NA where the original cell held no digits).
load_word_frequency <- function(file_path, sheet_num) {
  # Load word frequency data from specified sheet
  freq_data <- read_excel(file_path, sheet = sheet_num)
  
  # Convert to tibble and clean column names
  freq_data <- as_tibble(freq_data)
  names(freq_data) <- gsub(" ", "_", names(freq_data))
  
  # Convert column names to lowercase for consistency
  names(freq_data) <- tolower(names(freq_data))
  
  # Print debugging information.
  # cat() is used instead of print() so "\n" renders as a real newline rather
  # than the two literal characters `\n`.
  cat("\nLoading frequency data from sheet", sheet_num, "\n")
  cat("Column names:\n")
  print(names(freq_data))
  cat("First few rows:\n")
  print(head(freq_data))
  cat("Data structure:\n")
  str(freq_data)  # str() prints itself; wrapping it in print() emitted a stray NULL
  
  # Clean and convert frequency data
  freq_data <- freq_data %>%
    mutate(
      # Remove any non-numeric characters and convert to numeric
      frequency = as.numeric(gsub("[^0-9.]", "", as.character(frequency))),
      # Convert word to lowercase for consistent matching
      word = tolower(word)
    )
  
  # Print summary after cleaning
  cat("\nAfter cleaning:\n")
  cat("Summary of frequency values:\n")
  print(summary(freq_data$frequency))
  cat("Number of NA values:\n")
  print(sum(is.na(freq_data$frequency)))
  
  return(freq_data)
}

# Set input file paths.
# NOTE(review): paths are relative — the script assumes the working directory
# is the project root containing source_data/; confirm before running.
behavior_file <- "source_data/Ch_Ja_Lex_Behavior_2Group_29Sub_for R.xlsx"
freq_file <- "source_data/日语词频计算2025年4月5日.xlsx"

# Create result folder name from input file name
result_folder <- tools::file_path_sans_ext(basename(behavior_file))
result_folder <- gsub("_for R", "", result_folder)  # Remove "_for R" suffix
result_path <- file.path("results", result_folder, "mixed_effects_with_frequency")

# Create result folder (recursive = TRUE also creates the parent "results/" dir)
if (!dir.exists(result_path)) {
  dir.create(result_path, recursive = TRUE)
  print(paste("Created result folder:", result_path))
}

# Load behavior data (sheet 3 of the workbook holds the trial-level data)
print("Loading behavior data...")
behavior_data <- load_behavior_data(behavior_file, sheet_number = 3)

# Create log_RT variable — the dependent variable for the mixed model.
# NOTE(review): log() assumes every RT is strictly positive; zero or negative
# RTs would become -Inf/NaN here — confirm upstream cleaning guarantees RT > 0.
print("Creating log-transformed RT...")
behavior_data <- behavior_data %>%
  mutate(log_RT = log(RT))

# Load word frequency data from both sheets.
# NOTE(review): sheet 4 is treated as the Chinese frequencies and sheet 5 as
# the Japanese ones (inferred from the labels below) — confirm against the workbook.
print("Loading word frequency data...")
freq_data_sheet4 <- load_word_frequency(freq_file, sheet_num = 4)
freq_data_sheet5 <- load_word_frequency(freq_file, sheet_num = 5)

# Print frequency data summaries before merge (sanity check for row counts
# and value ranges before the joins below)
message("\nChinese frequency data summary before merge:")
message("Number of rows: ", dim(freq_data_sheet4)[1])
print(summary(freq_data_sheet4))

message("\nJapanese frequency data summary before merge:")
message("Number of rows: ", dim(freq_data_sheet5)[1])
print(summary(freq_data_sheet5))

# Print detailed data inspection
message("\nDetailed data inspection:")
message("Chinese frequency data structure:")
str(freq_data_sheet4)
message("\nJapanese frequency data structure:")
str(freq_data_sheet5)

# Print sample rows
message("\nSample rows from Chinese frequency data:")
print(head(freq_data_sheet4))
message("\nSample rows from Japanese frequency data:")
print(head(freq_data_sheet5))

# Print column names
message("\nChinese frequency data columns:")
print(names(freq_data_sheet4))
message("\nJapanese frequency data columns:")
print(names(freq_data_sheet5))

# Print some sample prime words from behavior data (the join key on the
# behavioral side is the lowercased prime word)
message("\nSample of Prime words from behavior data:")
print(head(behavior_data$Prime_word))
message("Number of unique Prime words: ", length(unique(behavior_data$Prime_word)))

# Merge word frequency data with behavior data.
# NOTE(review): left_join assumes each `word` appears at most once per
# frequency sheet; a duplicated word would silently duplicate behavioral
# trials — verify uniqueness in the frequency sheets.
message("\nMerging word frequency data...")
behavior_data <- behavior_data %>%
  mutate(Prime_word_lower = tolower(Prime_word)) %>%  # Create lowercase version of Prime_word
  # Join Chinese frequency data
  left_join(freq_data_sheet4, by = c("Prime_word_lower" = "word")) %>%
  rename(Chinese_Freq = frequency) %>%
  # Join Japanese frequency data
  left_join(freq_data_sheet5, by = c("Prime_word_lower" = "word")) %>%
  rename(Japanese_Freq = frequency) %>%
  # Remove temporary column
  select(-Prime_word_lower) %>%
  # Handle missing values (set to 0) and adjust Japanese frequency.
  # NOTE(review): unmatched words get frequency 0, i.e. "not in corpus";
  # the (x / 11) * 12 rescaling presumably converts an 11-unit corpus count
  # to a 12-unit equivalent — TODO confirm the rationale with the data owner.
  mutate(
    Chinese_Freq = ifelse(is.na(Chinese_Freq), 0, Chinese_Freq),
    Japanese_Freq = ifelse(is.na(Japanese_Freq), 0, Japanese_Freq),
    Japanese_Freq = (Japanese_Freq / 11) * 12  # Adjust Japanese frequency
  )

# Print detailed merge results (row count should be unchanged by the joins)
message("\nAfter merging - detailed inspection:")
message("Number of rows in merged data: ", nrow(behavior_data))
message("Column names in merged data:")
print(names(behavior_data))

message("\nFrequency data summary after merge and Japanese frequency adjustment:")
message("Chinese frequency summary:")
print(summary(behavior_data$Chinese_Freq))
message("Number of zeros in Chinese frequency: ", sum(behavior_data$Chinese_Freq == 0))
message("Number of NAs in Chinese frequency: ", sum(is.na(behavior_data$Chinese_Freq)))

message("\nJapanese frequency summary (after adjustment):")
print(summary(behavior_data$Japanese_Freq))
message("Number of zeros in Japanese frequency: ", sum(behavior_data$Japanese_Freq == 0))
message("Number of NAs in Japanese frequency: ", sum(is.na(behavior_data$Japanese_Freq)))

# Print sample of merged data
message("\nSample of merged data (first few rows):")
print(head(select(behavior_data, Prime_word, Chinese_Freq, Japanese_Freq)))

# Calculate z-scores for both frequency measures.
# cat() is used for status lines because print("\n...") shows the two literal
# characters `\n` instead of a newline.
cat("\nCalculating z-scores for frequency data...\n")
behavior_data <- behavior_data %>%
  mutate(
    # scale() returns a 1-column matrix; [, 1] drops it back to a plain vector.
    # NOTE(review): a constant frequency column would give sd = 0 and NaN
    # z-scores — assumed not to occur with real corpus data.
    Chinese_Freq_z = scale(Chinese_Freq, center = TRUE, scale = TRUE)[,1],
    Japanese_Freq_z = scale(Japanese_Freq, center = TRUE, scale = TRUE)[,1]
  )

# Print summary of z-scores
cat("\nZ-score summaries:\n")
cat("Chinese frequency z-scores:\n")
print(summary(behavior_data$Chinese_Freq_z))
cat("Japanese frequency z-scores:\n")
print(summary(behavior_data$Japanese_Freq_z))

# Create item variable for the by-item random effects (instead of "iterm")
behavior_data$item <- behavior_data$Prime_word  # Changed from Target_word to Prime_word
behavior_data$item <- as.factor(behavior_data$item)

# Model specification.
# The formula is kept as a single text string (reused verbatim in the reports)
# and converted with as.formula(), instead of maintaining two parallel copies
# — the original embedded `#` comments inside the parsed formula string, which
# is fragile. Structure:
#   - full 3-way interaction of the experimental conditions
#   - Chinese frequency main effect (every prime word has a Chinese frequency)
#   - frequency-by-prime-type interactions for both languages
#   - by-subject random slopes for the within-subject factors
#   - by-item random intercept plus a random slope for Group
model_formula_text <- paste(
  "log_RT ~ Semantic_Relatedness * Priming_Word_Type * Group +",
  "Chinese_Freq_z + Chinese_Freq_z:Priming_Word_Type +",
  "Japanese_Freq_z:Priming_Word_Type +",
  "(1 + Semantic_Relatedness + Priming_Word_Type | Subject) +",
  "(1 + Group | item)"
)
model_formula <- as.formula(model_formula_text)

print(paste("Model formula:", model_formula_text))

print("Fitting linear mixed-effects model...")
mixed_model <- lmer(model_formula, data = behavior_data)
print("Model fitting completed")

# Print model summary (cat() renders the "\n" banner correctly; print() showed
# the escape sequence literally)
cat("\n============= Model Summary =============\n")
model_summary <- summary(mixed_model)
print(model_summary)

# Simple effects analysis.
# pbkrtest.limit is raised so emmeans does not skip the Kenward-Roger
# degrees-of-freedom computation for this many observations.
cat("\n============= Simple Effects Analysis =============\n")

# 1. Priming_Word_Type contrasts within each Group
cat("\nSimple effects analysis by Group:\n")
emm_group <- emmeans(mixed_model, pairwise ~ Priming_Word_Type|Group, pbkrtest.limit = 6960)
print(emm_group)

# 2. Group contrasts within each level of Semantic Relatedness
cat("\nSimple effects analysis by Semantic Relatedness:\n")
emm_relatedness <- emmeans(mixed_model, pairwise ~ Group|Semantic_Relatedness, pbkrtest.limit = 6960)
print(emm_relatedness)

# 3. Group contrasts within each Priming Word Type
cat("\nSimple effects analysis by Priming Word Type:\n")
emm_priming <- emmeans(mixed_model, pairwise ~ Group|Priming_Word_Type, pbkrtest.limit = 6960)
print(emm_priming)

# Save simple effects results to a plain-text file
simple_effects_file <- file.path(result_path, "simple_effects_analysis.txt")
simple_effects_results <- capture.output({
  cat("\n============= Simple Effects Analysis =============\n")
  
  cat("\n1. Simple effects by Group:\n")
  print(emm_group)
  
  cat("\n2. Simple effects by Semantic Relatedness:\n")
  print(emm_relatedness)
  
  cat("\n3. Simple effects by Priming Word Type:\n")
  print(emm_priming)
})
writeLines(simple_effects_results, simple_effects_file)
print(paste("Simple effects analysis results saved to:", simple_effects_file))

# Create model results analysis report
cat("\nCreating model results analysis report...\n")
model_results_report_file <- file.path(result_path, "模型结果分析报告.txt")

# Calculate model overview statistics (reused in the header below — the
# original recomputed them inline and ignored these variables)
n_observations <- nrow(behavior_data)
n_subjects <- length(unique(behavior_data$Subject))
n_items <- length(unique(behavior_data$item))

# Write report header; append = FALSE starts a fresh file.
# model_formula_text is reused so the report always matches the fitted model
# instead of a hand-maintained copy of the formula.
cat(paste0(
  "\n线性混合效应模型结果分析报告\n",
  "================================\n\n",
  "1. 模型概述\n",
  "   模型公式：", model_formula_text, "\n",
  "   数据观测数：", n_observations, "\n",
  "   受试者数量：", n_subjects, "\n",
  "   项目数量：", n_items, "\n\n"
), file = model_results_report_file, append = FALSE)

# Extract fixed effects and their significance.
# NOTE(review): lmer reports no p-values; these are Wald z-approximations
# (t treated as standard normal), which is anti-conservative for small samples.
fixed_effects <- fixef(mixed_model)
fixed_effects_se <- sqrt(diag(vcov(mixed_model)))
fixed_effects_t <- fixed_effects / fixed_effects_se
fixed_effects_p <- 2 * (1 - pnorm(abs(fixed_effects_t)))

# Map a single p-value onto the conventional significance marker:
# " ***" (< .001), " **" (< .01), " *" (< .05), " ." (< .1), "" otherwise.
get_significance_markers <- function(p_value) {
  if (p_value < 0.001) return(" ***")
  if (p_value < 0.01) return(" **")
  if (p_value < 0.05) return(" *")
  if (p_value < 0.1) return(" .")
  ""
}

# Create fixed effects table with significance markers.
# Rounding to 4 decimals before paste means very small p-values appear as "0"
# (with their " ***" marker) rather than in scientific notation.
fixed_effects_table <- data.frame(
  Estimate = fixed_effects,
  SE = fixed_effects_se,
  t_value = fixed_effects_t,
  p_value = paste0(round(fixed_effects_p, 4), sapply(fixed_effects_p, get_significance_markers))  # Add markers directly to p-values
)

# Write fixed effects results (section 2 of the Chinese-language report)
cat("\n2. 固定效应结果\n   以下报告所有固定效应的估计值、标准误、t值和p值：\n\n", 
    file = model_results_report_file, append = TRUE)
capture.output(
  {
    print(fixed_effects_table, digits = 4)
    cat("\nSignificance codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n")
  },
  file = model_results_report_file,
  append = TRUE
)

# Add random effects results (section 3: variance components of each
# random-effect term; comp selects both variance and SD columns)
cat("\n3. 随机效应结果\n   报告随机效应的方差组分：\n\n", file = model_results_report_file, append = TRUE)
random_effects <- VarCorr(mixed_model)
capture.output(
  print(random_effects, comp = c("Variance", "Std.Dev.")),
  file = model_results_report_file,
  append = TRUE
)

# Write the simple effects section (section 4) of the report.
cat("\n4. 简单效应分析\n   以下报告不同条件下的简单效应分析结果：\n\n", 
    file = model_results_report_file, append = TRUE)

# The emmeans objects (emm_group, emm_relatedness, emm_priming) were already
# computed above with pbkrtest.limit raised. Recomputing them here — as the
# original did, without that option — repeats the expensive degrees-of-freedom
# work and can make emmeans fall back to asymptotic results for large data,
# so the existing objects are reused directly.

capture.output(
  {
    cat("\n   4.1 按组别的简单效应\n")
    print(emm_group)
    cat("\n   4.2 按语义关联性的简单效应\n")
    print(emm_relatedness)
    cat("\n   4.3 按启动词类型的简单效应\n")
    print(emm_priming)
  },
  file = model_results_report_file,
  append = TRUE
)

# Add interpretation with significance markers (section 5 of the report).
# NOTE(review): the coefficient names indexed below ("Groupexperimental",
# "Semantic_RelatednessSemantic Unrelated", "Priming_Word_TypeHomograph" and
# their interaction combinations) assume those exact factor level labels and
# the default treatment contrasts. If the Excel data uses different level
# names, the lookups return NA and the ifelse() calls fail — confirm against
# names(fixef(mixed_model)) before trusting this section.
cat(paste0(
  "\n5. 结果解释\n",
  "   5.1 主效应分析\n",
  "   - Group主效应：", ifelse(fixed_effects_p["Groupexperimental"] < 0.05, 
                           paste0("显著 (p = ", round(fixed_effects_p["Groupexperimental"], 4), get_significance_markers(fixed_effects_p["Groupexperimental"]), ")"),
                           paste0("不显著 (p = ", round(fixed_effects_p["Groupexperimental"], 4), ")")), "\n",
  "   - Semantic_Relatedness主效应：", ifelse(fixed_effects_p["Semantic_RelatednessSemantic Unrelated"] < 0.05,
                                         paste0("显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated"], 4), get_significance_markers(fixed_effects_p["Semantic_RelatednessSemantic Unrelated"]), ")"),
                                         paste0("不显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated"], 4), ")")), "\n",
  "   - Priming_Word_Type主效应：", ifelse(fixed_effects_p["Priming_Word_TypeHomograph"] < 0.05,
                                      paste0("显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph"], 4), get_significance_markers(fixed_effects_p["Priming_Word_TypeHomograph"]), ")"),
                                      paste0("不显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph"], 4), ")")), "\n",
  "   - Chinese_Freq_z主效应：", ifelse(fixed_effects_p["Chinese_Freq_z"] < 0.05,
                                   paste0("显著 (p = ", round(fixed_effects_p["Chinese_Freq_z"], 4), get_significance_markers(fixed_effects_p["Chinese_Freq_z"]), ")"),
                                   paste0("不显著 (p = ", round(fixed_effects_p["Chinese_Freq_z"], 4), ")")), "\n",
  "   - Japanese_Freq_z:Priming_Word_Type交互：", ifelse(fixed_effects_p["Priming_Word_TypeHomograph:Japanese_Freq_z"] < 0.05,
                                                    paste0("显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Japanese_Freq_z"], 4), get_significance_markers(fixed_effects_p["Priming_Word_TypeHomograph:Japanese_Freq_z"]), ")"),
                                                    paste0("不显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Japanese_Freq_z"], 4), ")")), "\n\n",
  
  "   5.2 交互作用分析\n",
  "   - Group × Semantic_Relatedness：", ifelse(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Groupexperimental"] < 0.05,
                                           paste0("显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Groupexperimental"], 4), get_significance_markers(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Groupexperimental"]), ")"),
                                           paste0("不显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Groupexperimental"], 4), ")")), "\n",
  "   - Group × Priming_Word_Type：", ifelse(fixed_effects_p["Priming_Word_TypeHomograph:Groupexperimental"] < 0.05,
                                        paste0("显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Groupexperimental"], 4), get_significance_markers(fixed_effects_p["Priming_Word_TypeHomograph:Groupexperimental"]), ")"),
                                        paste0("不显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Groupexperimental"], 4), ")")), "\n",
  "   - Semantic_Relatedness × Priming_Word_Type：", ifelse(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph"] < 0.05,
                                                      paste0("显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph"], 4), get_significance_markers(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph"]), ")"),
                                                      paste0("不显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph"], 4), ")")), "\n",
  "   - Chinese_Freq_z × Priming_Word_Type：", ifelse(fixed_effects_p["Priming_Word_TypeHomograph:Chinese_Freq_z"] < 0.05,
                                                paste0("显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Chinese_Freq_z"], 4), get_significance_markers(fixed_effects_p["Priming_Word_TypeHomograph:Chinese_Freq_z"]), ")"),
                                                paste0("不显著 (p = ", round(fixed_effects_p["Priming_Word_TypeHomograph:Chinese_Freq_z"], 4), ")")), "\n",
  "   - 三阶交互：", ifelse(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph:Groupexperimental"] < 0.05,
                        paste0("显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph:Groupexperimental"], 4), get_significance_markers(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph:Groupexperimental"]), ")"),
                        paste0("不显著 (p = ", round(fixed_effects_p["Semantic_RelatednessSemantic Unrelated:Priming_Word_TypeHomograph:Groupexperimental"], 4), ")")), "\n\n",
  
  "6. 随机效应解释\n",
  "   - Subject随机截距的方差表示个体间反应时的变异程度\n",
  "   - Item随机截距和Group随机斜率的方差表示项目间的变异程度和Group效应在项目间的变化\n\n",
  
  "7. 总结\n",
  "   根据以上分析结果，主要发现：\n",
  "   - ", ifelse(any(fixed_effects_p[c("Groupexperimental", "Semantic_RelatednessSemantic Unrelated", "Priming_Word_TypeHomograph", "Chinese_Freq_z")] < 0.05),
               "存在显著的主效应：\n     ",
               "未发现显著的主效应\n"), 
  ifelse(fixed_effects_p["Groupexperimental"] < 0.05, "Group效应显著\n     ", ""),
  ifelse(fixed_effects_p["Semantic_RelatednessSemantic Unrelated"] < 0.05, "语义关联性效应显著\n     ", ""),
  ifelse(fixed_effects_p["Priming_Word_TypeHomograph"] < 0.05, "启动词类型效应显著\n     ", ""),
  ifelse(fixed_effects_p["Chinese_Freq_z"] < 0.05, "中文词频效应显著\n     ", ""),
  "   - ", ifelse(any(fixed_effects_p[grep(":", names(fixed_effects_p))] < 0.05),
               "存在显著的交互作用：\n     ",
               "未发现显著的交互作用\n"),
  "\n注：所有效应的具体方向和大小请参考上述详细统计结果。\n\n"
), file = model_results_report_file, append = TRUE)

# Add final timestamp
cat(paste0("\n分析日期：", format(Sys.Date(), "%Y年%m月%d日"), "\n"),
    file = model_results_report_file,
    append = TRUE)

print(paste("Model results analysis report saved to:", model_results_report_file))

# Extract residuals and fitted values for the assumption checks below.
# NOTE(review): the name `residuals` masks stats::residuals() from here on;
# downstream code only uses the vector, so this works, but it is easy to trip over.
residuals <- resid(mixed_model)
fitted_values <- fitted(mixed_model)

# Save the full lmer summary as plain text
model_results <- capture.output(print(model_summary))
model_results_file <- file.path(result_path, "mixed_model_summary.txt")
writeLines(model_results, model_results_file)
print(paste("Model summary saved to:", model_results_file))

cat("\n============= Checking Mixed Effects Model Assumptions =============\n")

cat("\n1. Normality of Residuals\n")
# Calculate descriptive statistics for residuals
residuals_mean <- mean(residuals)
residuals_median <- median(residuals)
residuals_sd <- sd(residuals)
residuals_skewness <- skewness(residuals)   # moments::skewness
residuals_kurtosis <- kurtosis(residuals)   # moments::kurtosis (raw; normal = 3)

print("Residuals descriptive statistics:")
print(paste("Mean:", round(residuals_mean, 4)))
print(paste("Median:", round(residuals_median, 4)))
print(paste("Standard Deviation:", round(residuals_sd, 4)))
print(paste("Skewness:", round(residuals_skewness, 4)))
print(paste("Kurtosis:", round(residuals_kurtosis, 4)))

# Normality tests
cat("\nNormality Tests:\n")
# shapiro.test() accepts at most 5000 observations. A deterministic, evenly
# spaced subsample is taken instead of sample() so the reported p-value is
# reproducible across runs (the unseeded sample() changed every execution).
shapiro_input <- if (length(residuals) > 5000) {
  residuals[unique(round(seq(1, length(residuals), length.out = 5000)))]
} else {
  residuals
}
shapiro_test <- shapiro.test(shapiro_input)
print("Shapiro-Wilk test:")
print(shapiro_test)

# Anderson-Darling handles the full sample, no subsampling needed
ad_test <- ad.test(residuals)
cat("\nAnderson-Darling test:\n")
print(ad_test)

# Skewness and kurtosis interpretations.
# Boundary fix: the original strict `> 1` / `> 5` tests let the boundary
# values fall through to the final else branch, so skewness == 1 was labeled
# "negative skew" and kurtosis == 5 was labeled "too flat". `>=` closes the gap.
skew_interpretation <- if(abs(residuals_skewness) < 1) {
  "偏度在可接受范围内 (|值| < 1)，分布呈现良好的对称性"
} else if(residuals_skewness >= 1) {
  "呈现正偏度，分布右侧有长尾，数据分布向右偏"
} else {
  "呈现负偏度，分布左侧有长尾，数据分布向左偏"
}

# moments::kurtosis() reports raw kurtosis (normal = 3), hence the "- 3"
kurt_interpretation <- if(abs(residuals_kurtosis - 3) < 2) {
  "峰度在可接受范围内 (|超额峰度| < 2)，分布尖峭度正常"
} else if(residuals_kurtosis >= 5) {
  "峰度过高，分布过于尖峭（厚尾），极端值较多"
} else {
  "峰度过低，分布较为平坦（薄尾），极端值较少"
}

# cat() renders the leading "\n"; print() showed it literally
cat("\nSkewness interpretation:\n")
print(skew_interpretation)
cat("\nKurtosis interpretation:\n")
print(kurt_interpretation)

# Visual inspection - QQ plot of residuals against the theoretical normal
qq_plot_file <- file.path(result_path, "residuals_QQ_plot.png")
png(qq_plot_file, width = 800, height = 600)
qqnorm(residuals, main = "Q-Q Plot of Model Residuals")
qqline(residuals, col = "red")
dev.off()
print(paste("Q-Q Plot saved to:", qq_plot_file))

# Histogram with fitted normal overlay (prob = TRUE puts the histogram on the
# density scale so the normal curve is directly comparable)
hist_plot_file <- file.path(result_path, "residuals_histogram.png")
png(hist_plot_file, width = 800, height = 600)
hist(residuals, breaks = 30, main = "Histogram of Model Residuals", 
     xlab = "Residuals", prob = TRUE)
curve(dnorm(x, mean = mean(residuals), sd = sd(residuals)), 
      add = TRUE, col = "red", lwd = 2)
dev.off()
print(paste("Histogram saved to:", hist_plot_file))

# Kernel density of residuals vs the matching normal curve
density_plot_file <- file.path(result_path, "residuals_density_plot.png")
png(density_plot_file, width = 800, height = 600)
plot(density(residuals), main = "Density Plot of Model Residuals")
curve(dnorm(x, mean = mean(residuals), sd = sd(residuals)), 
      add = TRUE, col = "red", lwd = 2)
legend("topright", legend = c("Residuals", "Normal Distribution"), 
       col = c("black", "red"), lwd = 2)
dev.off()
print(paste("Density plot saved to:", density_plot_file))

# Residuals vs Fitted values — checks for structure/nonlinearity; the lowess
# smoother should stay flat around zero
resid_fitted_file <- file.path(result_path, "residuals_vs_fitted.png")
png(resid_fitted_file, width = 800, height = 600)
plot(fitted_values, residuals, 
     main = "Residuals vs Fitted Values",
     xlab = "Fitted Values", ylab = "Residuals")
abline(h = 0, col = "red", lwd = 2)
lines(lowess(fitted_values, residuals), col = "blue", lwd = 2)
dev.off()
print(paste("Residuals vs Fitted Values plot saved to:", resid_fitted_file))

print("\n2. Homogeneity of Variance")
# Scale-Location plot (square root of standardized residuals vs fitted values).
# NOTE(review): residuals are standardized by the overall sd only, with no
# leverage adjustment — a common approximation for mixed models.
std_residuals <- residuals / sd(residuals)
scale_location_file <- file.path(result_path, "scale_location_plot.png")
png(scale_location_file, width = 800, height = 600)
plot(fitted_values, sqrt(abs(std_residuals)),
     main = "Scale-Location Plot",
     xlab = "Fitted Values", 
     ylab = "√|Standardized Residuals|")
lines(lowess(fitted_values, sqrt(abs(std_residuals))), col = "red", lwd = 2)
dev.off()
print(paste("Scale-Location plot saved to:", scale_location_file))

# Check homogeneity of the model residuals' variance across each design
# factor with Levene's test (car::leveneTest is robust to non-normality).
# cat() is used for the headers so the leading "\n" prints as a real newline.
cat("\nLevene's Test for homogeneity of variance across groups:\n")
levene_test <- car::leveneTest(residuals ~ Group, data = data.frame(residuals = residuals, Group = behavior_data$Group))
print(levene_test)

cat("\nLevene's Test for homogeneity of variance across Relatedness:\n")
levene_test_sem <- car::leveneTest(residuals ~ Semantic_Relatedness, data = data.frame(residuals = residuals, Semantic_Relatedness = behavior_data$Semantic_Relatedness))
print(levene_test_sem)

cat("\nLevene's Test for homogeneity of variance across Priming:\n")
levene_test_pwt <- car::leveneTest(residuals ~ Priming_Word_Type, data = data.frame(residuals = residuals, Priming_Word_Type = behavior_data$Priming_Word_Type))
print(levene_test_pwt)

# Using DHARMa package for advanced, simulation-based residual diagnostics
cat("\n3. DHARMa residual diagnostics\n")
# Create DHARMa scaled-residual object
dharma_residuals <- simulateResiduals(fittedModel = mixed_model)

# DHARMa residuals plot
dharma_plot_file <- file.path(result_path, "dharma_residuals_plot.png")
png(dharma_plot_file, width = 1000, height = 800)
plot(dharma_residuals)
dev.off()
print(paste("DHARMa residuals plot saved to:", dharma_plot_file))

# Save DHARMa tests results. cat() is used inside capture.output() so the
# section headers land in the file as plain text with real newlines —
# print("\n...") wrote a literal backslash-n plus a "[1]" prefix.
dharma_tests_file <- file.path(result_path, "dharma_tests.txt")
test_output <- capture.output({
  cat("DHARMa tests for residual uniformity:\n")
  testUniformity(dharma_residuals)
  
  cat("\nDHARMa tests for residual dispersion:\n")
  testDispersion(dharma_residuals)
  
  cat("\nDHARMa tests for residual outliers:\n")
  testOutliers(dharma_residuals)
})
writeLines(test_output, dharma_tests_file)
print(paste("DHARMa diagnostic tests saved to:", dharma_tests_file))

# Create a comprehensive diagnosis report
cat("\nCreating comprehensive diagnosis report...\n")
report_file <- file.path(result_path, "线性混合效应模型假设检验报告.txt")

# Overall normality verdict, combining the shape statistics with both formal
# tests: "satisfied" requires ideal shape and p >= .01 on both tests;
# "approximately satisfied" tolerates |skew| < 1.5 / |kurtosis - 3| < 3 with
# p >= .001 on at least one test; anything else is flagged.
# (These exact strings are compared with == in the report below — do not edit.)
diagnosis_result <- if(
  (abs(residuals_skewness) < 1 && abs(residuals_kurtosis - 3) < 2) &&
  shapiro_test$p.value >= 0.01 && 
  ad_test$p.value >= 0.01
) {
  "残差整体满足正态分布假设"
} else if(
  (abs(residuals_skewness) < 1.5 && abs(residuals_kurtosis - 3) < 3) &&
  (shapiro_test$p.value >= 0.001 || ad_test$p.value >= 0.001)
) {
  "残差近似满足正态分布假设，略有偏离但在可接受范围内"
} else {
  "残差偏离正态分布假设，建议考虑数据转换或使用稳健方法"
}

# Variance-homogeneity verdict from the three Levene tests: all p >= .05 is a
# pass, all p >= .01 a marginal pass, otherwise a violation.
variance_result <- if(
  levene_test$`Pr(>F)`[1] >= 0.05 && 
  levene_test_sem$`Pr(>F)`[1] >= 0.05 && 
  levene_test_pwt$`Pr(>F)`[1] >= 0.05
) {
  "不同组间的方差同质性假设满足"
} else if(
  (levene_test$`Pr(>F)`[1] >= 0.01 && 
   levene_test_sem$`Pr(>F)`[1] >= 0.01 && 
   levene_test_pwt$`Pr(>F)`[1] >= 0.01)
) {
  "不同组间的方差略有差异，但在可接受范围内"
} else {
  "不同组间的方差存在明显差异，可能违反方差同质性假设"
}

# Assemble and write the final assumption-check report.
# Bug fix: cat() has no `fileEncoding` argument — in the original call the
# unmatched `fileEncoding = "UTF-8"` fell into `...` and the literal text
# " UTF-8" was appended to the end of the report. Writing through an explicit
# UTF-8 connection fixes that and actually guarantees the encoding.
report_con <- file(report_file, open = "w", encoding = "UTF-8")
cat(paste0(
  "行为数据线性混合效应模型假设检验报告\n",
  "================================\n\n",
  "1. 模型公式\n",
  "   ", model_formula_text, "\n\n",
  "2. 残差正态性检验\n",
  "   - 基本统计量\n",
  "     均值: ", round(residuals_mean, 4), "\n",
  "     中位数: ", round(residuals_median, 4), "\n",
  "     标准差: ", round(residuals_sd, 4), "\n",
  "     偏度: ", round(residuals_skewness, 4), " (判据: |值| < 1 为理想, |值| < 1.5 为可接受)\n",
  "     峰度: ", round(residuals_kurtosis, 4), " (判据: |值-3| < 2 为理想, |值-3| < 3 为可接受)\n\n",
  "   - 正态性检验结果\n",
  "     Shapiro-Wilk检验: p = ", round(shapiro_test$p.value, 4), 
  " (", ifelse(shapiro_test$p.value >= 0.05, "满足正态性假设", 
           ifelse(shapiro_test$p.value >= 0.01, "略微偏离正态性", "偏离正态性")), ")\n",
  "     Anderson-Darling检验: p = ", round(ad_test$p.value, 4),
  " (", ifelse(ad_test$p.value >= 0.05, "满足正态性假设", 
           ifelse(ad_test$p.value >= 0.01, "略微偏离正态性", "偏离正态性")), ")\n\n",
  "   - 偏度解释\n     ", skew_interpretation, "\n\n",
  "   - 峰度解释\n     ", kurt_interpretation, "\n\n",
  "   - 结论\n     ", diagnosis_result, "\n\n",
  "3. 方差同质性检验\n",
  "   - Group分组方差同质性 (Levene检验): p = ", round(levene_test$`Pr(>F)`[1], 4), 
  " (", ifelse(levene_test$`Pr(>F)`[1] >= 0.05, "满足方差同质性假设", 
           ifelse(levene_test$`Pr(>F)`[1] >= 0.01, "略微违反方差同质性", "违反方差同质性")), ")\n",
  "   - Semantic_Relatedness分组方差同质性: p = ", round(levene_test_sem$`Pr(>F)`[1], 4),
  " (", ifelse(levene_test_sem$`Pr(>F)`[1] >= 0.05, "满足方差同质性假设", 
           ifelse(levene_test_sem$`Pr(>F)`[1] >= 0.01, "略微违反方差同质性", "违反方差同质性")), ")\n",
  "   - Priming_Word_Type分组方差同质性: p = ", round(levene_test_pwt$`Pr(>F)`[1], 4),
  " (", ifelse(levene_test_pwt$`Pr(>F)`[1] >= 0.05, "满足方差同质性假设", 
           ifelse(levene_test_pwt$`Pr(>F)`[1] >= 0.01, "略微违反方差同质性", "违反方差同质性")), ")\n\n",
  "   - 结论\n     ", variance_result, "\n\n",
  "4. 总体评估\n",
  "   ", 
  # Overall verdict: both checks pass -> model fully trustworthy; a marginal
  # result on either -> usable with caution; otherwise recommend alternatives.
  if(diagnosis_result == "残差整体满足正态分布假设" && 
     variance_result == "不同组间的方差同质性假设满足") {
    "行为数据整体满足线性混合效应模型的假设，可以可靠地使用该模型进行分析。"
  } else if(diagnosis_result == "残差近似满足正态分布假设，略有偏离但在可接受范围内" || 
            variance_result == "不同组间的方差略有差异，但在可接受范围内") {
    "行为数据基本满足线性混合效应模型的假设，存在轻微偏离但在可接受范围内，可以继续使用该模型，但应谨慎解释结果。"
  } else {
    "行为数据在某些方面违反了线性混合效应模型的假设，建议考虑数据转换、使用稳健方法或选择其他更适合的模型。"
  }, "\n\n",
  "5. 建议\n",
  ifelse(diagnosis_result == "残差整体满足正态分布假设" && 
         variance_result == "不同组间的方差同质性假设满足",
         "  - 可以直接使用当前的线性混合效应模型进行分析\n  - 建议在报告中提及已验证模型假设满足\n",
         ifelse(diagnosis_result == "残差近似满足正态分布假设，略有偏离但在可接受范围内" || 
                variance_result == "不同组间的方差略有差异，但在可接受范围内",
                "  - 可以继续使用当前模型，但应在报告中提及假设检验结果和轻微偏离\n  - 考虑使用稳健标准误或Bootstrap方法增强结果可靠性\n",
                "  - 考虑进一步的数据变换\n  - 探索使用广义线性混合模型(GLMM)或其他稳健方法\n  - 检查极端值和异常观测\n"
         )
  ),
  "\n注意：线性混合效应模型对于正态性假设的违反具有一定的稳健性，尤其是在样本量较大时。\n",
  "即使存在轻微偏离，该模型通常仍能提供可靠的参数估计。方差同质性的微小违反也可以通过使用不同\n",
  "的协方差结构加以解决。\n\n",
  "分析日期：", format(Sys.Date(), "%Y年%m月%d日"), "\n"
), file = report_con)
close(report_con)

print(paste("Comprehensive diagnosis report saved to:", report_file))

cat("\n============= Mixed Effects Model Assumption Tests Completed =============\n")