# Install any dependency that is not already available before the API loads.
# lapply with an anonymous function keeps the loop variable out of the
# global environment; invisible() suppresses the NULL return values.
invisible(lapply(
  c("plumber", "future", "promises", "uuid", "igraph",
    "readxl", "Matrix", "readr", "dplyr"),
  function(pkg) {
    if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
  }
))

# Load required packages
library(plumber)
library(future)
library(promises)
library(uuid)
library(igraph)
library(readxl)
library(Matrix)
library(readr)
library(dplyr)

# Run futures in background R sessions so /analyze returns immediately
plan(multisession)

# In-memory task store: one entry per analysis job, keyed by task id
tasks_env <- new.env()

# 定义辅助函数 -----------------------------------------------------------

calculate_ds_fast <- function(targets, sparse_dist) {
  # Mean, over the given targets, of each target's shortest distance to
  # any disease gene (the columns of sparse_dist). Targets absent from
  # the matrix rows are ignored; returns NA when nothing usable remains.
  network_genes <- rownames(sparse_dist)
  hit_idx <- match(targets, network_genes)
  hit_idx <- hit_idx[!is.na(hit_idx)]
  if (length(hit_idx) == 0) {
    return(NA)
  }

  # Row-wise minimum; an all-NA row yields Inf and is filtered below.
  per_target_min <- apply(sparse_dist[hit_idx, , drop = FALSE], 1, min, na.rm = TRUE)
  finite_mins <- per_target_min[is.finite(per_target_min)]
  if (length(finite_mins) == 0) {
    return(NA)
  }
  mean(finite_mins)
}

compute_network_proximity_fast <- function(targets, sparse_dist, node_degrees, n_random = 1000) {
  # Degree-preserving permutation z-score for the mean closest distance
  # between `targets` and the disease genes encoded in sparse_dist columns.
  #
  # targets:      character vector of candidate target node names.
  # sparse_dist:  nodes x disease-genes distance matrix (NA = unreachable).
  # node_degrees: named numeric vector of degrees for every network node.
  # n_random:     number of degree-matched random target sets to draw.
  #
  # Returns (d_real - mu) / sigma, NA when no reference distribution could
  # be built, or +/-10 when the reference has ~zero spread.
  d_real <- calculate_ds_fast(targets, sparse_dist)
  if (is.na(d_real)) return(NA)

  all_nodes <- rownames(sparse_dist)
  valid_targets <- intersect(targets, all_nodes)
  target_degrees <- node_degrees[valid_targets]

  random_distances <- numeric(n_random)
  valid_count <- 0
  max_attempts <- n_random * 3

  # Draw one random node per target with a similar degree (±15%, widened
  # to roughly ±30% when fewer than 10 candidates fall in the tight band).
  generate_degree_matched_sample <- function() {
    sapply(target_degrees, function(deg) {
      deg_range <- c(max(0.85 * deg, 0), 1.15 * deg)
      candidates <- names(node_degrees)[node_degrees >= deg_range[1] & node_degrees <= deg_range[2]]

      if (length(candidates) < 10) {
        candidates <- names(node_degrees)[
          node_degrees >= max(1, 0.7 * deg) & node_degrees <= 1.3 * deg
        ]
      }

      if (length(candidates) > 0) sample(candidates, 1) else NA
    })
  }

  attempts <- 0
  while (valid_count < n_random && attempts < max_attempts) {
    attempts <- attempts + 1
    random_targets <- na.omit(generate_degree_matched_sample())

    if (length(random_targets) > 0) {
      d_random <- calculate_ds_fast(random_targets, sparse_dist)
      if (!is.na(d_random)) {
        valid_count <- valid_count + 1
        random_distances[valid_count] <- d_random
      }
    }
  }

  # BUG FIX: the original trimmed with random_distances[1:valid_count];
  # for valid_count == 0 that is 1:0 == c(1, 0), which silently keeps the
  # preallocated 0 and produced a spurious +/-10 score. Bail out with NA
  # when no valid random set was produced, and trim with seq_len().
  if (valid_count == 0) return(NA)
  random_distances <- random_distances[seq_len(valid_count)]

  mu_d <- mean(random_distances)
  sigma_d <- sd(random_distances)

  # Degenerate reference (single sample or zero variance): report a
  # saturated score with the correct sign instead of dividing by ~0.
  if (is.na(sigma_d) || sigma_d < 1e-10) {
    return(if (d_real < mu_d) -10 else 10)
  }

  (d_real - mu_d) / sigma_d
}

# 主计算函数 -------------------------------------------------------------
# Main computation: load the PPI network, precompute shortest-path distances
# from every network node to each disease gene, then score each drug's
# target set with the network-proximity z-score.
#
# ppi_file:          xlsx with columns Protein_A_Entrez_ID / Protein_B_Entrez_ID.
# drug_target_file:  csv with columns chemical / entrez_id.
# disease_gene_file: csv with column entrez_id.
#
# Returns list(success = TRUE, message, results, stats) on success, or
# list(success = FALSE, message) when any step throws.
compute_network_proximity <- function(ppi_file, drug_target_file, disease_gene_file) {
  tryCatch({
    # 1. Load the PPI network
    cat("步骤1: 加载PPI网络数据...\n")
    ppi_data <- read_excel(ppi_file, sheet = 1)
    ppi_network <- graph_from_data_frame(
      ppi_data[, c("Protein_A_Entrez_ID", "Protein_B_Entrez_ID")],
      directed = FALSE
    )

    # Keep only the largest connected component so that shortest paths
    # exist between as many node pairs as possible.
    comp <- components(ppi_network)
    largest_comp_index <- which.max(comp$csize)
    ppi_network <- induced_subgraph(ppi_network, which(comp$membership == largest_comp_index))

    # 2. Read drug targets and disease genes
    cat("步骤2: 读取药物靶点和疾病基因文件...\n")
    drug_targets <- read_csv(drug_target_file)
    disease_genes <- read_csv(disease_gene_file)

    # Restrict both gene sets to nodes present in the reduced network.
    valid_nodes <- V(ppi_network)$name
    drug_targets <- drug_targets %>% filter(entrez_id %in% valid_nodes)
    disease_nodes <- unique(disease_genes$entrez_id)
    disease_nodes <- disease_nodes[disease_nodes %in% valid_nodes]

    # 3. Precompute node degrees and the distance matrix
    cat("步骤3: 预计算疾病基因距离矩阵和节点度...\n")
    all_nodes <- valid_nodes
    node_degrees <- degree(ppi_network)
    names(node_degrees) <- all_nodes

    # nodes x disease-genes shortest-path matrix; unreachable pairs -> NA.
    sparse_dist <- matrix(
      NA_real_,
      nrow = length(all_nodes), ncol = length(disease_nodes),
      dimnames = list(all_nodes, disease_nodes)
    )
    for (i in seq_along(disease_nodes)) {
      v_index <- match(disease_nodes[i], all_nodes)
      dists <- distances(ppi_network, v = v_index, to = V(ppi_network), mode = "all")
      dists[is.infinite(dists)] <- NA
      sparse_dist[, i] <- as.vector(dists)
    }
    sparse_dist <- as(sparse_dist, "sparseMatrix")

    # 4. Score every drug
    drugs <- unique(drug_targets$chemical)
    # Preallocate one result per drug and bind once at the end, instead of
    # growing a data.frame with bind_rows inside the loop (O(n^2) copies).
    per_drug <- vector("list", length(drugs))
    for (i in seq_along(drugs)) {
      drug <- drugs[i]
      cat(sprintf("处理药物 %d/%d: %s\n", i, length(drugs), drug))

      targets <- unique(drug_targets$entrez_id[drug_targets$chemical == drug])

      d_s <- calculate_ds_fast(targets, sparse_dist)
      Z_score <- compute_network_proximity_fast(targets, sparse_dist, node_degrees)
      P_value <- if (!is.na(Z_score)) 2 * pnorm(-abs(Z_score)) else NA_real_

      per_drug[[i]] <- data.frame(
        drug = drug,
        d_s = d_s,
        Z_score = Z_score,
        P_value = P_value
      )
    }
    results <- bind_rows(per_drug)

    # Flag drugs that are both closer than random (Z < 0) and significant.
    results <- results %>%
      mutate(significance = ifelse(Z_score < 0 & P_value < 0.05, "显著有效", "不显著")) %>%
      arrange(Z_score)

    # Guard the summary stats against drugs whose Z-score is NA: the
    # original sum() returned NA and min()/max() warned and returned +/-Inf.
    finite_z <- results$Z_score[!is.na(results$Z_score)]
    list(
      success = TRUE,
      message = "计算完成",
      results = results,
      stats = list(
        total_drugs = nrow(results),
        significant_drugs = sum(results$significance == "显著有效", na.rm = TRUE),
        min_Z = if (length(finite_z) > 0) min(finite_z) else NA,
        max_Z = if (length(finite_z) > 0) max(finite_z) else NA
      )
    )
  }, error = function(e) {
    list(
      success = FALSE,
      message = paste("计算失败:", e$message)
    )
  })
}

# 创建Plumber API --------------------------------------------------------
#* @apiTitle 网络邻近度分析API
#* @apiDescription 用于计算药物-疾病网络邻近度的后端接口
#* @apiVersion 1.0.0

#* 健康检查
#* @get /health
function() {
  # Liveness probe: report that the API process is up plus the server time.
  list(status = "API运行正常", time = Sys.time())
}
#* 上传文件并开始分析
#* @param ppi_file:file PPI网络文件
#* @param drug_targets:file 药物靶点文件
#* @param disease_genes:file 疾病基因文件
#* @post /analyze
function(req, res, ppi_file, drug_targets, disease_genes) {
  # Accept three uploaded files, validate them, copy them into a
  # task-private directory, and launch the analysis asynchronously.
  # Responds immediately with a task id that can be polled via /task/<id>.
  task_id <- UUIDgenerate()

  # Task-private scratch directory for the copied uploads
  task_dir <- file.path(tempdir(), task_id)
  dir.create(task_dir, showWarnings = FALSE)

  # Validate one multipart upload object: it must exist, expose a
  # datapath, and point at a non-empty temporary file.
  validate_upload <- function(file_obj, name) {
    if (is.null(file_obj)) {
      return(list(valid = FALSE, message = paste(name, "文件对象为空")))
    }
    if (is.null(file_obj$datapath)) {
      return(list(valid = FALSE, message = paste(name, "文件缺少datapath属性")))
    }
    if (!file.exists(file_obj$datapath)) {
      return(list(valid = FALSE, message = paste(name, "临时文件不存在:", file_obj$datapath)))
    }
    if (file.info(file_obj$datapath)$size == 0) {
      return(list(valid = FALSE, message = paste(name, "文件大小为0")))
    }
    list(valid = TRUE, path = file_obj$datapath)
  }

  ppi_validation <- validate_upload(ppi_file, "PPI网络")
  drug_validation <- validate_upload(drug_targets, "药物靶点")
  disease_validation <- validate_upload(disease_genes, "疾病基因")

  # Collect every validation failure so the client sees them all at once
  errors <- c()
  if (!ppi_validation$valid) errors <- c(errors, ppi_validation$message)
  if (!drug_validation$valid) errors <- c(errors, drug_validation$message)
  if (!disease_validation$valid) errors <- c(errors, disease_validation$message)

  if (length(errors) > 0) {
    return(list(
      success = FALSE,
      message = "文件验证失败",
      errors = errors
    ))
  }

  # Stable paths inside the task directory for the worker to read
  ppi_path <- file.path(task_dir, "ppi_network.xlsx")
  drug_targets_path <- file.path(task_dir, "drug_targets.csv")
  disease_genes_path <- file.path(task_dir, "disease_genes.csv")

  copy_success <- c(
    file.copy(ppi_validation$path, ppi_path, overwrite = TRUE),
    file.copy(drug_validation$path, drug_targets_path, overwrite = TRUE),
    file.copy(disease_validation$path, disease_genes_path, overwrite = TRUE)
  )

  if (!all(copy_success)) {
    failed_files <- c("PPI", "药物靶点", "疾病基因")[!copy_success]
    return(list(
      success = FALSE,
      message = "文件复制失败",
      failed_files = failed_files
    ))
  }

  # Register the task before the worker starts so polling works immediately
  tasks_env[[task_id]] <- list(
    id = task_id,
    status = "processing",
    start_time = Sys.time(),
    end_time = NULL,
    result = NULL,
    message = "任务已开始处理",
    progress = 0
  )

  # BUG FIX: with plan(multisession) the future body runs in a separate R
  # process, so assignments to tasks_env inside future({...}) mutated a
  # COPY and were never visible to the API process — tasks stayed
  # "processing" forever. The worker now only computes and returns the
  # result; all bookkeeping happens in the promise callbacks, which run
  # back in the main process. This also records end_time on success,
  # which the original never did.
  future({
    tryCatch(
      compute_network_proximity(ppi_path, drug_targets_path, disease_genes_path),
      finally = unlink(task_dir, recursive = TRUE)
    )
  }) %...>% (function(result) {
    task <- tasks_env[[task_id]]
    task$result <- result
    task$status <- if (isTRUE(result$success)) "completed" else "failed"
    task$message <- result$message
    task$end_time <- Sys.time()
    task$progress <- 100
    tasks_env[[task_id]] <- task
    message(sprintf("任务 %s 完成 - 状态: %s", task_id, task$status))
  }) %...!% (function(error) {
    task <- tasks_env[[task_id]]
    task$status <- "failed"
    task$end_time <- Sys.time()
    task$message <- paste("异步任务失败:", conditionMessage(error))
    tasks_env[[task_id]] <- task
    message(sprintf("任务 %s 失败: %s", task_id, conditionMessage(error)))
  })

  # Return immediately; the client polls /task/<task_id> for progress
  list(
    success = TRUE,
    task_id = task_id,
    message = "分析任务已开始，请稍后查询结果",
    check_url = paste0("/task/", task_id)
  )
}

#* 获取任务状态
#* @param task_id 任务ID
#* @get /task/<task_id>
function(task_id) {
  # Look up a task by id and report its current status and timing.
  task <- tasks_env[[task_id]]

  if (is.null(task)) {
    return(list(
      success = FALSE,
      message = "无效的任务ID"
    ))
  }

  # Elapsed time: up to end_time for finished tasks, up to now otherwise
  reference_time <- if (is.null(task$end_time)) Sys.time() else task$end_time
  elapsed <- as.numeric(difftime(reference_time, task$start_time, units = "secs"))

  # Result payload is only exposed once the task has completed
  is_done <- !is.null(task$status) && task$status == "completed"
  list(
    task_id = task_id,
    status = task$status,
    message = task$message,
    start_time = task$start_time,
    end_time = task$end_time,
    elapsed_seconds = round(elapsed, 1),
    result = if (is_done) task$result$result else NULL,
    stats = if (is_done) task$result$stats else NULL
  )
}

#* 获取显著结果
#* @param task_id 任务ID
#* @get /significant/<task_id>
function(task_id) {
  # Return only the drugs flagged as significantly proximal, sorted by
  # Z-score (most negative, i.e. strongest, first).
  task <- tasks_env[[task_id]]

  not_ready <- is.null(task) || task$status != "completed"
  if (not_ready) {
    return(list(
      success = FALSE,
      message = "结果不可用"
    ))
  }

  sig_rows <- task$result$results %>%
    filter(significance == "显著有效") %>%
    arrange(Z_score)

  list(
    success = TRUE,
    count = nrow(sig_rows),
    drugs = sig_rows$drug,
    Z_scores = sig_rows$Z_score,
    P_values = sig_rows$P_value
  )
}

#* 下载完整结果
#* @param task_id 任务ID
#* @serializer csv
#* @get /download/<task_id>
function(task_id, res) {
  # Stream the full result table as a CSV file attachment.
  task <- tasks_env[[task_id]]

  if (is.null(task) || task$status != "completed") {
    res$status <- 404
    return(list(error = "结果不可用"))
  }

  # Round every numeric column to 4 decimals for the export. The lambda
  # replaces the deprecated across(..., round, 4) extra-argument form.
  csv_data <- task$result$results %>%
    mutate(across(where(is.numeric), ~ round(.x, 4)))

  # BUG FIX: the endpoint previously kept plumber's default JSON
  # serializer, so clients received JSON despite a manually set
  # "text/csv" header. The @serializer csv annotation above makes
  # plumber emit real CSV, and as_attachment() sets the
  # Content-Disposition download header with the task-specific filename.
  as_attachment(csv_data, paste0("results_", task_id, ".csv"))
}