# ScienceDirect爬虫 Shiny 应用 - 多期刊版本
library(shiny)
library(shinydashboard)
library(DT)
library(rvest)
library(httr)
library(openxlsx)
library(shinyWidgets)
library(jsonlite)
library(stringr)

# Route all HTTP(S) traffic through a local proxy.
# NOTE(review): hard-coded to 127.0.0.1:10809 (a typical local proxy port) —
# the app cannot reach ScienceDirect on machines without a proxy listening
# there; consider making this configurable or reading it from the environment.
Sys.setenv(http_proxy = "http://127.0.0.1:10809")
Sys.setenv(https_proxy = "http://127.0.0.1:10809")

# Retrieve the list of volumes published for a ScienceDirect journal.
#
# journal_name: the journal slug as it appears in the ScienceDirect URL
#   (e.g. "cognition", "acta-psychologica").
#
# Returns a list. On success: success = TRUE, the journal name, a named list
# of volume descriptors (number, display text, absolute link, is_complete)
# sorted newest first, plus total and complete counts. On failure:
# success = FALSE with a human-readable error message.
get_journal_volumes_info <- function(journal_name) {
  issues_url <- paste0("https://www.sciencedirect.com/journal/", journal_name, "/issues")

  # Browser-like headers so the request is not rejected outright.
  request_headers <- add_headers(
    `User-Agent` = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    `Accept` = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    `Accept-Language` = "en-US,en;q=0.9",
    `Referer` = paste0("https://www.sciencedirect.com/journal/", journal_name),
    `Connection` = "keep-alive"
  )

  tryCatch({
    resp <- GET(issues_url, request_headers)
    if (status_code(resp) != 200) {
      return(list(success = FALSE, error = paste("无法访问期刊页面，HTTP状态码:", status_code(resp))))
    }

    page <- read_html(content(resp, as = "text", encoding = "UTF-8"))

    # Query the volume anchors once and pull both href and label from the
    # same node set, so the two vectors cannot get out of step.
    volume_nodes <- html_nodes(page, "a[href*='/journal/'][href*='/vol/']")
    hrefs <- html_attr(volume_nodes, "href")
    labels <- html_text(volume_nodes, trim = TRUE)

    volumes <- list()
    for (idx in seq_along(hrefs)) {
      href <- hrefs[idx]
      label <- labels[idx]

      # The volume number is the digits after "/vol/" in the link.
      vol_num <- str_match(href, "/vol/(\\d+)")[, 2]
      if (!is.na(vol_num)) {
        # Volumes still being filled are labelled "in progress" (or similar).
        in_progress <- grepl("progress|current|latest|ongoing", label, ignore.case = TRUE)
        volumes[[vol_num]] <- list(
          number = vol_num,
          text = label,
          link = paste0("https://www.sciencedirect.com", href),
          is_complete = !in_progress
        )
      }
    }

    if (length(volumes) == 0) {
      return(list(success = FALSE, error = "未找到任何volume信息"))
    }

    # Sort descriptors by numeric volume, newest first.
    ordering <- order(as.numeric(names(volumes)), decreasing = TRUE)
    volumes <- volumes[ordering]

    list(
      success = TRUE,
      journal_name = journal_name,
      volumes = volumes,
      total_volumes = length(volumes),
      complete_volumes = sum(sapply(volumes, function(v) v$is_complete))
    )

  }, error = function(e) {
    list(success = FALSE, error = paste("发生错误:", conditionMessage(e)))
  })
}

# Build the browser-like HTTP header set shared by article/volume requests.
# Returns an httr request object suitable for passing to GET().
create_session <- function() {
  add_headers(.headers = c(
    "User-Agent" = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.2210.91",
    "Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language" = "en-US,en;q=0.5",
    "Referer" = "https://www.sciencedirect.com/"
  ))
}

# Resolve the most recent complete (non "in progress") volumes of a journal.
#
# journal_name: ScienceDirect journal slug.
# max_volumes:  how many complete volumes to return (newest first).
#
# Returns a list with the selected volume URLs, matching human-readable
# descriptions, and counts of all/complete volumes found. Stops with an
# error if the journal page cannot be read or no complete volume exists.
get_journal_volumes <- function(journal_name, max_volumes = 3) {
  info <- get_journal_volumes_info(journal_name)

  if (!info$success) {
    stop(info$error)
  }

  all_volumes <- info$volumes

  # Keep only finished volumes; descriptors are already sorted newest first.
  finished <- Filter(function(v) v$is_complete, all_volumes)

  if (length(finished) == 0) {
    stop("未找到完整的volume")
  }

  chosen <- head(finished, max_volumes)

  list(
    volumes = sapply(chosen, function(v) v$link),
    descriptions = sapply(chosen, function(v) paste("Vol", v$number, "-", v$text)),
    total_found = length(all_volumes),
    complete_found = length(finished)
  )
}

# Normalise a scraped article title and reject obviously invalid ones.
#
# title: a single character string (may be NA).
#
# Returns the cleaned title, or NA when the input is missing/empty, matches
# a known placeholder (download/loading interstitials, site boilerplate),
# or is too short to be a real title.
clean_title <- function(title) {
  if (is.na(title) || title == "") return(NA)

  # Known placeholder/boilerplate titles. All entries must be lower case
  # because the membership test below compares a lower-cased title.
  # BUGFIX: the list previously contained "Editorial Board" in mixed case,
  # which could never match the lower-cased input.
  invalid_titles <- c(
    "preparing your download",
    "download",
    "loading",
    "please wait",
    "sciencedirect",
    "elsevier",
    "editorial board"
  )

  title_lower <- tolower(trimws(title))

  # Reject placeholders, download/loading interstitials, and very short
  # strings (< 10 chars) that are unlikely to be genuine article titles.
  if (title_lower %in% invalid_titles ||
      grepl("preparing|download|loading", title_lower) ||
      nchar(title_lower) < 10) {
    return(NA)
  }

  # Normalise whitespace and strip site-added suffixes.
  title <- gsub("\\s+", " ", title)                # collapse whitespace runs
  title <- trimws(title)                           # trim leading/trailing spaces
  title <- gsub(" - ScienceDirect.*$", "", title)  # drop " - ScienceDirect..." suffix
  title <- gsub(" \\| .*$", "", title)             # drop any " | ..." suffix

  return(title)
}

# Extract author names from a parsed article page.
#
# Tries a cascade of CSS selectors (most to least specific) and keeps the
# first selector that yields plausible names; if none match, falls back to
# splitting a whole author/byline container on commas/semicolons.
#
# article_page: an xml2/rvest document node for the article page.
# Returns a single string of unique names joined by "; ", or the
# no-author placeholder when nothing usable is found.
extract_authors <- function(article_page) {
  selector_candidates <- c(
    ".author-group .given-name, .author-group .surname",
    ".author .given-name, .author .surname", 
    "[data-testid='author-name']",
    ".AuthorName",
    ".author-name",
    "span.author, a.author",
    ".authors .author",
    "[class*='author'][class*='name']"
  )

  found <- character(0)

  for (css in selector_candidates) {
    raw <- html_text(html_nodes(article_page, css), trim = TRUE)

    if (length(raw) == 0 || all(raw == "")) next

    # Drop URLs/handles, bare numbers, and implausibly short/long strings.
    plausible <- raw[
      !grepl("^(https?://|www\\.|@|#)", raw) &
        nchar(raw) > 1 &
        nchar(raw) < 100 &
        !grepl("^[0-9]+$", raw)
    ]

    if (length(plausible) > 0) {
      found <- plausible
      break
    }
  }

  # Fallback: split a whole author/byline section into individual names.
  if (length(found) == 0) {
    sections <- html_text(
      html_nodes(article_page, "div[class*='author'], section[class*='author'], .byline"),
      trim = TRUE
    )

    usable <- sections[nchar(sections) > 5 & nchar(sections) < 200]
    if (length(usable) > 0) {
      found <- trimws(strsplit(usable[1], "[,;]")[[1]])
    }
  }

  if (length(found) == 0) {
    return("无作者信息")
  }

  paste(unique(found[found != ""]), collapse = "; ")
}

# UI definition: a three-tab shinydashboard layout (settings, scraper
# control, results). All user-facing labels are intentionally in Chinese
# and must not be altered here.
ui <- dashboardPage(
  dashboardHeader(title = "ScienceDirect 多期刊爬虫"),
  
  dashboardSidebar(
    sidebarMenu(
      menuItem("爬虫设置", tabName = "settings", icon = icon("cog")),
      menuItem("爬虫控制", tabName = "scraper", icon = icon("spider")),
      menuItem("结果查看", tabName = "results", icon = icon("table"))
    )
  ),
  
  dashboardBody(
    # Inline CSS for the status boxes, progress text, and button groups.
    tags$head(
      tags$style(HTML("
        .content-wrapper, .right-side {
          background-color: #f4f4f4;
        }
        .progress-text {
          font-size: 14px;
          margin-top: 10px;
        }
        .status-box {
          padding: 15px;
          margin: 10px 0;
          border-radius: 5px;
          background-color: #fff;
          box-shadow: 0 1px 3px rgba(0,0,0,0.12);
        }
        .control-buttons {
          text-align: center;
          margin: 20px 0;
        }
        .control-buttons .btn {
          margin: 0 10px;
          font-size: 16px;
          padding: 10px 25px;
        }
        .setting-group {
          padding: 15px;
          margin: 10px 0;
          border-radius: 5px;
          background-color: #fff;
          box-shadow: 0 1px 3px rgba(0,0,0,0.12);
        }
      "))
    ),
    
    tabItems(
      # Settings tab: journal slug, volume count, request-delay range,
      # plus a connection test and a save button.
      tabItem(tabName = "settings",
              fluidRow(
                box(
                  title = "爬虫参数设置", status = "info", solidHeader = TRUE, width = 12,
                  
                  div(class = "setting-group",
                      h4("期刊设置:", style = "margin-top: 0;"),
                      textInput("journal_name", 
                                label = "输入期刊名称 (如: cognition, acta-psychologica):",
                                value = "cognition",
                                placeholder = "请输入期刊英文名称",
                                width = "100%"),
                      p("期刊名称格式说明：使用ScienceDirect网址中的期刊名，如 'cognition', 'acta-psychologica' 等",
                        style = "color: #666; font-size: 12px; margin-top: 5px;"),
                      p("完整网址格式：https://www.sciencedirect.com/journal/[期刊名]",
                        style = "color: #666; font-size: 12px;")
                  ),
                  
                  div(class = "setting-group",
                      h4("Volume设置:", style = "margin-top: 0;"),
                      numericInput("volumes_count", 
                                   label = "要爬取的Volume数量:",
                                   value = 3,
                                   min = 1,
                                   max = 10,
                                   step = 1,
                                   width = "200px"),
                      p("注意：系统会自动跳过标记为'in progress'的Volume，从第一个完整Volume开始爬取。",
                        style = "color: #666; font-size: 12px; margin-top: 10px;")
                  ),
                  
                  div(class = "setting-group",
                      h4("延时设置:", style = "margin-top: 0;"),
                      sliderInput("delay_range", 
                                  label = "请求间隔时间范围（秒）:",
                                  min = 1,
                                  max = 10,
                                  value = c(2, 4),
                                  step = 0.5,
                                  width = "300px"),
                      p("建议设置适当的延时以避免被网站限制访问。",
                        style = "color: #666; font-size: 12px; margin-top: 10px;")
                  ),
                  
                  div(class = "control-buttons",
                      actionBttn("test_journal", "测试期刊连接", 
                                 style = "gradient", color = "warning", size = "md"),
                      actionBttn("save_settings", "保存设置", 
                                 style = "gradient", color = "success", size = "md")
                  ),
                  
                  div(class = "status-box",
                      h4("测试结果:", style = "margin-top: 0;"),
                      verbatimTextOutput("test_results", placeholder = TRUE)
                  )
                )
              )
      ),
      
      # Scraper-control tab: start/stop/export buttons, status text,
      # two progress bars (overall and per-volume), and a live log.
      tabItem(tabName = "scraper",
              fluidRow(
                box(
                  title = "爬虫控制台", status = "primary", solidHeader = TRUE, width = 12,
                  
                  div(class = "control-buttons",
                      actionBttn("start_scraping", "开始爬取", 
                                 style = "gradient", color = "default", size = "lg"),
                      actionBttn("stop_scraping", "停止爬取", 
                                 style = "gradient", color = "danger", size = "lg"),
                      downloadButton("download_excel", "导出Excel", 
                                     class = "btn btn-info btn-lg")
                  ),
                  
                  hr(),
                  
                  div(class = "status-box",
                      h4("当前状态:", style = "margin-top: 0;"),
                      verbatimTextOutput("current_status", placeholder = TRUE)
                  ),
                  
                  div(class = "status-box",
                      h4("整体进度:", style = "margin-top: 0;"),
                      progressBar(id = "overall_progress", value = 0, status = "primary", 
                                  display_pct = TRUE, striped = TRUE),
                      div(class = "progress-text", textOutput("progress_text"))
                  ),
                  
                  div(class = "status-box",
                      h4("Volume进度:", style = "margin-top: 0;"),
                      progressBar(id = "volume_progress", value = 0, status = "success", 
                                  display_pct = TRUE, striped = TRUE),
                      div(class = "progress-text", textOutput("volume_progress_text"))
                  ),
                  
                  div(class = "status-box",
                      h4("实时日志:", style = "margin-top: 0;"),
                      verbatimTextOutput("log_output", placeholder = TRUE)
                  )
                )
              )
      ),
      
      # Results tab: four summary value boxes and the results data table.
      tabItem(tabName = "results",
              fluidRow(
                box(
                  title = "爬取结果", status = "success", solidHeader = TRUE, width = 12,
                  
                  fluidRow(
                    column(3,
                           valueBoxOutput("total_articles")
                    ),
                    column(3,
                           valueBoxOutput("articles_with_keywords")
                    ),
                    column(3,
                           valueBoxOutput("articles_with_authors")
                    ),
                    column(3,
                           valueBoxOutput("success_rate")
                    )
                  ),
                  
                  hr(),
                  
                  DT::dataTableOutput("results_table")
                )
              )
      )
    )
  )
)

# Server logic: holds the reactive scraping state, drives the crawl, and
# renders status, progress, logs, summary boxes, the results table, and
# the Excel export.
server <- function(input, output, session) {
  # Reactive state shared across observers and outputs
  values <- reactiveValues(
    scraping = FALSE,
    results = data.frame(),
    current_article = 0,
    total_articles = 0,
    current_volume = 0,
    total_volumes = 0,
    log_messages = character(),
    status_message = "就绪",
    settings_saved = FALSE,
    test_message = ""
  )
  
  # Append a timestamped message to the rolling log shown in the UI.
  add_log <- function(message) {
    timestamp <- format(Sys.time(), "%H:%M:%S")
    new_message <- paste0("[", timestamp, "] ", message)
    values$log_messages <- c(values$log_messages, new_message)
    # Keep only the most recent 30 messages
    if (length(values$log_messages) > 30) {
      values$log_messages <- tail(values$log_messages, 30)
    }
  }
  
  # Test the journal connection and summarise the available volumes.
  observeEvent(input$test_journal, {
    journal_name <- trimws(input$journal_name)
    
    if (journal_name == "") {
      values$test_message <- "请输入期刊名称"
      return()
    }
    
    values$test_message <- "正在测试期刊连接和获取Volume信息..."
    
    tryCatch({
      # Fetch the journal's volume information
      volume_info_result <- get_journal_volumes_info(journal_name)
      
      if (!volume_info_result$success) {
        values$test_message <- paste("测试失败:", volume_info_result$error)
        return()
      }
      
      volume_info <- volume_info_result$volumes
      complete_volumes <- sum(sapply(volume_info, function(x) x$is_complete))
      
      # Build the test-result report shown in the settings tab
      test_result_lines <- c(
        paste("✓ 期刊连接成功:", journal_name),
        paste("✓ 期刊网址:", paste0("https://www.sciencedirect.com/journal/", journal_name)),
        paste("✓ 找到Volume总数:", length(volume_info)),
        paste("✓ 完整Volume数:", complete_volumes),
        paste("✓ In Progress Volume数:", length(volume_info) - complete_volumes),
        "",
        "可用的Volume列表 (最新10个):"
      )
      
      # Show at most the 10 newest volumes
      display_volumes <- head(volume_info, 10)
      for (i in seq_along(display_volumes)) {
        vol <- display_volumes[[i]]
        status <- ifelse(vol$is_complete, "✓ 完整", "⚠ In Progress")
        test_result_lines <- c(test_result_lines, 
                               paste("  Vol", vol$number, "-", vol$text, paste0("(", status, ")")))
      }
      
      if (length(volume_info) > 10) {
        test_result_lines <- c(test_result_lines, paste("  ... 还有", length(volume_info) - 10, "个Volume"))
      }
      
      # Preview which volumes would actually be crawled with current settings
      volumes_to_crawl <- input$volumes_count
      complete_vols <- volume_info[sapply(volume_info, function(x) x$is_complete)]
      
      if (length(complete_vols) >= volumes_to_crawl) {
        test_result_lines <- c(test_result_lines, "",
                               paste("将爬取前", volumes_to_crawl, "个完整Volume:"))
        selected_vols <- head(complete_vols, volumes_to_crawl)
        for (i in seq_along(selected_vols)) {
          vol <- selected_vols[[i]]
          test_result_lines <- c(test_result_lines, 
                                 paste("  ", i, ". Vol", vol$number, "-", vol$text))
        }
      } else {
        test_result_lines <- c(test_result_lines, "",
                               paste("⚠ 警告: 完整Volume数(", length(complete_vols), ")少于设置的爬取数量(", volumes_to_crawl, ")"))
      }
      
      values$test_message <- paste(test_result_lines, collapse = "\n")
      
    }, error = function(e) {
      values$test_message <- paste("测试失败:", conditionMessage(e))
    })
  })
  
  # Save settings (just flips a flag; start_scraping requires it).
  observeEvent(input$save_settings, {
    values$settings_saved <- TRUE
    add_log("设置已保存")
    showNotification("设置已保存！", type = "message")
  })
  
  # Start-scraping button: resets state, then runs the crawl.
  observeEvent(input$start_scraping, {
    if (!values$scraping) {
      if (!values$settings_saved) {
        showNotification("请先在设置页面保存配置！", type = "warning")
        return()
      }
      
      values$scraping <- TRUE
      values$results <- data.frame()
      values$current_article <- 0
      values$total_articles <- 0
      values$current_volume <- 0
      values$total_volumes <- 0
      
      journal_key <- trimws(input$journal_name)
      journal_name <- journal_key
      volumes_count <- input$volumes_count
      delay_min <- input$delay_range[1]
      delay_max <- input$delay_range[2]
      
      add_log(paste("开始爬取期刊:", journal_key))
      values$status_message <- "正在获取Volume列表..."
      
      # Background processing.
      # NOTE(review): registering an observe() inside an observeEvent leaks a
      # new observer on every click, and invalidateLater(100) re-invalidates
      # it while values$scraping is TRUE — the entire crawl below can be
      # re-entered/restarted. The synchronous loop also blocks the Shiny
      # session, so progress/log outputs may not update until it finishes.
      # Consider ExtendedTask / future + promises instead. Verify before use.
      observe({
        if (values$scraping) {
          invalidateLater(100, session)
          
          tryCatch({
            # Resolve the list of complete volumes to crawl
            volume_info <- get_journal_volumes(journal_name, max_volumes = volumes_count)
            volume_urls <- volume_info$volumes
            values$total_volumes <- length(volume_urls)
            
            add_log(paste("找到", values$total_volumes, "个Volume待爬取"))
            add_log(paste("跳过了", volume_info$total_found - volume_info$complete_found, "个in progress的Volume"))
            
            # Accumulator for all scraped article rows
            all_results <- data.frame(
              title = character(),
              keywords = character(),
              authors = character(),
              url = character(),
              volume = character(),
              stringsAsFactors = FALSE
            )
            
            # Iterate over each volume
            for (vol_idx in seq_along(volume_urls)) {
              if (!values$scraping) break
              
              values$current_volume <- vol_idx
              issue_url <- volume_urls[vol_idx]
              volume_desc <- volume_info$descriptions[vol_idx]
              
              values$status_message <- paste("正在处理Volume", vol_idx, "/", values$total_volumes, ":", volume_desc)
              add_log(paste("开始处理Volume:", volume_desc))
              
              # Update the per-volume progress bar
              updateProgressBar(session, "volume_progress", 
                                value = round((vol_idx - 1) / values$total_volumes * 100))
              
              # Fetch the volume (issue) page
              headers <- create_session()
              response <- GET(issue_url, headers)
              
              if (status_code(response) != 200) {
                # NOTE(review): conditionMessage() expects a condition object;
                # 'response' is an httr response, so this call will itself
                # error when this branch is reached — likely meant
                # status_code(response).
                add_log(paste("无法访问Volume页面，跳过:", conditionMessage(response)))
                next
              }
              
              issue_page <- suppressWarnings(content(response, as = "parsed", encoding = "UTF-8"))
              
              # Extract the article links from the issue page
              article_links <- issue_page %>%
                html_nodes("a[href*='/science/article/pii/']") %>%
                html_attr("href")
              
              article_links <- unique(article_links[!is.na(article_links)])
              
              if (length(article_links) == 0) {
                add_log(paste("Volume中未找到文章链接:", volume_desc))
                next
              }
              
              # Make relative links absolute
              full_links <- ifelse(grepl("^https?://", article_links),
                                   article_links,
                                   paste0("https://www.sciencedirect.com", article_links))
              full_links <- unique(full_links)
              
              volume_article_count <- length(full_links)
              values$total_articles <- values$total_articles + volume_article_count
              add_log(paste("Volume", volume_desc, "中找到", volume_article_count, "篇文章"))
              
              # Process every article in this volume
              for (i in seq_along(full_links)) {
                if (!values$scraping) break
                
                values$current_article <- values$current_article + 1
                values$status_message <- paste(
                  "Volume", vol_idx, "/", values$total_volumes, 
                  "- 文章", i, "/", volume_article_count,
                  "- 总进度", values$current_article, "/", values$total_articles
                )
                
                link <- full_links[i]
                
                # Random delay between requests to avoid rate limiting
                Sys.sleep(runif(1, delay_min, delay_max))
                
                tryCatch({
                  # Fetch the article page (reuses this volume's headers)
                  response <- GET(link, headers)
                  if (status_code(response) != 200) {
                    add_log(paste("无法访问文章页面，跳过"))
                    next
                  }
                  
                  article_page <- suppressWarnings(content(response, as = "parsed", encoding = "UTF-8"))
                  
                  # Extract the title: try several selectors, keep the first
                  # candidate that clean_title() accepts
                  title_candidates <- c(
                    article_page %>% html_node("h1.title") %>% html_text(trim = TRUE),
                    article_page %>% html_node("h1") %>% html_text(trim = TRUE),
                    article_page %>% html_node(".article-title") %>% html_text(trim = TRUE),
                    article_page %>% html_node("title") %>% html_text(trim = TRUE)
                  )
                  
                  title <- NA
                  for (candidate in title_candidates) {
                    cleaned_title <- clean_title(candidate)
                    if (!is.na(cleaned_title)) {
                      title <- cleaned_title
                      break
                    }
                  }
                  
                  if (is.na(title)) {
                    add_log("跳过无效标题的文章")
                    next
                  }
                  
                  # Extract keywords
                  keywords <- article_page %>%
                    html_nodes("div.keyword span, .keywords span, [class*='keyword'] span") %>%
                    html_text(trim = TRUE)
                  
                  keyword_str <- paste(unique(keywords[keywords != ""]), collapse = "; ")
                  if (keyword_str == "") {
                    keyword_str <- "无关键词"
                  }
                  
                  # Extract authors
                  authors_str <- extract_authors(article_page)
                  
                  # Append the scraped row to the accumulated results
                  new_row <- data.frame(
                    title = title,
                    keywords = keyword_str,
                    authors = authors_str,
                    url = link,
                    volume = volume_desc,
                    stringsAsFactors = FALSE
                  )
                  all_results <- rbind(all_results, new_row)
                  values$results <- all_results
                  
                  # Update the overall progress bar
                  overall_progress <- round(values$current_article / values$total_articles * 100)
                  updateProgressBar(session, "overall_progress", value = overall_progress)
                  
                  add_log(paste("成功处理:", substr(title, 1, 40), "..."))
                  
                }, error = function(e) {
                  add_log(paste("处理文章时出错:", conditionMessage(e)))
                })
              }
              
              # Mark this volume as finished on the progress bar
              updateProgressBar(session, "volume_progress", 
                                value = round(vol_idx / values$total_volumes * 100))
              add_log(paste("完成Volume:", volume_desc))
            }
            
            values$status_message <- "爬取完成!"
            add_log(paste("所有Volume爬取完成! 共获取", nrow(all_results), "条记录"))
            values$scraping <- FALSE
            
          }, error = function(e) {
            add_log(paste("爬取过程中出错:", conditionMessage(e)))
            values$status_message <- paste("错误:", conditionMessage(e))
            values$scraping <- FALSE
          })
        }
      })
    }
  })
  
  # Stop-scraping button: the crawl loops check values$scraping and bail out.
  observeEvent(input$stop_scraping, {
    values$scraping <- FALSE
    values$status_message <- "已停止爬取"
    add_log("用户停止了爬取")
  })
  
  # Render the journal-connection test report
  output$test_results <- renderText({
    values$test_message
  })
  
  # Render the current status line
  output$current_status <- renderText({
    values$status_message
  })
  
  # Render the article-progress text
  output$progress_text <- renderText({
    if (values$total_articles > 0) {
      paste("文章进度:", values$current_article, "/", values$total_articles)
    } else {
      "等待开始..."
    }
  })
  
  # Render the volume-progress text
  output$volume_progress_text <- renderText({
    if (values$total_volumes > 0) {
      paste("Volume进度:", values$current_volume, "/", values$total_volumes)
    } else {
      "等待开始..."
    }
  })
  
  # Render the live log (newest message first)
  output$log_output <- renderText({
    paste(rev(values$log_messages), collapse = "\n")
  })
  
  # Summary value boxes
  output$total_articles <- renderValueBox({
    valueBox(
      value = nrow(values$results),
      subtitle = "总文章数",
      color = "blue"
    )
  })
  
  output$articles_with_keywords <- renderValueBox({
    count <- sum(values$results$keywords != "无关键词", na.rm = TRUE)
    valueBox(
      value = count,
      subtitle = "有关键词",
      color = "green"
    )
  })
  
  output$articles_with_authors <- renderValueBox({
    count <- sum(values$results$authors != "无作者信息", na.rm = TRUE)
    valueBox(
      value = count,
      subtitle = "有作者信息",
      color = "yellow"
    )
  })
  
  output$success_rate <- renderValueBox({
    if (nrow(values$results) > 0 && values$total_articles > 0) {
      rate <- round(nrow(values$results) / values$total_articles * 100, 1)
    } else {
      rate <- 0
    }
    valueBox(
      value = paste0(rate, "%"),
      subtitle = "成功率",
      color = "purple"
    )
  })
  
  # Results table
  output$results_table <- DT::renderDataTable({
    DT::datatable(
      values$results,
      options = list(
        pageLength = 10,
        scrollX = TRUE,
        autoWidth = TRUE,
        columnDefs = list(
          list(width = '200px', targets = 0),  # title column
          list(width = '150px', targets = 1),  # keywords column
          list(width = '120px', targets = 2),  # authors column
          list(width = '100px', targets = 3),  # URL column
          list(width = '120px', targets = 4)   # volume column
        )
      ),
      rownames = FALSE,
      colnames = c("标题", "关键词", "作者", "URL", "Volume")
    )
  })
  
  # Excel download.
  # NOTE(review): when there is no data, content() returns without writing
  # anything to 'file', so the browser still receives an empty/invalid
  # download; same if saveWorkbook() fails inside the tryCatch. Consider
  # validating before offering the download.
  output$download_excel <- downloadHandler(
    filename = function() {
      journal_name <- trimws(input$journal_name)
      paste0("sciencedirect_", journal_name, "_results_", Sys.Date(), ".xlsx")
    },
    content = function(file) {
      if (nrow(values$results) == 0) {
        showNotification("没有数据可导出", type = "warning")
        return()
      }
      
      tryCatch({
        # Create the workbook
        wb <- createWorkbook()
        addWorksheet(wb, "文献数据")
        
        # Write the data
        writeData(wb, "文献数据", values$results)
        
        # Set column widths
        setColWidths(wb, "文献数据", cols = 1:5, widths = c(50, 30, 25, 15, 20))
        
        # Save the file
        saveWorkbook(wb, file, overwrite = TRUE)
        
        # Use add_log instead of showNotification (no session context here)
        add_log("Excel文件导出成功!")
        
      }, error = function(e) {
        add_log(paste("导出Excel时出错:", conditionMessage(e)))
      })
    }
  )
}

# Launch the application
shinyApp(ui = ui, server = server)