library(easyPubMed)
library(tidyverse)

# Fetch PMIDs and abstract entries ----------
my_query <- 'SLE skin'
my_entrez_id <- my_query |> get_pubmed_ids()
my_abstracts_txt <- my_entrez_id |> fetch_pubmed_data(format = "abstract")

# Quick preview of the plain-text abstracts
head(my_abstracts_txt)

# fetch_pubmed_data() returns an XML-like character object by default:
# a single, very long length-1 character vector
my_abstracts_xml <- fetch_pubmed_data(my_entrez_id)
# Other formats: "medline" (bib-like, more formatted), "uilist" (PMIDs only)

# Access fields from the XML-like object with custom_grep ----------
# (takes ~48 s for the example data)
# NOTE(review): no gsub is needed here — custom_grep(format = "char")
# appears to return the tag contents already; confirm against the vignette
system.time(my_titles <- custom_grep(my_abstracts_xml, "ArticleTitle", "char"))

# Trim long titles for display: truncate at 70 chars and append an ellipsis
TTM <- nchar(my_titles) > 75
my_titles[TTM] <- paste0(substr(my_titles[TTM], 1, 70), "...")

# Preview (could be rendered as a table with knitr::kable)
head(my_titles)

# Convert the XML character object to a list of articles ----------
my_PM_list <- articles_to_list(my_abstracts_xml)

my_PM_list |> glimpse()

# Use [[ to extract the first record as a single character string;
# [1] would keep the list wrapper, which custom_grep does not expect
my_PM_list[[1]] |>
  custom_grep('PubDate', format = 'char')

# Convert a single entry to a data frame ----------
# Each author occupies one row by default; getAuthors = FALSE fills the
# author columns with NA, returns a single row, and speeds up the call.
# Use [[ to pass one record as a plain string ([1] keeps the list wrapper)
my_PM_list[[1]] |>
  article_to_df(max_chars = 50, getAuthors = FALSE) |>
  DT::datatable()

# Convert all ~492 entries to one data frame; takes about 2 minutes ----------
# NOTE(review): this assignment shadows purrr::map_df() for the rest of the
# session — consider renaming the result (e.g. my_PM_df)
map_df <- my_PM_list |>
  map(\(entry) article_to_df(entry), .progress = TRUE) |>
  list_rbind()

# One-call wrapper straight to a per-author data frame ----------
# included_authors lets you keep only the first (or last) author per article
my_PM_author_df <- table_articles_byAuth(my_abstracts_xml,
                                         included_authors = 'first')

# Save batches of entries as XML/TXT files on disk ----------
out.A <- batch_pubmed_download(my_query,
                               format = "xml",
                               batch_size = 20,
                               dest_file_prefix = "easyPM_example")