# File-Name: GScholarScraper_3.1.R
# Date: 2012-08-22
# Author: Kay Cichini
# Email: kay.cichini@gmail.com
# Purpose: Scrape Google Scholar search result
# Packages used: XML
# Licence: CC BY-SA-NC
#
# Arguments:
# (1) input:
# A search string as used in Google Scholar search dialog
#
# (2) write:
# Logical; should a table be written to the user's default directory?
# If TRUE ("T"), a CSV file with hyperlinks to the publications will be created.
#
# Difference to version 3:
# (3) added "since" argument - define year since when publications should be returned..
# defaults to 1900..
#
# (4) added "citation" argument - logical, if "0" citations are included
# defaults to "1" and no citations will be included..
# added field "YEAR" to output 
#
# Caveat: if a submitted search string gives more than 1000 hits there seem
# to be some problems (I guess I'm being stopped by Google for roboting the site..)
#
# And, there is an issue with this error message:
# > Error in htmlParse(URL): 
# > error in creating parser for http://scholar.google.com/scholar?q
# I haven't figured this one out yet.. most likely also a Google blocking mechanism..
# Reconnecting / getting a new IP address helps..


GScholar_Scraper <- function(input, journal = "", since = 1900, to = 2012,
                             write = FALSE, citation = 1) {
  # Scrape Google Scholar search results for a query string.
  #
  # Args:
  #   input:    search string as used in the Google Scholar search dialog.
  #   journal:  optional journal name to restrict the search to.
  #   since/to: restrict results to publications from [since, to].
  #   write:    if TRUE, write a CSV (with Excel-style hyperlinks) to the
  #             working directory and open it; otherwise return a data.frame.
  #   citation: value for the 'as_vis' URL parameter; per the header comments,
  #             0 includes citations, 1 (default) excludes them.
  #
  # Returns (when write is FALSE): a data.frame with columns
  #   TITLES, PUBLICATION, YEAR, CIT (citation count) and LINKS.

  # library() errors immediately if XML is missing; require() only warns.
  library(XML)

  # Assemble the search URL for the first results page:
  URL <- paste0("http://scholar.google.com/scholar?q=", input,
                "&as_sdt=1,5&as_vis=", citation,
                "&as_publication=", journal,
                "&as_ylo=", since,
                "&as_yhi=", to)

  cat("\nThe URL used is: ", "\n----\n", URL)

  # Fetch and parse the first page only to read off the hit count:
  doc <- htmlParse(URL)
  h1 <- xpathSApply(doc, "//div[@id='gs_ab_md']", xmlValue)
  free(doc)  # release the parse tree once; was previously re-freed per loop iteration

  # The hit-count banner reads either "About 1,234 results" or "1,234 results":
  is.about <- length(grep(pattern = "About", x = h1)) > 0
  if (is.about) {
    h2 <- strsplit(h1, " ")[[1]][2]
  } else {
    h2 <- strsplit(h1, " ")[[1]][1]
  }

  num <- as.integer(sub("[[:punct:]]", "", h2))  # strip the thousands separator
  cat("\n\nNumber of hits: ", num, "\n----\n",
      "If this number is far from the returned results\nsomething might have gone wrong..\n\n",
      sep = "")

  # Abort early when the search yields nothing; check NA first with the
  # short-circuiting || so the numeric comparison never sees a missing value:
  if (is.na(num) || num == 0) {
    stop("\n\n...There is no result for the submitted search string!")
  }

  # Google Scholar serves at most 1000 results per query (see header caveat):
  num <- min(num, 1000L)
  pages.max <- ceiling(num / 100)

  # 'start' offsets as used in the paging URLs (0, 100, 200, ...):
  start <- 100 * seq_len(pages.max) - 100

  # One URL per results page, 100 hits each:
  URLs <- paste0("http://scholar.google.com/scholar?start=", start,
                 "&q=", input,
                 "&num=100&as_sdt=1,5&as_vis=", citation,
                 "&as_publication=", journal,
                 "&as_ylo=", since,
                 "&as_yhi=", to)

  # Scrape one results page and return it as a data.frame.
  scraper_internal <- function(x) {
    # Be polite to Google: pause between requests to lower the chance of
    # being blocked for roboting the site (see header caveat).
    sleep.time <- 6
    cat(sprintf("sleeping for %s seconds", sleep.time))
    Sys.sleep(sleep.time)

    cat("CALL\n")
    doc <- htmlParse(x, encoding = "UTF-8")
    on.exit(free(doc), add = TRUE)  # release this page's parse tree even on error

    # titles:
    tit <- xpathSApply(doc, "//h3[@class='gs_rt']", xmlValue)

    # publication info (authors, venue, year):
    pub <- xpathSApply(doc, "//div[@class='gs_a']", xmlValue)

    # footer lines containing "Cited by N":
    cit <- xpathSApply(doc, "//div[@class='gs_fl']", xmlValue)

    # links to the publications:
    lin <- xpathSApply(doc, "//h3[@class='gs_rt']/a", xmlAttrs)

    # class attributes of every child of each result container; used below
    # to pair each title (gs_rt) with its footer (gs_fl), since some
    # results lack a footer:
    lincit <- xpathSApply(doc, "//div[@class='gs_ri']/*", xmlAttrs)

    # Drop a leading "User profiles for ..." pseudo-result if present:
    if (length(tit) != 0) {
      profile.page <- length(grep(pattern = "User profiles", x = tit[1])) > 0
      if (profile.page) {
        tit <- tit[-1]
        lin <- lin[-1]
      }
    }

    # Map footers to titles: walk the flattened class-attribute sequence and
    # give each gs_rt either the next gs_fl (when it belongs to the same
    # result) or a "Cited by 0" default when the footer is missing.
    attrs <- unlist(lincit)
    rts <- which(attrs == "gs_rt")
    fls <- which(attrs == "gs_fl")

    cit.default <- "Cited by 0"
    citcor <- vector("list", length(rts))  # preallocated, was grown implicitly
    flsindex <- 1
    for (i in seq_along(rts)) {
      if (is.na(fls[flsindex])) {
        # no footers left -- use the default
        citcor[[i]] <- cit.default
        next
      }
      if (rts[i] <= fls[flsindex]) {
        if (i == length(rts)) {
          # last title takes the remaining footer
          citcor[[i]] <- cit[flsindex]
        } else if (fls[flsindex] < rts[i + 1]) {
          # footer sits between this title and the next -- it belongs here
          citcor[[i]] <- cit[flsindex]
          flsindex <- flsindex + 1
        } else {
          # footer belongs to a later title -- default for this one
          citcor[[i]] <- cit.default
        }
      } else {
        # footer precedes this title -- default
        citcor[[i]] <- cit.default
      }
    }
    cit <- unlist(citcor)

    # summaries are truncated, and thus won't be used..
    # abst <- xpathSApply(doc, "//div[@class='gs_rs']", xmlValue)
    # ..to be extended for individual needs

    # Extract the 4-digit year from the publication line; entries without a
    # parseable year become NA. Warnings are suppressed locally instead of
    # toggling options(warn=) globally:
    y <- suppressWarnings(as.integer(gsub(".*\\s(\\d{4})\\s.*", "\\1", pub)))

    cit[which(nchar(cit) == 0)] <- cit.default

    # Append a sentinel word so the regex below always has trailing text,
    # then keep only the citation count:
    cit <- paste(cit, "suffix", sep = " ")
    cit <- gsub("Cited by (\\d+).*", "\\1", cit)
    cit <- suppressWarnings(as.integer(cit))
    cit[is.na(cit)] <- 0  # footers without "Cited by" count as 0 citations

    data.frame(TITLES = tit,
               PUBLICATION = pub,
               YEAR = y,
               CIT = cit,
               LINKS = lin)
  }

  # Scrape every page, then bind the per-page data frames in one go
  # (avoids growing an object with rbind() inside the loop). Each page's
  # document is freed inside scraper_internal -- the previous version
  # repeatedly freed the already-released first-page document here.
  result <- do.call(rbind, lapply(URLs, scraper_internal))

  if (isTRUE(write)) {
    # Wrap links in an Excel HYPERLINK formula so they are clickable:
    result$LINKS <- paste0("=Hyperlink(", "\"", result$LINKS, "\"", ")")
    write.table(result, "GScholar_Output.CSV", sep = ";",
                row.names = FALSE, quote = FALSE)
    shell.exec("GScholar_Output.CSV")  # Windows-only: open the written CSV
  } else {
    return(result)
  }
}

