# This script checks wikipedia to try and collate information about each of the
# species in the species table. Example table:
# Species,             Common_name,        raster
# Acanthiza katherina, Mountain Thornbill, MTHORN
# setwd("C:\\Users\\elawrey\\Documents\\2013\\GIS\\WT_MTSRF-2-5ii-4_JCU_Williams-S_Vertebrate-atlas-2009\\e-atlas\\code")
# source("species-wikipedia-crawler.R")


# Column name in the species table for the species name and the common name



# Crawl Wikipedia for each species in a species table, scraping the taxa
# class, summary paragraph, thumbnail image and the image's author/licence
# attribution from each species page. Results are accumulated in a CSV
# (wikiPagesInfoCsv) after every species so an interrupted crawl can be
# resumed without re-fetching pages already processed.
#
# Args:
#   speciesTable:     Data frame of species. Must contain the columns named
#                     by speciesColumn, commonNameColumn and idColumn.
#   speciesColumn:    Column holding the scientific name. Tried first when
#                     guessing the Wikipedia page URL.
#   commonNameColumn: Column holding the common name. Used as a fallback
#                     page lookup when the scientific name has no page.
#   idColumn:         Column holding a short unique ID for each species.
#   wikiPagesInfoCsv: Path of the CSV used to store/resume crawl results.
#   thumbImagesDir:   Directory into which thumbnail images are downloaded
#                     (created if needed).
#
# Side effects: writes wikiPagesInfoCsv after each species, downloads
# thumbnail images into thumbImagesDir, and prints progress messages.
wikipediaSpeciesCrawler <- function(
	speciesTable=read.csv('vertebrate.species.csv', stringsAsFactors =FALSE),
	speciesColumn="Species",
	commonNameColumn="Common_name",
	idColumn="raster",
	wikiPagesInfoCsv='wikiPagesInfo.csv',
	thumbImagesDir='images'
) {
	wikipediaBaseUrl <- "http://en.wikipedia.org/wiki/"
	wikipediaDomain <- "http://en.wikipedia.org"

	if (file.exists(wikiPagesInfoCsv)) {
		# Resume: load the results of any previous (partial) crawl.
		wikiPagesInfo <- read.csv(wikiPagesInfoCsv, stringsAsFactors=FALSE)
	} else {
		# Create an empty results table. One row per species.
		wikiPagesInfo <- speciesTable[, c(idColumn, speciesColumn, commonNameColumn)]
		wikiPagesInfo$wikiTaxa <- ""
		wikiPagesInfo$infoPageUrl <- ""
		wikiPagesInfo$textSummary <- ""
		wikiPagesInfo$imagePageUrl <- ""
		wikiPagesInfo$imageName <- ""
		wikiPagesInfo$thumbImageOrigUrl <- ""
		wikiPagesInfo$thumbImageFilePath <- ""
		wikiPagesInfo$shortLicense <- ""
		wikiPagesInfo$licenseUrl <- ""
		wikiPagesInfo$author <- ""
		# Flag so already-processed rows are skipped on resume.
		wikiPagesInfo$searchedWikipedia <- FALSE
	}

	library("RCurl")

	# Fetch a candidate Wikipedia URL and scrape it. Returns a one-row data
	# frame of the extracted information, or NULL if the URL does not
	# correspond to an existing Wikipedia page (HTTP 404).
	tryPotentialWikiUrl <- function (wikiUrl) {
		infoPageUrl <- ""
		wikiTaxa <- ""
		textSummary <- ""
		imagePageUrl <- ""
		imageName <- ""
		thumbImageOrigUrl <- ""
		thumbImageFilePath <- ""
		shortLicense <- ""
		licenseUrl <- ""
		author <- ""

		h <- basicTextGatherer()
		result <- NULL
		res <- getURL(wikiUrl,
			  customrequest = "GET",
			  .opts = list(headerfunction = h$update)
			  )
		header <- parseHTTPHeader(h$value())
		if (header["status"] == 404) {
			print(paste("No wiki page for", wikiUrl))
			return(result)
		}
		if (header["status"] == 200) {
			infoPageUrl <- wikiUrl
			print(paste0("InfoPageUrl: ", infoPageUrl))
			# wikiTaxa
			# --------
			# Test if there is a Class field. This should come from the
			# Scientific classification panel on the page.
			index <- regexpr("<td>Class\\:</td>",res)

			if (index != -1) {
				# Take the next chunk of text and try the classifications
				# that we are expecting. This is simpler than trying to
				# parse the XML.
				# Example chunk:
				#<td>Class:</td>
				#<td><span class="class" style="white-space:nowrap;"><a href="/wiki/Mammalia" title="Mammalia" class="mw-redirect">Mammalia</a></span></td>
				#</tr>
				chunk <- substr(res, index, index+180)
				categories <- c("Bird", "Reptile", "Mammal", "Amphibian")
				categorySearch <- c('Aves', 'Reptilia', 'Mammalia', 'Amphibia')
				for (j in seq_along(categorySearch)) {
					# If found a match don't keep looking
					if (grepl(categorySearch[j], chunk)) {
						wikiTaxa <- categories[j]
						break
					}
				}
			}

			#-----------
			# Summary paragraph: the first <p> after the "infobox biota"
			# table is assumed to be the article's lead paragraph.
			startInfoBoxindex <- regexpr('table class="infobox biota"',res)
			if (startInfoBoxindex != -1) {
				infoboxToEnd <- substr(res, startInfoBoxindex, nchar(res))
				# Search for the end of the table. The summary paragraph should be the next
				# paragraph.
				endOfInfoBoxIndex <- regexpr('</table>',infoboxToEnd)

				if (endOfInfoBoxIndex != -1) {
					endOfInfoboxToEnd <- substr(infoboxToEnd, endOfInfoBoxIndex, nchar(infoboxToEnd))
					startParaIndex <- regexpr('<p>',endOfInfoboxToEnd)
					endParaIndex <- regexpr('</p>',endOfInfoboxToEnd)

					firstParaHtml <- substr(endOfInfoboxToEnd, startParaIndex+3, endParaIndex-1)
					# ------- textSummary ----------
					# Remove all the HTML tags in the paragraph.
					textSummary <- gsub("<[^>]+>","",firstParaHtml)
					# Remove citation references such as [1] or [12].
					# (Fixed: the quantifier must be outside the character
					# class; "[0-9+]" only matched single digits or a '+'.)
					textSummary <- gsub("\\[[0-9]+\\]", "", textSummary)

					# Remove non breaking spaces &#160;
					textSummary <- gsub("&#160;", " ", textSummary)

					print(paste("Found summary text: ", substr(textSummary,1,70), " ..."))

					# Find the link to the image in the infobox
					infoBox <- substr(infoboxToEnd, 1, endOfInfoBoxIndex)
					print(paste("infoBox: ", substr(infoBox,1,100), " ..."))

					# The animal photo should come prior to the "Scientific classification" section.
					# Use this to trim the possible search space for the image to reduce false triggers.
					srcIndex <- regexpr('Scientific classification',infoBox)
					if (srcIndex != -1) {
						imageSectionInfoBox <- substr(infoBox, 1, srcIndex)
						print(imageSectionInfoBox)
						imageAHrefIndex <- regexpr('<a href[^>]+class="image">',imageSectionInfoBox)

						if (imageAHrefIndex != -1) {
							# Assume structure is as follows: <a href="/wiki/File:Nz_boobook.JPG" class="image">
							# +9 skips '<a href="'; -17 trims the trailing
							# '" class="image">' from the match.
							imageHrefRel <- substr(infoboxToEnd,imageAHrefIndex+9,imageAHrefIndex+attr(imageAHrefIndex,"match.length")-17)

							if (nchar(imageHrefRel) > 250) {
								# A very long href means the pattern matched
								# something unexpected; fail loudly.
								stop(paste("imageHrefRel too long: ",imageHrefRel))
							}
							# ------- imagePageUrl ----------
							imagePageUrl <- paste0(wikipediaDomain, imageHrefRel)
							print(paste("Found image page:", imagePageUrl))

							imgToEnd <- substr(infoBox, imageAHrefIndex+attr(imageAHrefIndex,"match.length"), nchar(infoBox))
							# Example of the <img> tag being parsed:
							# <img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Nz_boobook.JPG/220px-Nz_boobook.JPG"
							# width="220" height="251"
							# srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Nz_boobook.JPG/330px-Nz_boobook.JPG 1.5x, ..." /></a></td>
							srcIndex <- regexpr('src="[^"]+',imgToEnd)
							if (srcIndex != -1) {
								imageBaseUrl <- substr(imgToEnd, srcIndex+5, srcIndex+attr(srcIndex,"match.length")-1)
								# Url might be relative ("/something"), absolute with protocol
								# ("http://something"), or protocol-relative ("//something").
								# ------- thumbImageOrigUrl ----------
								if (substr(imageBaseUrl,1,2) == "//") {
									thumbImageOrigUrl <- paste0("http:", imageBaseUrl)
								} else if (substr(imageBaseUrl,1,2) == "http") {
									thumbImageOrigUrl <- imageBaseUrl
								} else {
									thumbImageOrigUrl <- paste0(wikipediaDomain, imageBaseUrl)
								}

								print(paste("Found thumbnail:",thumbImageOrigUrl))
								imageName <- basename(thumbImageOrigUrl)
								# Download the thumbnail so we don't need to hotlink to Wikipedia.
								# ------- thumbImageFilePath ----------
								thumbImageFilePath <- paste0(thumbImagesDir, "/", basename(thumbImageOrigUrl))
								if (!file.exists(thumbImagesDir)) {
									dir.create(thumbImagesDir, recursive=TRUE)
								}

								# Use binary download so the image file isn't corrupted.
								if (!file.exists(thumbImageFilePath)) {
									print(paste0("Downloading thumbnail image: ", thumbImageOrigUrl))
									download.file(thumbImageOrigUrl, destfile=thumbImageFilePath,  mode = "wb")
								} else {
									print("Skipping downloading thumb as it is already downloaded")
								}
							} else {
								print("No photo found in biota info table")
							}
						} else {
							print("No link to image page found in the infobox")
						}
					} else {
						print(paste("No Scientific classification found"))
					}

				} else {
					print("End of biota info table not found")
				}
			} else {
				print("No biota info table found")
			}

			# ------- Image Page ------------
			# Download the image page to get attribution information
			if (imagePageUrl != "") {
				print(paste("Downloading image page:", imagePageUrl))
				res <- getURL(imagePageUrl,
				  customrequest = "GET",
				  .opts = list(headerfunction = h$update)
				  )
				header <- parseHTTPHeader(h$value())
				if (header["status"] == 404) {
					print(paste("No image page page for", imagePageUrl))
				} else {
					if (header["status"] == 200) {
						# ------ Search for Author -------
						# Example of the markup being parsed:
						# ...<tr style="vertical-align: top">
						# <td id="fileinfotpl_aut" class="fileinfo-paramfield">Author</td>
						# <td><a rel="nofollow" class="external text" href="http://www.flickr.com/people/58526113@N00">Tony Brown</a> from Perth WA, Australia</td>
						# </tr>...
						authorIndex <- regexpr('id="fileinfotpl_aut"',res)
						if (authorIndex != -1) {
							chunk <- substr(res, authorIndex, nchar(res))
							# Find the <td> after the Author column
							authorStartIndex <- regexpr('<td><a',chunk)
							chunk <- substr(chunk, authorStartIndex, nchar(chunk))
							authorEndIndex <- regexpr('</a>',chunk)
							authorColumn <- substr(chunk, 1, authorEndIndex-1)
							author <- gsub("<[^>]+>","",authorColumn)	# Remove all HTML
							print(paste("Author:",author))
						} else {
							print("No author found")
						}

						# ------ Search for the licensing -------
						# Example spans on the image page:
						# <span class="licensetpl_link" style="display:none;">http://creativecommons.org/licenses/by/2.0</span>
						# <span class="licensetpl_short" style="display:none;">CC-BY-2.0</span>
						index <- regexpr('<span class="licensetpl_short"[^>]+>',res)
						if (index != -1) {
							startIndex <- index+attr(index,"match.length")
							licenseFrag <- substr(res, startIndex,startIndex+100)
							endIndex <- regexpr('</span>',licenseFrag)
							shortLicense <- substr(licenseFrag, 1, endIndex-1)
							print(paste("Short License:",shortLicense))
						} else {
							print("No short license found")
						}

						index <- regexpr('<span class="licensetpl_link"[^>]+>',res)
						if (index != -1) {
							startIndex <- index+attr(index,"match.length")
							licenseFrag <- substr(res, startIndex,startIndex+200)
							endIndex <- regexpr('</span>',licenseFrag)
							licenseUrl <- substr(licenseFrag, 1, endIndex-1)
							print(paste("License URL:",licenseUrl))
						} else {
							print("No license URL")
						}

					} else {
						print(paste("Status code:", header))
					}
				}
			} else {
				print("No image page, so no author or license")
			}
			result <- data.frame(
				infoPageUrl=infoPageUrl,
				wikiTaxa=wikiTaxa,
				textSummary=textSummary,
				imageName=imageName,
				imagePageUrl=imagePageUrl,
				thumbImageOrigUrl=thumbImageOrigUrl,
				thumbImageFilePath=thumbImageFilePath,
				author=author,
				shortLicense=shortLicense,
				licenseUrl=licenseUrl,
				stringsAsFactors=FALSE)

			return(result)
		} else {
			# Not 200 or 404: abort rather than silently record bad data.
			# (Fixed: wikiUrl was previously passed to stop() outside the
			# message paste, producing a run-together error message.)
			stop(paste0("Unexpected error code: ", header["status"],
					" message: ", header["statusMessage"], " url: ", wikiUrl))
		}
	}

	for (i in seq_len(nrow(wikiPagesInfo))) {
		print(paste0("----- ", i, " of ", nrow(wikiPagesInfo), ": ", wikiPagesInfo[i, speciesColumn], " -----"))

		if (wikiPagesInfo$searchedWikipedia[i]) {
			print(paste("Skipping", wikiPagesInfo[i, speciesColumn],"as already been processed"))
			next
		}
		# Try based on the species name.
		# NOTE(review): speciesTable is indexed by the same row number i as
		# wikiPagesInfo; on resume this assumes the CSV preserves the
		# original row order of speciesTable — verify if rows are reordered.
		potentialSpeciesWikiUrl <- paste0(wikipediaBaseUrl, gsub(" ", "_", speciesTable[i, speciesColumn]))
		result <- tryPotentialWikiUrl(potentialSpeciesWikiUrl)
		if (!is.null(result)) {
			print(paste("Found page for species: ",speciesTable[i, speciesColumn]))
		}

		# Fall back to the common name if the scientific name had no page.
		if (is.null(result) && speciesTable[i, commonNameColumn] != "") {
			potentialSpeciesWikiUrl <- paste0(wikipediaBaseUrl, gsub(" ", "_", speciesTable[i, commonNameColumn]))
			result <- tryPotentialWikiUrl(potentialSpeciesWikiUrl)
			if (!is.null(result)) {
				print(paste("Found page for Common_name: ",speciesTable[i, commonNameColumn]))
			}
		}

		if (!is.null(result)) {
			wikiPagesInfo$wikiTaxa[i] <- result$wikiTaxa
			wikiPagesInfo$infoPageUrl[i] <- result$infoPageUrl
			wikiPagesInfo$textSummary[i] <- result$textSummary
			wikiPagesInfo$imagePageUrl[i] <- result$imagePageUrl
			wikiPagesInfo$imageName[i] <- result$imageName
			wikiPagesInfo$thumbImageOrigUrl[i] <- result$thumbImageOrigUrl
			wikiPagesInfo$thumbImageFilePath[i] <- result$thumbImageFilePath
			wikiPagesInfo$author[i] <- result$author
			wikiPagesInfo$shortLicense[i] <- result$shortLicense
			wikiPagesInfo$licenseUrl[i] <- result$licenseUrl
			wikiPagesInfo$searchedWikipedia[i] <- TRUE
		} else {
			# No page found under either name: record blanks but mark the
			# row as searched so it is not retried on resume.
			wikiPagesInfo$wikiTaxa[i] <- ""
			wikiPagesInfo$infoPageUrl[i] <- ""
			wikiPagesInfo$textSummary[i] <- ""
			wikiPagesInfo$imagePageUrl[i] <- ""
			wikiPagesInfo$imageName[i] <- ""
			wikiPagesInfo$thumbImageOrigUrl[i] <- ""
			wikiPagesInfo$thumbImageFilePath[i] <- ""
			wikiPagesInfo$author[i] <- ""
			wikiPagesInfo$shortLicense[i] <- ""
			wikiPagesInfo$licenseUrl[i] <- ""
			wikiPagesInfo$searchedWikipedia[i] <- TRUE
		}
		# Checkpoint after every species so an interrupted run can resume.
		print("Saving new record")
		write.csv(wikiPagesInfo, wikiPagesInfoCsv, row.names=FALSE)
	}
}
