class DataController < ApplicationController

  #active_scaffold :data_object

#Run the following three actions to get data from the IOC, the harmful plankton project,
#and to see which taxa produce which toxins: do_get_IOC_reflist_data,
#do_get_harmful_plankton_project_data, and do_examine_for_taxon_toxin_associations

#or just run do_get_all_data

# Convenience action: pull data from both external sources, then scan the
# stored content for taxon/toxin associations, all in one request.
def do_get_all_data
  [:get_IOC_reflist_data,
   :get_harmful_plankton_project_data,
   :do_examine_for_taxon_toxin_associations].each { |step| send(step) }
end

# Action wrapper: run only the IOC reference-list scrape.
def do_get_IOC_reflist_data
  send(:get_IOC_reflist_data)
end

# Action wrapper: run only the Harmful Plankton Project scrape.
def do_get_harmful_plankton_project_data
  send(:get_harmful_plankton_project_data)
end

# Action: re-scan every stored DataObject for toxin-phrase mentions.
# Passing "" makes the scanner fall back to each object's stored content.
def do_examine_for_taxon_toxin_associations
  DataObject.find(:all).each do |record|
    examine_for_taxon_toxin_associations(record, "")
  end
end

#Get information relevant to a specific taxon from the database
#Most of this code should be moved to the view
def show_taxon
  # Load the taxon named by params[:id] and its toxin citations for the view.
  taxon_id = params[:id]
  @taxon_retrieved = Taxon.find(taxon_id)
  @toxin_citations_for_taxon = retrieve_taxon_toxin_associations(taxon_id)
  return nil # explicit nil, matching the other actions
end

# Action: every taxon, alphabetised by name, for the index view.
def list_taxa
  @taxa = Taxon.find :all, :order => "name"
end

# Fetch a DataObject's source page, strip markup, and store the text found
# between the object's bookend markers.
# NOTE: deliberately disabled via the early return below; the rest is kept
# for when the routine is re-enabled.
def get_data
  require 'net/http'
  return nil #temporarily disabling this routine
  data_object = DataObject.find(params[:id])
  str = Net::HTTP.get(URI.parse(data_object.url))
  str = str.gsub(/<\/?[^>]*>/, "") # crude tag stripper; leaves entities (&amp; etc.) alone
  str = str.gsub(/\s+/, " ") # newlines seem to cause problems for regular expressions.
  # NOTE(review): bookends are interpolated unescaped; consider Regexp.escape.
  matches = str.scan(/#{data_object.bookend_start}(.*?)#{data_object.bookend_end}/)
  # Bug fix: scan with a capture group returns an array of arrays, so the
  # original stored a one-element array as content; take the inner string.
  @dataString = matches[0] && matches[0][0]
  data_object.content = @dataString
  data_object.save
end

#To do: ensure it doesn't link, say, algae in general with a specific toxin
#To do: pass in a list of data objects instead, and have a nested loop, 
#so that there isn't a constant stream of database calls
# Scan one data object's text for known toxin phrases; whenever a phrase
# from ToxinReconciliation occurs, record a TaxonToxinCitation linking the
# object's taxon, the toxin, and the object itself as the citation.
#
# content - optional pre-fetched text; pass "" to use data_object.content.
# Returns nil; does nothing when there is no content to scan.
def examine_for_taxon_toxin_associations(data_object, content)
  content = data_object.content if content.empty?
  return nil if content.nil?
  ToxinReconciliation.find(:all).each do |toxin_reconciliation|
    next unless content.include?(toxin_reconciliation.verbatim_phrase)
    taxon = data_object.taxon
    toxin = toxin_reconciliation.toxin
    citation = data_object
    # Bug fix: interpolate the record id; "#{citation}" printed an
    # unhelpful #<DataObject ...> inspect string instead of a number.
    puts "Taxon of #{taxon.name}, toxin of #{toxin.name}, data object number #{citation.id}, URL of #{citation.url}"
    # Local renamed from `params`, which shadowed the controller's params.
    attributes = {:taxon => taxon, :toxin => toxin, :data_object => citation}
    TaxonToxinCitation.create(attributes)
  end
  return nil
end

#Should this be moved to the model instead?
# Build a hash mapping each of the taxon's toxins to the list of data
# objects that cite that taxon/toxin pair.
def retrieve_taxon_toxin_associations(taxon_id)
  taxon = Taxon.find(taxon_id)
  citations_by_toxin = {}
  taxon.toxins.each do |toxin|
    citations = TaxonToxinCitation.find_all_by_taxon_id_and_toxin_id(taxon, toxin)
    citations_by_toxin[toxin] = citations.map { |citation| citation.data_object }
  end
  return citations_by_toxin
end

# Action: expose data objects describing harmful effects with no linked
# toxin (the query itself lives on the DataObject model).
def list_harmful_effects_not_linked_to_toxins
  @harmful_effects_not_linked_to_toxins = DataObject.find_harmful_effects_not_linked_to_toxins
end

#private 

# Crawl the IOC harmful-algae reference list and store one DataObject per
# (taxon, chapter) pair found on each species "details" page.
#
# Crawl strategy: theURLList is extended WHILE being iterated; Array#each
# picks up elements appended during iteration, so newly discovered
# group/details links are visited in the same loop.
def get_IOC_reflist_data
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'
  require 'uri'

  # Seed page; grows as group/details links are discovered.
  theURLList = ["http://www.bi.ku.dk/ioc/introduction.asp"]
  theURLList.each do |currentURL|
    sleep(0.1) # be polite to the remote server
    currentDoc = Hpricot(open(currentURL))
    # Harvest further crawlable links from this page.
    (currentDoc/"a").each do |a|
      if (a.attributes["href"] =~ /group/ or a.attributes["href"] =~ /details/)
        candidateURL = URI.join(currentURL, a.attributes["href"]).to_s # URI.join returns a URI, not a String
        # Only enqueue pages we have not already seen.
        unless(theURLList.include?(candidateURL) )
          theURLList << candidateURL
        end
      end
    end
    # Species pages ("details") carry the chapter/content table we want.
    if (currentURL =~ /details/)
      chapterList = [] # chapter titles, in page order
      dataList = []    # body text, parallel to chapterList
      taxonName = ""
      chapterText = ""

      i = 0 # alternates 0/1: chapter-title cell vs. content cell
      (currentDoc/"td").each do |td|
        # Cells spanning rows/columns are layout, not data - skip them.
        unless ((td.attributes["rowspan"] and Integer(td.attributes["rowspan"]) > 1) or (td.attributes["colspan"] and Integer(td.attributes["colspan"]) > 1))
          if (i == 0)
            chapterText = td.inner_html[0..-2]
            unless (chapterText.length < 3)
              chapterList << chapterText
              dataList << ""
            end
          else
            dataList[dataList.length-1] = dataList[dataList.length-1] + td.inner_html
            if(chapterText == "Genus")
              # Bug fix: the original appended (taxonName + text) onto
              # taxonName, doubling the genus whenever taxonName was already
              # non-empty. Append only the cell text.
              taxonName << "" + td.inner_text.chars.strip
            elsif(chapterText == "Species")
              taxonName << " " + td.inner_text.chars.strip
            end
          end
          i = 1 - i #switch from 0 to 1 or vice versa
        end
      end #End of parsing through species page
      bookend_end = "Not applicable"
      bookend_start = "Not applicable"
      url = currentURL
      # Provider and location rows are created by data migrations.
      data_provider = DataProvider.find_by_name("IOCRefList")
      location = Location.find_by_name("Earth")
      taxon = Taxon.find_or_create_by_name(taxonName)
      if (data_provider and location and taxon)
        chapterList.each_with_index do |chapterItem, index|
          chapter = Chapter.find_or_create_by_name(chapterItem)

          data_object = DataObject.new
          data_object.bookend_end = bookend_end
          data_object.bookend_start = bookend_start
          data_object.content = dataList[index]
          data_object.url = url
          data_object.chapter = chapter
          data_object.data_provider = data_provider
          data_object.location = location
          data_object.taxon = taxon
          data_object.save
        end #end of looping through the chapter list
      end #end of checking if we can do an object
    end #end of scenario of the page being a species page
  end #end of iterating through each page
end #end of routine

# Crawl the Liverpool Harmful Plankton Project site: collect the "Data
# sheets" index pages from the table of contents, follow each sheet's
# "text" image-map link, and hand every text page to parseLiverpoolTextURL
# for parsing and storage.
def get_harmful_plankton_project_data

  require 'rubygems'
  require 'hpricot'
  require 'open-uri'
  require 'uri'

  doc = Hpricot(open("http://www.liv.ac.uk/hab/contents.htm"))
  generalInfoURLList = Array.new
  (doc/"a").each do |a|
    theURL = a.attributes['href']
    if (theURL =~ /Data.*sheets/)
      candidateURL = "http://www.liv.ac.uk/hab/" + theURL
      #The page http://www.liv.ac.uk/hab/Data%20sheets/d_norv.htm
      #is linked to under "Data sheets" as well, hence the following "unless"
      unless generalInfoURLList.include?(candidateURL)
        puts theURL
        # Cleanup: reuse candidateURL instead of rebuilding the same string.
        generalInfoURLList << candidateURL
      end
    end
  end

  # Each data sheet links to its plain-text variant through an image map.
  textURLList = Array.new
  generalInfoURLList.each do |generalInfoURL|
    sheetDoc = Hpricot(open(generalInfoURL))
    (sheetDoc/"area").each do |area|
      link = area.attributes['href']
      if (link =~ /text/)
        textURLList << "http://www.liv.ac.uk/hab/Data%20sheets/" + link
        break # one text link per sheet is enough
      end
    end
  end

  # Parse and persist every text page, pausing between requests.
  textURLList.each do |textURL|
    parseLiverpoolTextURL(textURL)
    sleep 0.1
  end
end

# Download one Harmful Plankton Project text page, walk its body elements
# to populate the chapter state, then persist the result keyed by the
# species named in the page title.
def parseLiverpoolTextURL(textURL)
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'
  require 'uri'

  # Parser state shared with the parseLiverpool* helpers.
  @chapterText = ""
  @chapterBody = ""
  @chapterTextList = []
  @chapterBodyList = []
  @endOfSpeciesContentReached = false

  doc = Hpricot(open(textURL))
  (doc/"/html/body/*").each do |bodyElement|
    parseLiverpoolElement(bodyElement)
  end
  @chapterBodyList << @chapterBody # flush the final chapter's body
  saveLiverpoolData(parseLiverpoolDocumentForSpecies(doc), textURL)
end

# Persist one DataObject per parsed chapter for the given species, tagged
# with the HarmfulPlanktonProject provider and the default location.
def saveLiverpoolData(speciesName, textURL)
  dataProvider = DataProvider.find_by_name("HarmfulPlanktonProject")
  location = Location.find_by_name("Earth")
  taxon = Taxon.find_or_create_by_name(speciesName)
  @chapterTextList.each_with_index do |chapterText, index|
    chapter = Chapter.find_or_create_by_name(chapterText)
    record = DataObject.new(
      :content => @chapterBodyList[index], :url => textURL,
      :chapter => chapter, :data_provider => dataProvider,
      :location => location, :taxon => taxon)
    record.save
  end
end

# Extract the species name from the page <title>, trimming the edges and
# collapsing internal whitespace runs into single spaces.
def parseLiverpoolDocumentForSpecies(doc)
  title = (doc/"//title").inner_text
  return title.strip.gsub(/\s+/, ' ')
end

# Dispatch one HTML element: recurse while any descendant still contains a
# <font> node (chapter headings carry a font "size" attribute); elements
# bearing a "size" attribute are treated as chapter headings, everything
# else as chapter body text.
def parseLiverpoolElement(element)
  require 'rubygems'
  require 'hpricot'

  # A descendant (not this element itself) holds a chapter marker:
  # descend one level and re-dispatch each child.
  if ((element/"//font").size > 0)
    (element/"/*").each { |child| parseLiverpoolElement(child) }
    return
  end
  begin
    isHeading = element.attributes.keys.include?("size")
  rescue # text nodes etc. have no attributes hash - assume not a heading
    isHeading = false
  end
  if (isHeading)
    parseLiverpoolChapterElement(element)
  else
    parseLiverpoolBodyElement(element)
  end
end

# Append an element's text to the current chapter body. Once the
# "Glossary/Abbreviations" section is reached, the species content is over
# and every further body element is ignored.
def parseLiverpoolBodyElement(element)
  return if @endOfSpeciesContentReached
  candidateBodyAddition = element.inner_text
  if(candidateBodyAddition =~ /Glossary.Abbreviations/)
    @endOfSpeciesContentReached = true
    return
  end
  if (candidateBodyAddition.nil?) # inner_text occasionally nil; reason unclear
    puts "Problem: inner text is nil for some reason"
    puts "current body of #{@chapterBody}"
    puts "html of #{element.to_html}"
  else
    @chapterBody << candidateBodyAddition
  end
end
  
# Handle a candidate chapter heading. When the text matches the approved
# list, flush the accumulated body into @chapterBodyList and open a new
# chapter; otherwise warn about unrecognised headings (ignoring known junk
# strings, tiny fragments, and the species-name heading that precedes the
# first chapter) and fall back to treating the element as body text.
def parseLiverpoolChapterElement(element)
  headingText = element.inner_text.gsub(/\s+/, ' ').strip
  approvedChapterTextList = ["Key features", "Toxicity", "Measurements", "Movement", "Food", "Reproduction", "Ecological data", "Species description", "Similar species", "List of basionyms", "List of synonyms", "Taxonomical remarks", "References"]
  unapprovedChapterTextList = ["(Paulsen) Balech and Tangen 1985", "(Biecheler) Horiguchi ex Yuki et Fukuyo 1992"]
  if(approvedChapterTextList.include?(headingText))
    @chapterBody = @chapterBody.chars.strip
    # The very first approved heading has no preceding body to flush.
    if(@chapterTextList.size > 0)
      @chapterBodyList << @chapterBody
    end
    @chapterBody = ""
    @chapterTextList << headingText
  else
    unless(unapprovedChapterTextList.include?(headingText) or headingText.size < 2 or (@chapterTextList.size == 0 and Taxon.find_by_name(headingText) ) )
      puts "Unapproved chapter candidate: #{headingText}"
    end
    parseLiverpoolBodyElement(element)
  end
end
  
# Scratchpad: exercise the AlgaeBase search/parse/filter pipeline for one
# hard-coded species and print whatever survives the filtering.
def algaebaseScratchpad
  resultPage = submitAlgaebaseSearch("Amphora coffeaeformis")
  finalResultList = findRelevantAlgaebaseResult(parseAlgaebaseResultPage(resultPage))
  finalResultList.each do |finalText, finalURL|
    processedText = finalText.strip #Might not be safe for foreign characters
    print "\ntext of #{processedText}\n"
    print "URL of #{finalURL}\n"
  end
end

#Given a species name, do a search and return the resulting page
# Submit the AlgaeBase species-search form for the given name and return
# the resulting Mechanize page.
def submitAlgaebaseSearch(speciesName)
  require 'rubygems'
  require 'mechanize'
  agent = WWW::Mechanize.new
  searchPage = agent.get('http://www.algaebase.org/search/species/')
  searchForm = searchPage.forms[0]
  searchForm.fields.name("name").value = speciesName
  return agent.submit(searchForm)
end

#Given a results page (not a species page!), 
#return a list of taxonomic identifiers and URLs
# From an AlgaeBase RESULTS page (not a species page!), build a hash of
# link text => absolute URL for every species link, then clean it up.
# Nomenclature: a "URL" is an http://... string, while a "link" is the
# HTML-level object (it may carry text etc.).
def parseAlgaebaseResultPage(resultPage)
  pageURL = resultPage.uri.to_s
  resultList = Hash.new
  resultPage.links.href(/species_id.*sk/).each do |candidateLink|
    absoluteURL = URI.join(pageURL, candidateLink.uri.to_s).to_s
    resultList[candidateLink.text] = absoluteURL
  end
  return cleanResultList(resultList)
end

#Given a list with whitespace, nonsensical results, and repeats, clean things up
#This does not deal with evaluating a taxonomic string - 
#if it's a variety, a form, or even a different species, it'll still be allowed in this list
# Clean a raw {text => URL} result hash: drop blank texts, drop entries
# whose URL lacks a nonzero numeric species_id, and drop repeated URLs.
# This does not evaluate the taxonomic string itself - varieties, forms,
# and even different species are still allowed through.
def cleanResultList(resultList)
  cleanList = Hash.new
  resultList.each do |text, theURL|
    cleanText = text.strip
    next if cleanText.size == 0
    next unless (theURL =~ /species_id=(\w+)&/)
    # $1 is the species_id capture; to_i maps non-numeric ids to 0.
    next if $1.to_i == 0
    # Bug fix: the original tested cleanList.include?(theURL), which looks
    # at the KEYS (the texts), so duplicate URLs were never filtered out.
    # Check the stored values instead.
    next if cleanList.has_value?(theURL)
    #data should be clean
    cleanList[cleanText] = theURL
  end
  return cleanList
end

#deprecated routine - tried to do too many things at once
# Deprecated routine - tried to do too many things at once.
# Keep only entries whose text contains a word character; return the hash
# when exactly one entry survives, otherwise log diagnostics and return nil.
def findRelevantAlgaebaseResult(candidateResultList)
  resultList = Hash.new
  candidateResultList.each do |candidateText, candidateURL|
    resultList[candidateText] = candidateURL if candidateText =~ /\w/
  end
  return resultList if resultList.size == 1
  logger.warn("Either too few or too many results")
  print "Number of potential hits: #{resultList.size}\n"
  print "Candidate hits: #{candidateResultList}\n"
  print "Results : #{resultList}"
  return nil
end

#Determine if a name is either
#not what was asked for
#var.
#provisional
#f.
# Inspect a verbatim AlgaeBase name and flag notable properties:
# "var" (a variety), "provisional", and "f" (a form).
# "different" (name is not what was asked for) is declared but never
# computed - requestedName is currently unused, so callers always see it
# as false. TODO: implement the comparison against requestedName.
# Returns a Hash of flag name => boolean.
def evaluateName(requestedName, verbatimName)
  # true/false replace the TRUE/FALSE constants, which no longer exist in Ruby 3.
  results = {"different" => false, "var" => false, "provisional" => false, "f" => false}
  results["var"] = true if verbatimName =~ /var\./
  results["provisional"] = true if verbatimName =~ /Provisional/i
  results["f"] = true if verbatimName =~ /f\./
  return results
end

# Run an AlgaeBase search for speciesName, evaluate every hit's name, and
# print a summary: how many hits were usable, plus a tally of the distinct
# complaint patterns seen among the rest.
def testAlgaebaseResultsPage(speciesName)
  resultPage = submitAlgaebaseSearch(speciesName)
  resultList = parseAlgaebaseResultPage(resultPage)
  validityList = Hash.new
  numberValidSpecies = 0
  complaintBucket = Hash.new
  resultList.each do |taxonText, theURL|
    verdict = evaluateName(speciesName, taxonText)
    validityList[taxonText] = verdict
    if(!verdict["different"] and !verdict["var"] and !verdict["provisional"] and !verdict["f"])
      numberValidSpecies = numberValidSpecies + 1
    else
      # Bucket identical complaint hashes together, matching by ==.
      matchedKey = complaintBucket.keys.find { |key| verdict == key }
      if matchedKey
        complaintBucket[matchedKey] = complaintBucket[matchedKey] + 1
      else
        complaintBucket[verdict] = 1
      end
    end
  end
  print "For species #{speciesName}, there were #{resultList.size} hits, of which #{numberValidSpecies} were valid\n"
  complaintBucket.each do |complaintList, numberCases|
    print "In #{numberCases} cases"
    complaintList.each do |complaint, truthfulness|
      if (truthfulness == true)
        print ", #{complaint} was an issue"
      end
    end
    print "\n"
  end
  return nil
end

# Scratchpad for uBio "taxonomic intelligence": map every stored taxon to
# a uBio namebank ID, gather homonyms for each, then report how many
# classification bank entries each name (plus its homonyms) resolves to.
def taxonomic_intelligence_scratchpad
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'

  namebankIDs = []          # namebank identifiers found for our taxa
  @namebankID_mappings = {} # namebank ID => name string (shared with helpers)
  Taxon.find(:all).each do |taxon|
    namebankID = find_uBio_namebankID(taxon.name)
    if namebankID
      namebankIDs << namebankID
      @namebankID_mappings[namebankID] = taxon.name
    end
  end

  homonym_mappings = {}        # namebank ID => namebank IDs of its homonyms
  classification_mappings = {} # namebank ID => classification bank IDs
  namebankIDs.each do |namebankID|
    nameString = @namebankID_mappings[namebankID]
    homonym_mappings[namebankID] = find_uBio_homonyms(namebankID)
    classification_mappings[namebankID] = []
    # Collect classification entries for the name itself, then each homonym.
    ([namebankID] + homonym_mappings[namebankID]).each do |candidateID|
      classificationBankIDs = find_uBio_classificationBankIDs(candidateID)
      if classificationBankIDs
        classification_mappings[namebankID] = classification_mappings[namebankID] + classificationBankIDs
      end
    end
    entryCount = classification_mappings[namebankID].size
    if entryCount == 1
      puts "#{nameString} has precisely one classification bank entry"
    elsif entryCount == 0
      puts "#{nameString} lacks a classification bank entry"
    else
      puts "#{nameString} has #{entryCount} entries"
    end
  end
end

# Look up the uBio namebank identifier for a name string via the
# namebank_search web service. Returns the first hit's integer ID, or nil
# (with a diagnostic) when there is no result.
def find_uBio_namebankID(nameString)
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'
  require 'cgi' # Bug fix: CGI::escape is used below but 'cgi' was never required

  current_URL = "http://www.ubio.org/webservices/service_internal.php?"
  current_URL = current_URL + "function=namebank_search"
  current_URL = current_URL + "&search=" + CGI::escape(nameString)
  current_URL = current_URL + "&version=2.0"
  current_URL = current_URL + "&qualifier=equals&keyCode=9c57c31932f37aeb7c9b137c089e19de5591b971"

  current_doc = Hpricot(open(current_URL))
  namebankID_elements = current_doc/"//namebankid"
  if (namebankID_elements.empty?)
    puts "No result for #{nameString}"
    puts "URL of #{current_URL}"
    return nil
  else
    namebankID = namebankID_elements.first.inner_text.to_i
    puts namebankID
    return namebankID
  end
end

# Fetch the homonyms of a namebank ID from uBio's namebank_object service.
# Both the lexicalGroup and basionymGroup sections list related names;
# each positive, not-yet-seen namebank ID is collected, and its name
# string recorded in @namebankID_mappings. Returns the homonym ID list.
def find_uBio_homonyms(namebankID)
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'

  current_URL = "http://www.ubio.org/webservices/service_internal.php?function=namebank_object&namebankID="+namebankID.to_s+"&version=2.0&keyCode=9c57c31932f37aeb7c9b137c089e19de5591b971"
  homonym_namebankIDs = []
  current_doc = Hpricot(open(current_URL))
  # Cleanup: the two groups carry identically-shaped <value> entries; the
  # original processed them with two verbatim copies of this loop.
  ["/lexicalGroup/value", "/basionymGroup/value"].each do |valuePath|
    (current_doc/valuePath).each do |value_element|
      homonym_namebankID = (value_element/"/namebankID").inner_text.to_i
      homonym_fullNameString = (value_element/"/fullNameString").inner_text
      unless @namebankID_mappings.has_key?(homonym_namebankID)
        @namebankID_mappings[homonym_namebankID] = homonym_fullNameString
      else
        puts "namebankID_mappings already has an entry for namebankID #{homonym_namebankID} of #{@namebankID_mappings[homonym_namebankID]}, and is trying to enter a string of #{homonym_fullNameString}"
      end
      if (homonym_namebankID > 0 and !homonym_namebankIDs.include?(homonym_namebankID) )
        homonym_namebankIDs << homonym_namebankID
      else
        puts "homonym_namebankID is #{homonym_namebankID} for namebankID #{namebankID}"
      end
    end
  end
  return homonym_namebankIDs
end

# Query uBio's namebank_object service for the classification bank entries
# of a namebank ID, keeping only entries whose status is "valid" and whose
# classification title ID is 99. Returns the (possibly empty) list of IDs,
# warning when more than one is found.
def find_uBio_classificationBankIDs(namebankID)
  require 'rubygems'
  require 'hpricot'
  require 'open-uri'

  classificationBankIDs = []
  current_URL = "http://www.ubio.org/webservices/service_internal.php?function=namebank_object"
  current_URL = current_URL + "&namebankID=" + namebankID.to_s
  current_URL = current_URL + "&version=2.0&keyCode=9c57c31932f37aeb7c9b137c089e19de5591b971"
  current_doc = Hpricot(open(current_URL))
  value_elements = current_doc/"/results/classificationBankEntries/value"

  value_elements.each do |value_element|
    if ((value_element/"/entryStatus").inner_text == "valid" and (value_element/"/classificationTitleID").inner_text == "99")
      classificationBankIDs << (value_element/"/classificationBankID").inner_text.to_i
    end
  end
  if classificationBankIDs.size > 1
    # Bug fix: the original message was missing the opening "#{" before the
    # name lookup, so it printed the literal source text instead of the name;
    # join is also now called with explicit parentheses.
    puts "Multiple classification bank identifiers found for #{@namebankID_mappings[namebankID]} (nbID of #{namebankID}): #{classificationBankIDs.join(", ")}"
    puts "See url #{current_URL} to recreate"
  end
  return classificationBankIDs
end


end
