#!/usr/bin/python

import mechanize, re, cookielib, os.path, Levenshtein
from BeautifulSoup import BeautifulSoup

# LOAD COOKIES
# Cookies persist the publisher-site sessions between runs; stored as an
# LWP-format cookie jar in the user's home directory.
COOKIEFILE=os.path.expanduser("~/.pdfcookie")
cj = cookielib.LWPCookieJar()
if os.path.isfile(COOKIEFILE):
  cj.load(COOKIEFILE)

# CREATE VIRTUAL BROWSER
# Module-level mechanize browser shared by all grabbers below.
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_equiv(True)      # honor http-equiv meta headers
#br.set_handle_gzip(True)
br.set_handle_redirect(True)   # follow 3xx redirects automatically
br.set_handle_referer(True)    # send Referer on followed links
br.set_handle_robots(False)    # ignore robots.txt (publisher pages often disallow bots)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
#br.set_debug_http(True)
#br.set_debug_redirects(True)
#br.set_debug_responses(True)
# Pose as a desktop Firefox so publisher sites serve the normal HTML pages.
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

###
# GENERAL
###

# GET CONTENT OF URL (FOLLOWING REDIRECTS)
# Fetches the raw body behind theurl and persists any session cookies.
def getUrlContent(theurl):
  response = br.open(theurl)
  body = response.read()
  cj.save(COOKIEFILE)
  return body

# GET CONTENT TYPE OF CONTENT BEHIND URL
# Opens theurl (redirects followed) and returns its Content-type header.
def getUrlType(theurl):
  br.open(theurl)
  headers = br.response().info()
  return headers["Content-type"]

# GET URL OF FINAL REDIRECTION STEP
# Returns the URL the browser ends up at after all redirects.
def getFinalLocation(theurl):
  return br.open(theurl).geturl()

# GET SOUP OBJECT FOR URL (AFTER REDIRECTS)
# Convenience wrapper: fetch the page and parse it with BeautifulSoup.
def getDocumentSoup(theurl):
  html = getUrlContent(theurl)
  return BeautifulSoup(html)


###
# GRABBERS
###

# IEEE EXPLORE GRABBER
# (ieeexplore.ieee.org)
def getIeeePdf(url):
  print "IEEE grabber"

  # FIRST PAGE
  page1 = getDocumentSoup(url)
  fullTextLinks = filter(lambda aTag : len(aTag.findAll("img", attrs={"src" : "/assets/img/btn.pdf-access-full-text.gif"})) > 0, page1.findAll("a", attrs={"href" : True}))
  if len(fullTextLinks) == 0:
    print "IEEE GRABBER ERROR: No Full Text Link for "+url+" (document perhaps unavailable?)"
    return None
  elif len(fullTextLinks) > 1:
    print "IEEE GRABBER ERROR: More than one Full Text Link for "+url+", please contact developers"
    return None    

  # SECOND PAGE
  page2 = getDocumentSoup("http://ieeexplore.ieee.org" + fullTextLinks[0]["href"])
  pdfLink = page2.findAll("frame", attrs={"src" : True})[1]["src"]
  print "PDF at " + pdfLink

  # DATA
  return getUrlContent(pdfLink)


# ACM DL GRABBER
# (dl.acm.org)
def getAcmPdf(url):
  print "ACM grabber"

  # PAGE
  page = getDocumentSoup(url)

  # PDF LINK
  pdfLink = page.findAll("a", attrs={"name":"FullTextPdf"})
  pdfLink.extend(page.findAll("a", attrs={"name":"FullTextPDF"}))

  if len(pdfLink)==1:
    # HAS PDF LINK
    pdfLink = "http://dl.acm.org/" + pdfLink[0]["href"]
    print "PDF at " + pdfLink
    return getUrlContent(pdfLink)
  elif len(pdfLink)==0:
    # HAS NO PDF LINK
    publisherLinks = page.findAll("a", attrs={"title" : "Publisher Site"})
    if len(publisherLinks) == 1:
      # HAS EXTERNAL LINK
      publisherLink = publisherLinks[0]["href"]
      print "External document. Delegating to grabber for " + publisherLink
      return getPdf(publisherLink)
    elif len(publisherLinks) == 0:
      # HAS NO EXTERNAL LINK
      print "ACM GRABBER ERROR: No PDF or external link for "+url+" (document perhaps unavailable?)"
      return None
    else:
      print "ACM GRABBER ERROR: More than one external link for " + url + ", please contact developers"
      return      
  else:
    print "ACM GRABBER ERROR: More than one PDF link for " + url + ", please contact developers"


# SPRINGERLINK GRABBER
# (springerlink.com)
def getSpringerPdf(url):
  print "Springer grabber"

  # PAGE
  page = getDocumentSoup(url)

  # PDF LINK
  pdfLinks = filter(lambda aTag : aTag["title"].find("Download PDF")==0, page.findAll("a", attrs = { "title" : True }))
  if len(pdfLinks) == 0:
    print "SPRINGER GRABBER ERROR: Found no PDF link for " + url + " (document perhaps unavailable?)"
    return
  else:
    if len(pdfLinks) > 1:
      print "SPRINGER GRABBER WARNING: Found multiple PDF links for " + url + ", using first (reason: Springer has similar links for relevant other chapters)"
    pdfLink = "http://www.springerlink.com" + pdfLinks[0]["href"]
    print "PDF at " + pdfLink
    return getUrlContent(pdfLink)



# DOI GRABBER
# (dx.doi.org)
def getDoiPdf(url):
  print "DOI grabber"
  loc = getFinalLocation(url)
  print "Delegating to grabber for " + loc
  return getPdf(loc)

# CITESEER (BASIC) GRABBER
# (citeseerx.ist.psu.edu)
def getCiteseerPdf(url):
  print "CITESEER grabber"
  # can only handle direct PDF links (yet)
  if url.find("type=pdf") != -1:
    return getOptimisticPdf(url)
  return None

# Optimistic fallback (look if it is a PDF ...)
def getOptimisticPdf(url):
  print "Checking if " + url + " references a PDF ..."
  fin = getFinalLocation(url)
  if getUrlType(fin) in ["application/pdf", "application/x-pdf"]:
    print "Found PDF at " + fin
    return getUrlContent(fin)
  return None

###
# DISPATCHERS / ENTRY METHODS
###

def getPdf(url):
  if url.lower().find("ieeexplore.ieee.org/") != -1:
    return getIeeePdf(url)
  if url.lower().find("acm.org/") != -1:
    return getAcmPdf(url)
  if url.lower().find("springerlink.com/content") != -1:
    return getSpringerPdf(url)
  if url.lower().find("dx.doi.org/") != -1:
    return getDoiPdf(url)
  if url.lower().find("citeseerx.ist.psu.edu/"):
    return getCiteseerPdf(url)
  print "No explicit grabber for " + url
  return getOptimisticPdf(url)

def getPdfScholar(title):
  print "Title Scholar Grabber"
  print "Searching for \"" + title + "\""
  reqTitleLow = unicode(re.sub("[^0-9a-zA-Z]", "", title).lower())
  page = getDocumentSoup("http://scholar.google.de/scholar?q=\"" + title.replace(" ","+") + "\"")
  links = page.find("a", attrs={"href" : True, "class" : "yC0"})
  uri = links["href"]
  fTitle = "".join(links.findAll(text = True))
  print "Found \"" + fTitle + "\" at " + uri
  foundTitleLow = re.sub("[^0-9a-zA-Z]", "", fTitle).lower()
  similarity = Levenshtein.ratio(reqTitleLow, foundTitleLow)
  print "Similarity is " + str(similarity)
  if similarity >= 0.5:
    print "Above threshold, grabbing"
    return getPdf(uri)
  else:
    print "Below threshold, not grabbing"
    return None
  


