#!/usr/bin/env python

import wsgiref.handlers
import urlparse, hashlib, robotparser, logging
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from schema import Document, Fetch, checksum_from_key_name

USER_AGENT = "BackRubRedux"

class FetchWorker(webapp.RequestHandler):
  """Task-queue worker that fetches a document's URL, subject to the site's
  robots.txt rules, and stores the HTTP response as a Fetch entity."""

  def allowed_to_fetch_url(self, url):
    """Return True if robots.txt permits fetching `url`.

    `url` is a urlparse result. Only http/https are supported (a urlfetch
    API limitation). A robots.txt URL itself is always fetchable. The
    robots file body is cached in memcache for a day so documents on the
    same host don't refetch it. Fails closed (returns False) on fetch or
    parse errors.
    """
    logging.debug("Am I allowed to fetch %s?", url.geturl())

    # The urlfetch API only supports HTTP and HTTPS.
    if url.scheme not in ('http', 'https'):
      logging.debug("This URL has an unsupported scheme.")
      return False

    # Figure out the URL to the robots file for this host.
    robots_url = url.scheme + '://' + url.netloc + '/robots.txt'
    logging.debug("The robots URL for that is %s.", robots_url)

    # We can always fetch a robots file.
    if robots_url == url.geturl():
      logging.debug("This is a robots URL, so it can definitely be fetched.")
      return True

    # Look for a cached copy of the robots file before fetching it.
    key = 'robots-' + hashlib.md5(robots_url).hexdigest()
    logging.debug("The memcache key is %s", key)
    robots = memcache.get(key)
    if robots is None:
      logging.debug("I'm going to try to fetch %s to find out.", robots_url)
      try:
        fetch_result = urlfetch.fetch(
          robots_url,
          follow_redirects=True,
          headers={'User-Agent': USER_AGENT}
        )
      except urlfetch.DownloadError:
        logging.error("There was an error retrieving the data.")
        return False
      except urlfetch.InvalidURLError:
        logging.error("The URL of the request was not a valid URL.")
        return False
      else:
        # Bug fix: only treat the body as robots rules on a 200 response.
        # A site with no robots.txt typically answers 404 with an HTML
        # error page; previously that page was parsed (and cached) as
        # robots rules. Per the robots convention, no robots file means
        # everything is allowed, so substitute an empty rule set.
        if fetch_result.status_code == 200:
          robots = fetch_result.content
        else:
          logging.debug(
            "Robots fetch returned status %d; treating as an empty "
            "robots file.", fetch_result.status_code)
          robots = ''
        # Cache the robots file (or the empty negative result) for a day.
        memcache.add(key, robots, 60*60*24)

    # Parse the robots file and ask it about this URL.
    logging.debug("Now, I'll parse the robots file.")
    rp = robotparser.RobotFileParser()
    rp.parse(robots.split('\n'))
    logging.debug("Finished parsing, now to make a decision.")
    try:
      allowed_to_fetch = rp.can_fetch(USER_AGENT, url.geturl())
    except KeyError:
      # Some malformed robots files make robotparser raise; fail closed.
      logging.error("Unable to determine whether I can get %s", url.geturl())
      return False
    if not allowed_to_fetch:
      logging.info("I'm not allowed to fetch %s", url.geturl())
    return allowed_to_fetch

  def post(self):
    """Task handler: fetch the document named by the `document_key` request
    parameter, store the response as a Fetch entity, and enqueue a
    compression task for it.

    Disallowed or failed fetches are logged and dropped; the handler
    returns normally so the task is not retried.
    """
    # Look up the URL to fetch.
    key = self.request.get("document_key")
    document = Document.get(db.Key(key))
    url = urlparse.urlparse(document.url)

    if not self.allowed_to_fetch_url(url):
      return

    try:
      # Fetch the URL. Redirects are not followed, so a 3xx response is
      # stored as-is rather than silently resolved.
      logging.debug("I'm going to try to fetch %s", url.geturl())
      result = urlfetch.fetch(
        url.geturl(),
        follow_redirects=False,
        headers={'User-Agent': USER_AGENT}
      )
    except urlfetch.DownloadError:
      logging.error("There was an error retrieving the data.")
      return
    except urlfetch.ResponseTooLargeError:
      logging.error("The response data exceeded the maximum allowed size.")
      return

    # Package the fetch result into a datastore entity.
    logging.debug("I'm going to create a fetch entity")
    fetch = Fetch(
      docid=checksum_from_key_name(document.key().name()),
      status=result.status_code,
    )
    fetch.content = result.content
    # Record each response header as a dynamic 'header-<name>' property.
    for header in result.headers:
      property_name = 'header-' + header
      setattr(fetch, property_name, db.Text(result.headers[header]))

    # Store the fetch in the datastore.
    logging.debug("I'm going to store the fetch in the datastore.")
    fetch_key = fetch.put()

    # Hand the stored fetch off to the compression queue.
    logging.debug("I'll make a task to compress %s.", fetch_key)
    task = taskqueue.Task(
      url='/compress-fetch',
      params={
        # Make the key's string encoding explicit in the task payload.
        'fetch_key': str(fetch_key),
        'encode': 'bz2',
      }
    )
    task.add(queue_name='compress')


def main():
  """Enable debug logging and serve the fetch worker over CGI."""
  logging.getLogger().setLevel(logging.DEBUG)
  application = webapp.WSGIApplication([('/fetch', FetchWorker)])
  wsgiref.handlers.CGIHandler().run(application)


# Entry point: App Engine runs this script as a CGI program per request.
if __name__ == '__main__':
  main()
