#!/usr/bin/env python

# (c) 2007-2011 Helsinki University of Technology and University of Helsinki
# Licensed under the MIT license; see LICENSE.txt for more information.

from worker import Worker
from document import Document

import urllib2
import urlparse
import robotparser
import httplib
from hashlib import md5
import time
import sys
import os.path
import re

URLCHARSET = 'utf-8'	# URLs are assumed to be in this charset in the crawler (later phases will use Unicode)

# exception to throw when the URL is not wanted and thus shouldn't be fetched
class UnwantedURL (urllib2.URLError):
  """ raised when a URL fails the crawler's filters (isWantedUrl) and thus shouldn't be fetched """
  pass

class CachedURLError (urllib2.URLError):
  """ raised when the on-disk cache holds a stored error response for the requested URL;
      the reason attribute carries the cached human-readable error message """
  pass

class CachedResponse:
  """ superclass of all cached responses - all implementation done in subclasses
      (DataResponse for normal data, RedirectResponse / ErrorResponse for the rest);
      used as a marker type by isinstance() checks in the handlers """
  pass

class DataResponse (CachedResponse, file):
  """ file-like object that has an additional geturl() method, corresponding (partly) to the response
      objects that urllib2 uses """
      
  def __init__(self, datapath, hdrpath, url):
    """ open the cached body file at datapath and parse the cached headers at hdrpath;
        url is the URL the cached data was originally fetched from """
    file.__init__(self, datapath, 'r')
    self._datapath = datapath
    self._url = url
    # a cached data response always represents a successful (HTTP 200) fetch
    self.code = 200
    self.msg = 'OK'
    hdrfile = file(hdrpath, 'r')
    try:
      # parse the stored headers into an object compatible with urllib2's response.info()
      self._info = httplib.HTTPMessage(hdrfile)
    finally:
      # close the header file even if parsing raises (the original leaked it on error)
      hdrfile.close()
  
  def geturl(self):
    """ the URL this cached response was fetched from, like urllib2 response objects """
    return self._url
  
  def info(self):
    """ the parsed response headers (httplib.HTTPMessage), like urllib2 response objects """
    return self._info
  
  def getFilePath(self):
    """ path of the cached data file on disk """
    return self._datapath

class NonDataResponse (CachedResponse):
  """ represents a cached copy of a non-data response (redirect or error) """
  def __init__(self, filepath):
    """ load the whole cached payload from filepath into memory """
    f = file(filepath)
    try:
      self._data = f.read()
    finally:
      # close the file even if read() raises (the original leaked the handle on error)
      f.close()
  
  def getData(self):
    """ the cached payload: the new URL for redirects, a human-readable message for errors """
    return self._data
  
class RedirectResponse (NonDataResponse):
  """ cached redirect response; getData() gives the new URL
      (stored on disk as a '.red' cache file) """
  pass

class ErrorResponse (NonDataResponse):
  """ cached HTTP error response; getData() gives a human-readable error message
      (stored on disk as an '.err' cache file) """
  pass
  

class CachingHandler (urllib2.BaseHandler):
  """ urllib2 handler that serves responses from the crawler's on-disk cache;
      as a default_open handler it is consulted before the protocol handlers,
      and returning None makes urllib2 fall through to a normal network fetch """
  handler_order = 50	# before ProxyHandler

  def __init__(self, crawler):
    # the Crawler provides cache lookups, URL filtering and seen-URL bookkeeping
    self._crawler = crawler

  def default_open(self, req):
    """ return a cached response for req, or None on a cache miss;
        raises UnwantedURL for cached redirects into filtered-out URLs
        and CachedURLError for cached error responses """
    # die on fragment URLs
    if req.get_full_url().find('#') != -1:
      print "invalid URL %s" % req.get_full_url()
      sys.exit(1)

    response = self._crawler.getCachedResponse(req.get_full_url())
    if response is None:
      return None	# not found in cache
    if isinstance(response, DataResponse):
      return response	# cached normal data response (HTTP code 200)
    if isinstance(response, RedirectResponse):
      # mark the old url as seen
      self._crawler.markAsSeen(req.get_full_url())
      # cached redirect response; create a new request and re-handle it

      newurl = response.getData()
      if not self._crawler.isWantedUrl(newurl):
        raise UnwantedURL(newurl)

      # FIXME should there be loop detection etc. just like in the original?
      # re-enter the whole opener chain, so the new URL may itself hit the cache
      newreq = urllib2.Request(newurl, headers=req.headers, origin_req_host=req.get_origin_req_host(), unverifiable=True)
      return self.parent.open(newreq)
    if isinstance(response, ErrorResponse):
      raise CachedURLError(response.getData())
    
    # should never happen...
    return None
  

# FIXME should we make only this handler, or a more generic handler that
# checks everything about to be retrieved?
class RestrictedRedirectHandler (urllib2.HTTPRedirectHandler):
  """ handle HTTP redirects, but don't follow redirects into unwanted URLs """
    
  def __init__(self, crawler):
    # the Crawler provides isWantedUrl filtering, seen-URL bookkeeping and the cache
    self._crawler = crawler

  def redirect_request(self, req, fp, code, msg, headers, newurl):
    """ called by the superclass to build the follow-up request; returns the new
        Request via the superclass, or raises UnwantedURL if the redirect target
        fails the crawler's filters """
    # add the old url (from req) to seen
    self._crawler.markAsSeen(req.get_full_url())
    
    # strip any #fragment from the redirect target before filtering it
    newurl = urlparse.urldefrag(newurl)[0]
    
    # if the new URL is not wanted, raise an exception
    if not self._crawler.isWantedUrl(newurl):
      # still record the redirect itself, so a later cached run can short-circuit it
      self._crawler.setCachedResponse(req.get_full_url(), 'red', newurl)
      raise UnwantedURL(newurl)
    
    return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
  
  def http_error_302(self, req, fp, code, msg, headers):
    """ follow the redirect via the superclass, then record it in the cache """
    # see what the superclass would do
    response = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
    if response is not None:
      # superclass decided to redirect, so cache that information
      self._crawler.setCachedResponse(req.get_full_url(), 'red', response.geturl())
    return response
  
  # all other redirect status codes are treated exactly like 302
  http_error_301 = http_error_303 = http_error_307 = http_error_302


class Crawler (Worker):
  """ Worker that fetches URLs from its work queue, caches responses on disk,
      extracts links for recursive crawling, and hands each fetched page over
      to the session as a Document for further processing """

  def __init__(self, session):
    Worker.__init__(self, session.getFatalEvent(), maxqueue=0)	# no maximum size for the queue, otherwise the worker will block itself
    self._session = session
    self._robotparsers = {} # key: robots.txt URL, value: RobotFileParser object
    self._seen = set()	# already seen URLs that don't need to be processed anymore
    self._todo = set()	# outstanding URLs that should still be processed (same content as the workqueue, but with faster lookups)

    if self._session.getKeep():
      # make sure all paths exist
      datadir = self._session.getConfig().getDataDir()
      source = self._session.getSource().getName()

      if not os.path.exists(datadir):
        os.mkdir(datadir)
      
      if not os.path.exists(datadir + "/" + source):
        os.mkdir(datadir + "/" + source)

    # redirect filtering is always installed; cache and proxy handlers are optional
    handlers = [RestrictedRedirectHandler(self)]
    if self._session.getCached():
      handlers.append(CachingHandler(self))
    http_proxy = session.getConfig().getHTTPProxy()
    if http_proxy is not None:
      handlers.append(urllib2.ProxyHandler({'http': http_proxy}))
    self._opener = urllib2.build_opener(*handlers)
    # number of pages still allowed to be fetched; None means unlimited
    self._limit = session.getLimit()
    if self._limit == 0: self._limit = None

  def getRobotParser(self, url):
    """ return the RobotFileParser for url's scheme+host, or None if reading
        the site's robots.txt failed; parsers are cached per robots.txt URL """
    spliturl = urlparse.urlsplit(url)
    robotsurl = "%s://%s/robots.txt" % (spliturl[0], spliturl[1])
    if robotsurl not in self._robotparsers:
      rp = robotparser.RobotFileParser()
      rp.set_url(robotsurl)
      try:
        rp.read()
      except IOError:
        rp = None # no robots.txt was found so not using a parser
      self._robotparsers[robotsurl] = rp
    return self._robotparsers[robotsurl]
  
  def isWantedUrl(self, url):
    """ decide whether url should be fetched: checks the seen-set, the URL
        scheme, the source's exclusion lists, domain restrictions, the URL
        regex and robots.txt; returns True only if all checks pass """
    # check that url hasn't been processed already
    if url in self._seen:
      return False
    
    # check that the url starts with http[s]:// or ftp:// or file://, others are not wanted
    if not url.startswith('http://') and not url.startswith('https://') and not url.startswith('ftp://') and not url.startswith('file://'):
      return False
    
    # check that the URL doesn't contain '/../', as those are mostly broken anyways
    if url.find('/../') != -1:
      return False
    
    # check that the url is not excluded
    for excurl in self._session.getSource().getExcludeUrls():
      if url.startswith(excurl):
        return False
    
    # check that the url doesn't end with any of the excluded file extensions
    for ext in self._session.getSource().getExcludeExt():
      if url.lower().endswith(ext.lower()):
        return False

    # check that the url is within the allowed domains
    if self._session.getSource().getDomains():	# check domain restrictions, if any
      netloc = urlparse.urlsplit(url)[1]	    # parse into components to find network location (host and port)
      host = netloc.split(':')[0]
      domainok = False
      for domain in self._session.getSource().getDomains():
        # exact host match, or a subdomain of an allowed domain
        if host == domain or host.endswith("."+domain):
          domainok = True
          break
      if not domainok:
        return False
    
    # check that the url matches a specified regex
    regex = self._session.getSource().getURLRegex()
    if regex and re.match(regex, url) is None:
      return False

    # check that we are allowed by the robots.txt file (if any) to read this URL
    rp = self.getRobotParser(url)
    if rp and not rp.can_fetch(self._session.getConfig().getUserAgent(), url):
      return False
  
    # all checks passed, url may be fetched
    return True
  
  def markAsSeen(self, url):
    """ record url as already processed, so isWantedUrl() rejects it from now on """
    self._seen.add(url)
  
  def _getCacheFile(self, url, ext):
    """ path of the cache file for url with the given extension:
        <datadir>/<source>/<md5-of-url>.<ext> """
    datadir = self._session.getConfig().getDataDir()
    sourceName = self._session.getSource().getName()
    m = md5()
    m.update(url)
    filename = m.hexdigest() + "." + ext
    return "%s/%s/%s" % (datadir, sourceName, filename)

  def getCachedResponse(self, url):
    """ look url up in the on-disk cache; returns a DataResponse, RedirectResponse
        or ErrorResponse depending on what is cached, or None on a cache miss """
  
    # search for data and headers
    dat = self._getCacheFile(url, 'dat')
    hdr = self._getCacheFile(url, 'hdr')
    if os.path.exists(dat) and os.path.exists(hdr):
      return DataResponse(dat, hdr, url)
    
    # search for redirects
    red = self._getCacheFile(url, 'red')
    if os.path.exists(red):
      return RedirectResponse(red)
    
    # search for errors
    err = self._getCacheFile(url, 'err')
    if os.path.exists(err):
      return ErrorResponse(err)
    
    return None
      
  def setCachedResponse(self, url, type, data):
    """ write data into url's cache file of the given type ('dat', 'hdr', 'red'
        or 'err') - a no-op unless keeping cached data is enabled
        (note: the 'type' parameter shadows the builtin of the same name) """
    if self._session.getKeep():
      fp = self._getCacheFile(url, type)
      f = file(fp, 'w')
      f.write(data)
      f.close()

  def getFirstURLs(self):
    """ the first 10 outstanding URLs in sorted order
        (presumably for progress/status display - confirm with callers) """
    return sorted(self._todo)[:10]
  
  def process(self, url):
    """ fetch a single URL (one work item): serve it from or store it into the
        cache, mark it seen, extract links when recursing, and schedule the
        resulting Document; returns True iff a Document was produced """
    # the url is being handled now, so it is no longer outstanding
    if url in self._todo: self._todo.remove(url)
  
    if not self.isWantedUrl(url):
      return False
    
    # check that the limit hasn't been reached
    if self._limit is not None and self._limit == 0:
      return False	# limit reached, don't fetch anything anymore
      
    # sleep a while in order to not overload the server
    # TODO this should only be done for non-cached requests
    time.sleep(self._session.getSource().getWait())
    
    # create a Request for it
    req = urllib2.Request(url)
    req.add_header('User-Agent', self._session.getConfig().getUserAgent())
    
    try:
      response = self._opener.open(req)
    except UnwantedURL:
      # nothing to see here, move on
      return False
    except CachedURLError, e:
      # a previously cached error: log it on a Document and skip the URL
      doc = Document(self._session, url.decode(URLCHARSET, 'replace'))
      doc.log('SKIP', 'fetch', 'Exception during URL retrieval', e.reason)
      doc.processingFinished()
      return False
    except Exception, e: # something worse than UnwantedURL happened
      print >>sys.stderr, "Exception during URL retrieval for %s: %s" % (repr(url), str(e))
      # save in cache
      self.setCachedResponse(url, 'err', str(e))
      doc = Document(self._session, url.decode(URLCHARSET, 'replace'))
      doc.log('SKIP', 'fetch', 'Exception during URL retrieval', str(e))
      doc.processingFinished()
      return False
    
    # redirects may have changed the URL; continue with the final one
    url = response.geturl()
      
    # mark the resulting (final) url as seen
    # note: intermediate (redirecting) urls have been added to seen by the handler
    self.markAsSeen(url)
    if self._limit is not None: self._limit -= 1
    
    data = response.read()

    if not isinstance(response, DataResponse):
      # a live response (was not yet in cache)
      # cache the response data (copying the data so Document can use it below)
      self.setCachedResponse(url, 'dat', data)
      # cache the response headers
      self.setCachedResponse(url, 'hdr', str(response.info()))

    response.close()

    # MIME type is the Content-Type header value with any ';' parameters stripped
    mimetype = response.info().getheader('Content-Type').split(';')[0]
    
    # create a Document object
    doc = Document(self._session, url.decode(URLCHARSET, 'replace'), mimetype=mimetype, data=data)
    
    # check that the URL decoded properly, warn if it didn't
    try:
      dec_url = url.decode(URLCHARSET, 'strict')
    except Exception, e:
      doc.log('WARN', 'fetch', 'URL not in expected encoding (%s)' % URLCHARSET, str(e))
    
    # parse the HTML, looking for links
    if self._session.getRecurse():
      tree = doc.getParseTree()
      if tree is not None:
        # add each link to the work queue
        for a in tree.getiterator('a'):
          href = a.get('href')
          if not href: continue
          # the crawler works on byte strings, so encode the parsed href back
          href = href.encode(URLCHARSET)
          linkurl = urlparse.urljoin(url, href, allow_fragments=False)
          linkurl = urlparse.urldefrag(linkurl)[0]
          if self.isWantedUrl(linkurl) and linkurl not in self._todo:
            self._todo.add(linkurl)
            self.addWork(linkurl)
    
    doc.setCompleted('fetch')
    self._session.schedule(doc)
    return True

  def finish(self):
    """ release the seen-set when work is done
        NOTE(review): assumes Worker calls finish() exactly once at shutdown
        and process() is never called afterwards - confirm in worker.py """
    # possibly free some memory
    del self._seen
