import logging, functools
import util

logger = logging.getLogger("curlspider")

try:
   import pycurl
except ImportError:
   raise ImportError("pycurl not installed; get it from http://pycurl.sourceforge.net/ to use this library")


class Spider(dict):
   """Fetch a list of URLs concurrently through a pool of pycurl handles.

   Callback contract (all optional except initializer):
     initializer(url)                 -> per-URL context passed to the other callbacks
     writer(ctx, data)                -> receives response body chunks; when None,
                                         bodies are skipped (NOBODY) and only
                                         redirects are tracked
     finalizer(ctx, effective_url)    -> called on success (HTTP < 400)
     errhandler(ctx, effective_url, errno, errmsg) -> called on curl error or HTTP >= 400
   """

   logger = logging.getLogger("spider")

   # NOTE: these must be staticmethods, not plain lambdas -- a bare lambda
   # stored on the class would become a method and swallow its first
   # argument as `self` when accessed through an instance.
   @staticmethod
   def dummy_finalizer(original, effective):
      "no-op finalizer used when the caller supplies none"
      pass

   @staticmethod
   def dummy_errhandler(original, effective, errno, errmsg):
      "no-op error handler used when the caller supplies none"
      pass

   def __init__(self, urls=None, initializer=None, writer=None, finalizer=None, errhandler=None, num_conn=5, conn_timeout=30, timeout=200, validate_urls=True):
      """Configure the spider.

      urls          -- list of URLs to retrieve
      initializer   -- callable(url) returning the per-URL context
      writer        -- callable(ctx, data) fed body chunks; None = headers only
      finalizer     -- success callback (defaults to a no-op)
      errhandler    -- error callback (defaults to a no-op)
      num_conn      -- number of parallel curl connections
      conn_timeout  -- connect timeout in seconds
      timeout       -- total transfer timeout in seconds
      validate_urls -- pre-filter urls through util.validate_urls

      Raises ValueError when no valid URLs remain after validation.
      """
      if validate_urls:
         urls = util.validate_urls(urls)
      if not urls:
         raise ValueError("no valid links to retrieve")

      self.urls = urls
      self.initialize = initializer
      self.writer = writer
      # BUG FIX: the original referenced the bare names dummy_finalizer /
      # dummy_errhandler here, which raised NameError -- they live on the class.
      self.finalizer = finalizer or self.dummy_finalizer
      self.errhandler = errhandler or self.dummy_errhandler
      self.connection_count = num_conn
      self.timeout = timeout
      self.connection_timeout = conn_timeout
      self.follow_location = True
      self.max_redirects = 5

      self._addHandles()


   def addURLs(self, urls):
      "append a list of urls to retrieve"
      # BUG FIX: validate_urls lives in the util module, not this namespace
      self.urls.extend(util.validate_urls(urls))


   def _addHandles(self):
      "configure the curl connection handles"
      self._curl = pycurl.CurlMulti()
      self._curl.handles = []
      for i in range(self.connection_count):
         c = pycurl.Curl()
         c.setopt(pycurl.FOLLOWLOCATION, self.follow_location)
         c.setopt(pycurl.MAXREDIRS, self.max_redirects)
         c.setopt(pycurl.CONNECTTIMEOUT, self.connection_timeout)
         c.setopt(pycurl.TIMEOUT, self.timeout)
         #c.setopt(pycurl.NOSIGNAL, 1)
         self._curl.handles.append(c)


   def _cleanup(self):
      "close opened files etc"
      for c in self._curl.handles:
          c.close()
      self._curl.close()


   def run(self):
      """Drive the curl multi state machine until every URL is processed.

      Returns the number of URLs processed (successes + failures + skips).
      """
      freelist = self._curl.handles[:]
      num_processed = 0
      num_urls = len(self.urls)
      queue = self.urls

      while num_processed < num_urls:
         # if there is an url to process and a free curl object, add to stack
         while queue and freelist:
            url = queue.pop(0)
            c = freelist.pop()
            try:
               c.url = url
               c.setopt(pycurl.URL, url)
               # get the callbacks
               init = self.initialize(url)
               if self.writer:
                  # a functools.partial is not accepted by curl so need a proxy
                  writer = util.get_proxywriter(self.writer, init)
                  c.setopt(pycurl.WRITEFUNCTION, writer)
               else:
                  # if writer is set to None, we track redirects
                  c.setopt(pycurl.NOBODY, 1)
               c.errhandle = functools.partial(self.errhandler, init)
               c.finalize = functools.partial(self.finalizer, init)
               self._curl.add_handle(c)
               logger.debug("fetching: %s ...", url)
            except Exception as e:
               logger.warning("skipping: %s: %s", url, e)
               num_processed += 1
               # BUG FIX: return the handle to the pool, otherwise every
               # skipped URL permanently shrinks the connection pool
               freelist.append(c)

         # run the internal state machine for the stack
         while 1:
            ret, num_handles = self._curl.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
               break

         # check for terminated curl objects, add them to the free list
         while 1:
            num_q, ok_list, err_list = self._curl.info_read()
            for c in ok_list:
               effective_url = c.getinfo(pycurl.EFFECTIVE_URL)
               logger.debug("retrieved: %s (%s)", c.url, effective_url)
               HTTP_CODE = c.getinfo(pycurl.RESPONSE_CODE)
               if HTTP_CODE >= 400:
                  # HTTP-level failure: report through the error handler
                  c.errhandle(effective_url, HTTP_CODE, "")
               else:
                  c.finalize(effective_url)
               self._curl.remove_handle(c)
               freelist.append(c)
            for c, errno, errmsg in err_list:
               effective_url = c.getinfo(pycurl.EFFECTIVE_URL)
               c.errhandle(effective_url, errno, errmsg)
               self._curl.remove_handle(c)
               logger.warning("not retrieved: %s: %s (%s)", c.url, errno, errmsg)
               freelist.append(c)
            num_processed = num_processed + len(ok_list) + len(err_list)
            if num_q == 0:
               break

         # call select() until some more data is available
         self._curl.select(1.0)

      self._cleanup()
      return num_processed


if __name__ == "__main__":
   logging.basicConfig(level=logging.DEBUG)

   # add some links and enable either spider to test
   # BUG FIX: close links.txt deterministically instead of leaking the handle
   with open("links.txt") as link_file:
      links = [line.strip() for line in link_file]
   s = Spider(
      links,
      initializer=util.file_initializer,
      finalizer=util.file_finalizer,
      writer=util.file_writer,
      errhandler=util.file_errhandler
   )
   #s = Spider(links, initializer=debug_initializer, finalizer=debug_finalizer, errhandler=debug_errhandler)
   s.run()
