#!/usr/bin/env python2.5

'''
Simple interface for fetching URLs in parallel using the urlfetch service.

Fetcher example:

    def walk(self, depth, response):
        if isinstance(response, Exception) or response.status_code != 200:
            return

        self.urls.append(response.url)
        if not depth:
            return

        walker = lambda response: self.walk(depth-1, response)
        for url in parse_urls(response.content):
            self.fetcher.start(url, walker)

    def get(self):
        """
        Build a URL graph starting at some root URL.
        """

        self.urls = []
        self.fetcher = megafetch.Fetcher()
        self.fetcher.start(self.request['url'], lambda r: self.walk(3, r))
        self.fetcher.wait()
        self.response.out.write(str(self.urls))


fetch_cached example:

    def print_md5(self, response):
        if isinstance(response, Exception) or response.status_code != 200:
            return

        msg = 'MD5(%s) = %s\n' % (response.url,
                                  md5.new(response.content).hexdigest())
        self.response.out.write(msg)

    def get(self):
        """
        Given a bunch of URLs as get parameters, generate a document that
        contains hashes of those URLs.
        """

        self.response.headers['Content-type'] = 'text/plain'
        callback_map = dict.fromkeys(self.request.get('url'), self.print_md5)
        megafetch.fetch_cached(callback_map, ttl=3600)
'''

# Standard module metadata. The original spelled these with single
# underscores (_author_, _id_), which no tooling recognizes; the
# conventional names are the double-underscore forms.
__author__ = 'David Wilson <dw@botanicus.net>'
__id__ = '$Id$'

import logging

from google.appengine.api import memcache
from google.appengine.api import urlfetch

from google.appengine.api.urlfetch_service_pb import URLFetchRequest, \
                                                     URLFetchResponse

from async_apiproxy import AsyncAPIProxy


# Map both string method names ('GET', ...) and the urlfetch module's
# method constants (urlfetch.GET, ...) onto the corresponding
# URLFetchRequest protobuf method enumeration values.
_METHOD_MAP = {}
for _name in ('DELETE', 'GET', 'HEAD', 'POST', 'PUT'):
    _pb_method = getattr(URLFetchRequest, _name)
    _METHOD_MAP[_name] = _pb_method
    _METHOD_MAP[getattr(urlfetch, _name)] = _pb_method
del _name, _pb_method


class Fetcher(object):
    '''
    Asynchronous URL fetcher: provides an urlfetch API-alike interface but
    handles fetches in parallel.
    '''

    def __init__(self):
        '''
        Initialize a new instance with its own AsyncAPIProxy to track
        outstanding RPCs.
        '''
        self.proxy = AsyncAPIProxy()

    def start(self, url, callback, payload=None, method=urlfetch.GET,
              headers=None, allow_truncated=False, follow_redirects=True):
        '''
        Start a new fetch. Parameters have the same meaning as urlfetch.fetch
        except callback, which is the response callback invoked as:

            callback(response-object-or-exception)

        Callback is expected to look like:

            def my_callback(response):
                if isinstance(response, Exception):
                    # Handle failure.
                else:
                    # Handle success.
        '''
        # Avoid the shared mutable default-argument pitfall: the previous
        # default of headers={} created a single dict shared by every call.
        if headers is None:
            headers = {}

        # NOTE(review): allow_truncated is accepted for urlfetch.fetch API
        # compatibility but is never applied to the request -- confirm
        # whether the service needs it set.
        request = URLFetchRequest()
        response = URLFetchResponse()

        # Accept either a string method name ('get') or one of the
        # urlfetch module constants (urlfetch.GET); both are translated
        # to the protobuf enumeration via _METHOD_MAP.
        if isinstance(method, basestring):
            request.set_method(_METHOD_MAP[method.upper()])
        else:
            request.set_method(_METHOD_MAP[method])

        request.set_url(url)
        if payload is not None:
            request.set_payload(payload)
        request.set_followredirects(follow_redirects)

        for key, value in headers.iteritems():
            header = request.add_header()
            header.set_key(key)
            header.set_value(str(value))

        def wrap(response, exc):
            # Deliver either the exception or a wrapped result to the
            # user's callback. The original URL is recorded on the result
            # because the protobuf response does not carry it.
            if exc:
                return callback(exc)
            result = urlfetch._URLFetchResult(response)
            result.url = url
            callback(result)

        self.proxy.start_call('urlfetch', 'Fetch', request, response, wrap)

    def wait(self):
        '''
        Wait for all pending URL fetches to complete.
        '''
        self.proxy.wait()


def fetch_cached(url_cb_map, ttl=3600):
    '''
    Fetch a set of URLs in parallel, caching successful responses in Memcache
    where possible.

    @param[in]  url_cb_map      Mapping of URLs to callback functions per
                                Fetcher.start(). Entries satisfied from the
                                cache are popped from this mapping.
    @param[in]  ttl             Time to live in seconds for cached results.
    @returns                    Integer number of requests satisfied by cache.
    '''

    keys = url_cb_map.keys()
    cached = 0

    # Satisfy as many requests as possible straight from Memcache; each hit
    # fires its callback immediately and is removed from the work map.
    for key, result in memcache.get_multi(keys, 'MegaFetch:').iteritems():
        url_cb_map.pop(key)(result)
        cached += 1

    if not url_cb_map:
        return cached

    fetcher = Fetcher()
    cache_map = {}

    def make_wrapper(callback):
        # Factory that binds `callback` for one URL. The previous version
        # defined wrap() directly inside the loop below, so every wrapper
        # closed over the same loop variable; by the time fetcher.wait()
        # invoked the callbacks, all responses were delivered to the *last*
        # callback in the map (late-binding closure bug).
        def wrap(resp):
            # Only cache definite successes (2xx); everything is passed
            # through to the user's callback regardless.
            if (not isinstance(resp, Exception)) and \
               (200 <= resp.status_code < 300):
                cache_map[resp.url] = resp
            callback(resp)
        return wrap

    for url, callback in url_cb_map.iteritems():
        fetcher.start(url, make_wrapper(callback))

    fetcher.wait()

    if cache_map:
        memcache.set_multi(cache_map, time=ttl, key_prefix='MegaFetch:')
    return cached
