
# Memento Cache Proxy for Google search engine.
# Author: Ahmed AlSum aalsum@cs.odu.edu
# Date: July 21, 2010

import urllib2

import sys, os, re

from dateutil import parser as dateparser
from baseHandler import *

# Base URL of Google's cache lookup service; the target URI is appended
# directly after the "cache:" operator.
end_point = 'http://webcache.googleusercontent.com/search?q=cache:'


class GoogleHandler(BaseProxyHandler):
    """Memento proxy handler backed by Google's web cache.

    Looks up the single cached copy Google keeps for a URI and reports
    its snapshot datetime as one "observed" change.
    """

    def fetch_changes(self, req, requri, dt=None):
        """Fetch the Google cache page for ``requri`` and extract its datetime.

        req    -- the originating proxy request object (unused here).
        requri -- the URI whose Google-cached copy is requested.
        dt     -- requested datetime (unused; Google keeps only one
                  snapshot per URI).

        Returns a one-element list of ``(datetime, location, info)``
        tuples, or None when Google has no usable cached copy.
        """
        final_url = end_point + requri

        # Google rejects requests that lack a browser-like User-Agent.
        user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.10) Gecko/20100504 Firefox/3.5.10 (.NET CLR 3.5.30729)'
        headers = {'User-Agent': user_agent}
        request = urllib2.Request(final_url, None, headers)

        # Bug fix: a URI with no cached copy makes Google answer with an
        # HTTP error (404); treat that as "no changes" instead of letting
        # the exception propagate out of the handler.
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError:
            return None

        # Bug fix: the response was never closed, leaking the connection.
        try:
            the_page = response.read()
        finally:
            response.close()

        changes = None

        # Confirm we really received a Google cache page (and not, e.g.,
        # an interstitial served with status 200).
        if 'This is Google&#39;s cache of' in the_page:
            # The cache banner embeds the snapshot time in the form
            # "Jul 21, 2010 01:23:45".
            date_expression = re.compile(
                r"((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)"
                r" \d{1,2}, (19|20)\d\d \d\d:\d\d:\d\d)")
            result = date_expression.search(the_page)
            if result:
                # NOTE(review): the original code appended " GMT" before
                # parsing, i.e. it assumes Google reports the time in GMT.
                dtobj = dateparser.parse(result.group(0) + " GMT")
                changes = [(final_url is not None and dtobj, final_url,
                            {'last': dtobj, 'obs': 1, 'type': 'observed'})]
                # Keep the exact original tuple shape: (datetime, url, info).
                changes = [(dtobj, final_url,
                            {'last': dtobj, 'obs': 1, 'type': 'observed'})]

        return changes
     

def handler(req):
    """Entry point: route the request through the shared base handler
    using a Google-cache-backed proxy handler."""
    return basehandler(req, GoogleHandler('google'))