
import urllib2
import StringIO
from lxml import etree
from urlparse import urlparse
import cPickle

import os, sys

import time
import datetime
from dateutil import parser as dateparser

from baseHandler import BaseProxyHandler, basehandler

# UTC/GMT tzinfo object, obtained by parsing a known-GMT timestamp with
# dateutil; attached to the datetimes built from API timestamps below.
utz = dateparser.parse('2009-01-01 12:00:00 GMT').tzinfo

def iso_to_dt(date):
    """Convert a MediaWiki API ISO-8601 timestamp to an aware UTC datetime.

    Only the first 19 characters are read ('YYYY-MM-DDxHH:MM:SS'), so both
    'T' and ' ' separators and a trailing 'Z' are tolerated.

    Bug fix: the previous implementation pushed the fields through
    time.mktime(), which interprets a struct_time in the *local* timezone,
    then attached the UTC tzinfo anyway -- skewing every timestamp by the
    host's UTC offset.  The API timestamps are UTC, so the datetime is now
    constructed directly with the UTC tzinfo.
    """
    return datetime.datetime(int(date[:4]), int(date[5:7]), int(date[8:10]),
                             int(date[11:13]), int(date[14:16]),
                             int(date[17:19]), 0, utz)
    

class WikiaHandler(BaseProxyHandler):
    """Memento proxy handler for Wikia-hosted MediaWiki sites.

    Resolves a wiki page's full revision history through the MediaWiki API
    and returns it as a chronologically sorted list of
    (datetime, revision-URL, metadata-dict) tuples.  The mapping of wiki
    base URLs to API endpoints is loaded from 'wikihash.pkl' in the
    current working directory.
    """

    def __init__(self, loc):
        BaseProxyHandler.__init__(self, loc)
        # Browser-like headers; some wikis reject non-browser user agents.
        self.hdrs = {
                'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language' : 'en-us,en;q=0.5',
                'Proxy-Connection' : 'keep-alive',
                'Pragma' : 'no-cache',
                'Cache-Control' : 'no-cache',
                'User-Agent' : 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2'}

        # build_opener() follows redirects by default; the explicit handler
        # is kept to make that behavior obvious.
        hdlr = urllib2.HTTPRedirectHandler()
        self.opener = urllib2.build_opener(hdlr)

        # Map: wiki base URL -> MediaWiki API endpoint URL.  A missing
        # pickle file simply leaves the map empty.
        self.hosts = {}
        if os.path.exists('wikihash.pkl'):
            # NOTE(review): unpickling is only acceptable because this file
            # is produced locally; never point it at untrusted data.
            fh = open('wikihash.pkl', 'rb')  # was the deprecated file() builtin
            try:
                self.hosts = cPickle.load(fh)
            finally:
                fh.close()

    def fetch_dom(self, wikiuri, host):
        """Fetch wikiuri and parse the response as XML.

        Returns the document's root element, or None on any network or
        parse failure.  Side effect: rewrites the shared 'Host' entry in
        self.hdrs for this request.
        """
        self.hdrs['Host'] = host
        try:
            ureq = urllib2.Request(wikiuri, headers=self.hdrs)
            fh = self.opener.open(ureq)
        except Exception:
            # Network/HTTP failure: callers treat None as "no more data".
            return None

        # Close the response even if read() raises (was leaked before).
        try:
            data = fh.read()
        finally:
            fh.close()
        try:
            dom = etree.parse(StringIO.StringIO(data))
        except Exception:
            # Was a bare except:, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            return None
        return dom.getroot()

    def fetch_changes(self, req, requri, dt=None):
        """Return the revision history of the page identified by requri.

        Each entry is (datetime, revision-URL, {}) and the list is sorted.
        Returns [] when requri does not start with any known wiki base URL.
        'req' and 'dt' are unused here but part of the handler interface.
        """
        p = urlparse(requri)
        host = p[1]
        upath = p[2]
        # Parse the query string.  Split on the first '=' only, so a value
        # containing '=' no longer discards the entire query dict.
        try:
            qry = dict([part.split('=', 1) for part in p[4].split('&')])
        except Exception:
            qry = {}

        # Locate the API endpoint for the wiki this URI belongs to.
        api = ''
        for k in self.hosts.keys():
            if requri.startswith(k):
                api = self.hosts[k]
                base = k
                break
        if not api:
            return []

        # Page title: an explicit ?title= wins, else the last path segment.
        if 'title' in qry:
            title = qry['title']
        else:
            (pref, title) = upath.rsplit('/', 1)

        url = "%s?format=xml&action=query&prop=revisions&meta=siteinfo&rvprop=timestamp|ids&rvlimit=500&redirects=1&titles=%s" % (api, title)

        changes = []
        base = "%sindex.php?oldid=" % (base,)

        # Walk the API's continuation pages until the history is exhausted.
        dom = self.fetch_dom(url, host)
        while dom is not None:
            revs = dom.xpath('//rev')
            for r in revs:
                i = iso_to_dt(r.attrib['timestamp'])
                changes.append((i, base + r.attrib['revid'], {}))
            cont = dom.xpath('/api/query-continue/revisions/@rvstartid')
            if cont:
                dom = self.fetch_dom(url + "&rvstartid=" + cont[0], host)
            else:
                dom = None
        changes.sort()
        return changes


def handler(req):
    """mod_python entry point: serve req through a fresh WikiaHandler."""
    # The pickle and any relative paths live in the proxy's home directory.
    os.chdir('/home/web/mementoproxy')
    return basehandler(req, WikiaHandler('smwiki'))

if __name__ == '__main__':
    # Command-line smoke test: print nothing, just fetch the change list
    # for the URI given as the first argument.
    wikia = WikiaHandler('smwiki')
    changes = wikia.fetch_changes(None, sys.argv[1])
    # pprint.pprint(changes)
