

# Version 3:  Rewrite to use Wikimedia API, rather than HTML scraping

import sys, os, re
import time

import urllib2
import StringIO
from lxml import etree
from dateutil import parser as dateparser
import pprint
import re
from baseHandler import BaseProxyHandler, basehandler
import string
from time import strftime
from datetime import datetime
import datetime as dtmod
from datetime import timedelta
from isodate import parse_duration

def now():
    """Return the current UTC time as an ISO-8601 timestamp string.

    Format: YYYY-MM-DDTHH:MM:SSZ (always GMT/UTC, 'Z' suffix), matching
    what the datetime-negotiation code expects to parse back.
    """
    utc = time.gmtime()
    return "%04d-%02d-%02dT%02d:%02d:%02dZ" % utc[:6]

class WikiHandler(BaseProxyHandler):
    """Memento TimeGate handler for Wikipedia page histories.

    Version 3: discovers page revisions ("mementos") through the MediaWiki
    query API (action=query&prop=revisions) instead of scraping HTML, and
    redirects datetime-negotiated requests to the matching oldid= URL.
    """

    def __init__(self, d):
        # d: the URL directory prefix this handler is mounted under
        # (e.g. 'wiki'); all real setup happens in BaseProxyHandler.
        BaseProxyHandler.__init__(self, d)
        
 
    def handle_dt(self, req):
        """Datetime-negotiate `req` against the target page's revisions.

        Reads Accept-Datetime (optionally suffixed with ';-<dur>;+<dur>'
        ISO-8601 durations bounding an acceptable window), extracts the
        original URI from the request path, fetches candidate revisions
        via fetch_memento(), and answers with:
          302 + Location of the chosen revision on success,
          302 to a better TimeGate when fetch_memento returns a URL string,
          400 for an unparseable datetime or duration,
          404 when the resource yields no revisions,
          405 for verbs other than GET/HEAD,
          406 when the match falls outside the duration window.
        """

        nowd = now()
        current = dateparser.parse(nowd)

        ### Process Request information
        # Default to "now" when the client sent no Accept-Datetime header.
        try:
            reqdate = req.headers_in['accept-datetime']
        except KeyError:
            reqdate = nowd

        # Recover the original resource URI from the request path, e.g.
        # /<urldir>/timegate/http/en.wikipedia.org/wiki/Foo
        urlOffset = len(self.urldir) + len('/timegate/')
        if req.unparsed_uri.startswith(self.host):
            urlOffset += len(self.host)
        requri = req.unparsed_uri[urlOffset:]
        
        # 'http/' is how 'http://' survives as a path segment; also accept
        # bare host/path forms by prefixing the scheme.
        if requri.startswith('http/'):
            requri = requri.replace('http/', 'http://')
        if not requri.startswith('http'):
            requri = "http://" + requri

        # ; duration handling
        # befored/afterd: False = no window requested, None = bad duration,
        # otherwise a parsed duration (timedelta-like) from isodate.
        befored = False
        afterd = False
        if reqdate.find(';') > -1:
            dinfo = reqdate.split(';')
            reqdate = dinfo[0]
            if (len(dinfo) == 3):
                # ID Version
                before = dinfo[1]
                after = dinfo[2]
            else:
                before = ""
                after = ""

            # strip whitespace
            reqdate = reqdate.strip()
            before = before.strip()
            after = after.strip()
            # The before-duration must be signed '-' and the after '+';
            # anything else is treated as absent.
            if len(before) > 0:
                if before[0] == '-' :
                    before = before[1:]
                else:
                    before = ""
            else:
                before = ""
            if len(after) > 0:
                if after[0] == '+':
                    after = after[1:]
                else:
                    after = ""
            else:
                after = ""

            # parse duration
            # on fail, 400
            try:
                befored = parse_duration(before)
            except:
                befored = None
            try:
                afterd = parse_duration(after)
            except:
                afterd = None

        try:    
            wanted = dateparser.parse(reqdate)
            if (wanted.tzinfo == None or  wanted.tzinfo.utcoffset(wanted) == None):
                # Naive date. Reparse with Timezone
                reqdate += " GMT"
                # NOTE(review): this re-parses the datetime *object*, not the
                # amended `reqdate` string; dateutil raises on it, so naive
                # dates fall into the except branch (wanted = None -> 400).
                # `dateparser.parse(reqdate)` looks intended — confirm.
                wanted = dateparser.parse(wanted)
        except:
            # Unparseable, but we need to fetch the list anyway for header :(
            wanted = None        
            
        if befored and wanted:
            # recalculate min/max datetimes
            minDateTime = wanted - befored
            # NOTE(review): afterd may be None here (only the '+' duration
            # failed to parse), which would raise on the addition — confirm.
            maxDateTime = wanted + afterd
        elif befored == False and wanted:
            # No window requested: accept any revision datetime.
            minDateTime = datetime(1,1,1,tzinfo=wanted.tzinfo)
            maxDateTime = datetime(9999,12,31,tzinfo=wanted.tzinfo)
        else:
            # Bad duration (or bad datetime): flagged for the 400 below.
            minDateTime = None
            maxDateTime = None

        # Database Access via overridden fetch_changes
        # changes is a list of (datetime, revision-URL, info-dict) tuples,
        # sorted ascending — or a URL string meaning "redirect elsewhere".
        changes = self.fetch_memento(req, requri, dt=wanted)
        self.revisions = changes

        # Setup response information: link headers
        if changes:
            first = changes[0]
            last = changes[-1]
        else:
            first = None
            last = None

        next = None
        prev = None

        ore = self.host + self.urldir + '/timebundle/' + requri
        tmap = self.host + self.urldir + '/timemap/link/' + requri
        links = ['<%s>;rel="timebundle"' % ore,
                 '<%s>;rel="original"' % requri,
                 '<%s>;rel="timemap";type="application/link-format"' % tmap]

        # Process Error Conditions

        if type(changes) == str:
            # Redirect to better TimeGate
            req.err_headers_out['Location'] = changes            
            req.err_headers_out['Link'] = '<%s>;rel="original"' % requri
            return self.send('Redirecting to better TimeGate: %s' % changes, req, status=302)
        else:
            req.err_headers_out['Vary'] = 'negotiate,accept-datetime'

            # check VERB used for GET/HEAD
            if not changes:
                return self.error('Resource not in archive<br/><i><blockquote><ul><li>Resource: <b>%s</b></li></ul></blockquote></i>' % requri, req, status=404)
            elif req.method != "GET" and req.method != "HEAD":
                # 405
                req.err_headers_out['Allow'] = "GET, HEAD"
                req.err_headers_out['Link'] = self.construct_linkhdr(links, first, last)
                return self.error("Only GET and HEAD allowed", req, status=405)
            elif wanted == None:
                fmtstr = "%a, %d %b %Y %H:%M:%S GMT"
                req.err_headers_out['Link'] = self.construct_linkhdr(links, first, last)

                # XXX Body must have list of resources
                return self.error("Datetime format not correct<br/><i><blockquote><ul><li>Date: %s</li><li>Expected: %s</li></ul></blockquote></i>" % (reqdate, current.strftime(fmtstr)), req, status=400)                
            elif minDateTime == None or maxDateTime == None:
                fmtstr = "%a, %d %b %Y %H:%M:%S GMT"
                req.err_headers_out['Link'] = self.construct_linkhdr(links, first, last)

                # XXX Body must have list of resources
                # (dinfo is always bound here: min/max are only None when the
                # header contained ';' durations that failed to parse)
                return self.error("Duration format not correct<br/><i><blockquote><ul><li>Date: %s</li><li>Expected: %s</li></ul></blockquote></i>" % (';'.join(dinfo), "P[yY][mM][dD][T[hH][mM][sS]]"), req, status=400)                
        
        # Select the memento to redirect to.
        if wanted == current or len(changes) == 1:
            # return last (or only)
            loc = last
            next = None
            prev = None
        elif wanted < first[0]:
            # Requested time predates the page: clamp to the first revision.
            loc = first
            next = changes[1]
        elif wanted > last[0]:
            # Requested time is after the newest revision: clamp to last.
            loc = last
            # NOTE(review): prev is set to changes[-1], which is `loc`
            # itself; changes[-2] looks intended — confirm.
            prev = changes[-1]
        else:
            # Seconds between two datetimes; only used by the disabled
            # nearest-memento code below.
            tdiff = lambda y,x: float(abs((y-x).days * 86400) + abs((y-x).seconds))
            # Else find closest
            # Walk forward to the first revision newer than `wanted`; the one
            # just before it was the revision in force at the requested time.
            for c in range(1, len(changes)):
                this = changes[c]
                if wanted < this[0] or c == len(changes)-1:
                    llast = changes[c-1]
                    loc = llast
                    if (c-2 >= 0):
                        prev = changes[c-2]
                    next = this

                    # code to fetch the nearest memento instead of the prev memento

                    #tdelta1 = tdiff(llast[0], wanted)
                    #tdelta2 = tdiff(this[0], wanted)

                    #if tdelta1 < tdelta2:
                        # Closest Memento to request is previous
                    #    loc = llast
                    #    if (c-2 >= 0):
                    #        prev = changes[c-2]
                    #    next = this
                    #else:
                    #    loc = this
                    #    prev = llast
                    #    if (c < len(changes)-1):
                    #        next = changes[c+1]
                    break
                    
        # Enforce the requested duration window, then redirect.
        if loc[0] < minDateTime or loc[0] > maxDateTime:
            req.err_headers_out['Link'] = self.construct_linkhdr(links, first, last, loc, next, prev)
            return self.error('Outside of request duration', req, status=406)
        else:
            req.err_headers_out['Link'] = self.construct_linkhdr(links, first, last, loc, next, prev)
            req.err_headers_out['Location'] = loc[1]
            return self.send('', req, status=302)

        
    def fetch_dom(self, wikiuri, req):
        """GET `wikiuri` (with self.hdrs) and return the parsed XML root.

        NOTE(review): on network or parse failure this returns whatever
        self.error() returns (an error response, not a DOM) — callers
        treat the result as a DOM unconditionally; confirm that is safe.
        """
        
        try:
            ureq = urllib2.Request(wikiuri, headers=self.hdrs)
            hdlr = urllib2.HTTPRedirectHandler()
            opener = urllib2.build_opener(hdlr)
            fh = opener.open(ureq)
        except Exception, e:
            return self.error("Couldn't retrieve Wikipedia data from %s" % wikiuri, req, status=404)

        data = fh.read()
        fh.close()
        try:
            dom = etree.parse(StringIO.StringIO(data))
        except:
            return self.error("Response from Wikipedia (%s) not parsable" % wikiuri, req, status=500)
        dom = dom.getroot()
        return dom

    def fetch_memento(self, req, requri, dt=None):
        """Fetch just the revisions needed to answer a TimeGate request.

        Issues up to four MediaWiki API queries — the revision at/before
        `dt` (plus its predecessor), the one after `dt`, the newest, and
        the oldest — and returns them as a sorted list of
        (datetime, oldid-URL, info-dict) tuples. Returns [] for URIs that
        are not Wikipedia pages.
        """
        changes = []
        # NOTE: the dots before 'wikipedia' are unescaped, so this is a
        # slightly looser match than the literal hostname suffix.
        valid = re.compile('^http://.+.wikipedia.org')
        match = valid.match(requri)

        # MediaWiki rvstart timestamp format.
        dtfmstr = "%Y%m%d%H%M%S"

        if match is None:
            return changes
		
        dt_next = False
        if dt is None:
            # No datetime requested: anchor the "older" query at now.
            nowd = now()    
            current = dateparser.parse(nowd)
            dt = current.strftime(dtfmstr)
        else:
            # dt_next starts one second later so the "newer" query skips
            # the revision already covered by the "older" query.
            dt_del = timedelta(seconds=1)
            dt_next = dt + dt_del
            dt_next = dt_next.strftime(dtfmstr)
            dt = dt.strftime(dtfmstr)

        # Headers for the API calls; Host is the URI minus 'http://'.
        self.hdrs = {'Host' : match.group()[7:],
            'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language' : 'en-us,en;q=0.5',
            'Proxy-Connection' : 'keep-alive',
            'Pragma' : 'no-cache',
            'Cache-Control' : 'no-cache',
            'User-Agent' : 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2'}

        # Page title is whatever follows '/wiki/'.
        # NOTE(review): if '/wiki/' is absent, find() returns -1 and the
        # slice silently yields a bogus title — confirm callers guarantee it.
        titleIndex = string.find(requri,'/wiki/')
        title = requri[titleIndex+6:]

        # url_list holds the *names* of the local URL variables; the fetch
        # loop below resolves them through vars().
        url_list = []

        # url for getting the memento, prev
        mem_prev = match.group() + "/w/api.php?format=xml&action=query&prop=revisions&rvprop=timestamp|ids|user&rvlimit=2&redirects=1&titles=%s&rvdir=older&rvstart=%s" % (title, dt)
        url_list.append('mem_prev')

        # url for next
        if dt_next:
            next = match.group() + "/w/api.php?format=xml&action=query&prop=revisions&rvprop=timestamp|ids|user&rvlimit=2&redirects=1&titles=%s&rvdir=newer&rvstart=%s" % (title, dt_next)
            url_list.append('next')

        # url for last
        last = match.group() + "/w/api.php?format=xml&action=query&prop=revisions&rvprop=timestamp|ids|user&rvlimit=1&redirects=1&titles=%s" % title
        url_list.append('last')

        # url for first
        first = match.group() + "/w/api.php?format=xml&action=query&prop=revisions&rvprop=timestamp|ids|user&rvlimit=1&redirects=1&rvdir=newer&titles=%s" % title
        url_list.append('first')


        #url = url % (title, dt)
        #sys.stderr.write(url)
        #sys.stderr.flush()
        # Memento URLs are the permanent oldid= form.
        base = match.group() + "/w/index.php?oldid="
        dtobj = None

        for url in url_list:
            dom = self.fetch_dom(vars()[url], req)
            revs = dom.xpath('//rev')
            for r in revs:
                info = {}
                try: info['dcterms:creator'] = match.group() +'/wiki/User:' + r.attrib['user']
                except: pass
                info['type'] = 'valid'
                dtobj = dateparser.parse(r.attrib['timestamp'])
                info['last'] = dtobj
                # unknown usage... but likely loads
                info['obs'] = 0
                changes.append((dtobj, base + r.attrib['revid'], info))                
            
        if changes:
            # Sort ascending by datetime; mark the newest as still current.
            changes.sort()
            changes[-1][-1]['last'] = 'now'
        return changes


    def fetch_changes(self, req, requri, dt=None):
        """Fetch the complete revision history of a Wikipedia page.

        Pages through the MediaWiki API 500 revisions at a time (following
        rvstartid continuations) and returns a sorted list of
        (datetime, oldid-URL, info-dict) tuples; [] for non-Wikipedia URIs.
        `dt` is accepted for interface parity but unused here.
        """
        #self.fetch_memento( req, requri, dt )
        changes = []
        valid = re.compile('^http://.+.wikipedia.org')
        match = valid.match(requri)
        if match is None:
            return changes

        self.hdrs = {'Host' : match.group()[7:],
            'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language' : 'en-us,en;q=0.5',
            'Proxy-Connection' : 'keep-alive',
            'Pragma' : 'no-cache',
            'Cache-Control' : 'no-cache',
            'User-Agent' : 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2'}

        titleIndex = string.find(requri,'/wiki/')
        title = requri[titleIndex+6:]
        # with extra info
        url = match.group() + "/w/api.php?format=xml&action=query&prop=revisions&meta=siteinfo&rvprop=timestamp|ids|user&rvlimit=500&redirects=1&titles="

        # basic info
        #url = "http://en.wikipedia.org/w/api.php?format=xml&action=query&prop=revisions&meta=siteinfo&rvprop=timestamp|ids&rvlimit=500&redirects=1&titles="

        
        base = match.group() + "/w/index.php?oldid="
        dom = self.fetch_dom(url + title, req)
        dtobj = None
        while dom is not None:
            revs = dom.xpath('//rev')
            for r in revs:
                info = {}
                try: info['dcterms:creator'] = match.group() +'/wiki/User:' + r.attrib['user']
                except: pass
                info['type'] = 'valid'
                # NOTE(review): 'last' gets the *previous* iteration's
                # timestamp (None on the first rev) — presumably the end of
                # this revision's validity interval, but fetch_memento sets
                # it to the revision's own timestamp; confirm which is meant.
                info['last'] = dtobj
                dtobj = dateparser.parse(r.attrib['timestamp'])
                # unknown usage... but likely loads
                info['obs'] = 0
                changes.append((dtobj, base + r.attrib['revid'], info))                
            # Follow the API continuation marker for the next 500 revisions.
            cont = dom.xpath('/api/query-continue/revisions/@rvstartid')
            if cont:
                dom = self.fetch_dom(url + title + "&rvstartid=" + cont[0], req)
            else:
                dom = None
                
        if changes:
            # Sort ascending by datetime; mark the newest as still current.
            changes.sort()
            changes[-1][-1]['last'] = 'now'
        return changes


def handler(req):
    """mod_python entry point: dispatch `req` to a WikiHandler mounted
    under the 'wiki' URL directory via the shared basehandler driver."""
    return basehandler(req, WikiHandler('wiki'))


if __name__ == '__main__':
    # Command-line smoke test: fetch every revision of the URI passed as
    # argv[1] through fetch_changes and report the elapsed wall time.
    cli_handler = WikiHandler('aggr')
    t0 = time.time()
    revs = cli_handler.fetch_changes(None, sys.argv[1])
    elapsed = time.time() - t0
    pprint.pprint(revs)
    print("Fetch %s revs in %s seconds" % (len(revs), elapsed))
