
import sys, os, re
import urllib
import StringIO
from lxml import etree
from dateutil import parser as dateparser
import cPickle
import bsddb as bdb
from baseHandler import *

# Shared lxml HTML parser instance, reused for every Wayback calendar page
# parsed in ArchiveItHandler.fetch_changes.
parser = etree.HTMLParser()

class ArchiveItHandler(BaseProxyHandler):
    """Memento proxy handler for Archive-It collections.

    Resolves a requested URI to the Archive-It collections holding captures
    of it (via a local BerkeleyDB prefix index), then scrapes each
    collection's Wayback calendar page to build a list of observed changes.
    """

    def fetch_changes(self, req, requri, dt=None):
        """Return a sorted list of (datetime, location, info) captures of requri.

        req    -- proxied request object (only used for error responses)
        requri -- the resource URI being looked up
        dt     -- accepted for interface compatibility; unused here

        Responds with a 404 via self.error() when the URI is not in the index.
        """
        # The index maps URI prefixes -> pickled list of collection ids.
        cxn = bdb.db.DB()
        cxn.open('archiveItHash.bdb')
        c = cxn.cursor()

        def _seek(cursor, target):
            # set_range returns None once the cursor passes the last key
            # (the original code crashed unpacking that case).  Map it to a
            # sentinel key that can never satisfy the prefix tests below,
            # so end-of-index behaves like an ordinary miss.
            rec = cursor.set_range(target)
            return rec if rec is not None else ('\x00', None)

        try:
            (key, val) = _seek(c, requri)

            if not key.startswith(requri) and not requri.startswith(key):
                try:
                    # Truncate to the host root: first '/' past "http://".
                    x = requri.index('/', 7)
                    curi = requri[:x+1]
                except ValueError:
                    curi = requri
                (key, val) = _seek(c, curi)

                if not key.startswith(curi) and not requri.startswith(key) and curi.startswith('http://www.'):
                    # Last resort: retry without the "www." host prefix.
                    curi = 'http://' + curi[11:]
                    (key, val) = _seek(c, curi)

                if not key.startswith(curi) and not requri.startswith(key):
                    return self.error("404: Resource not present in Web Archives: %s  (%s vs %s)" % (requri, curi, key), req, status=404, ct="text/plain")
            else:
                curi = requri

            # Union the collection ids stored under every matching key.
            collections = {}
            for l in cPickle.loads(val):
                collections[l] = 1

            rec = c.next()
            while rec is not None:
                (key, val) = rec
                if not (key.startswith(curi) or requri.startswith(key)):
                    break
                for l in cPickle.loads(val):
                    collections[l] = 1
                rec = c.next()
        finally:
            # Always release the cursor and DB handle -- the original
            # leaked both when returning early on the 404 paths.
            c.close()
            cxn.close()

        colls = collections.keys()
        colls.sort()
        changes = []

        for coll in colls:
            iauri = "http://wayback.archive-it.org/%s/*/%s" % (coll, requri)
            try:
                fh = urllib.urlopen(iauri)
            except Exception:
                # Best effort: skip collections we cannot reach.
                continue
            data = fh.read()
            fh.close()
            try:
                dom = etree.parse(StringIO.StringIO(data), parser)
            except Exception:
                continue
            # The calendar page marks its capture row with this bgcolor;
            # anything else means an unexpected page layout -- skip it.
            xps = dom.xpath('//tr[@bgcolor="#EBEBEB"]')
            if len(xps) != 1:
                continue
            tr = xps[0]
            for a in tr.xpath('.//a'):
                # Anchor text is the capture date; an anchor with tail text
                # starts a new change, a tail-less one re-observes the
                # previous change (same content, later capture).
                dtobj = dateparser.parse(a.text + ' 00:00:00Z')
                loc = a.attrib['href']
                if a.tail or not changes:
                    # `not changes` guards the first anchor lacking a tail,
                    # which previously raised IndexError on changes[-1].
                    changes.append((dtobj, loc, {'last': dtobj, 'obs': 1}))
                else:
                    changes[-1][-1]['last'] = dtobj
                    changes[-1][-1]['obs'] += 1
        changes.sort()
        return changes


def handler(req):
    """mod_python entry point: dispatch req through an Archive-It handler."""
    return basehandler(req, ArchiveItHandler('ait'))


