#!/usr/bin/python -i

import sys, os, re
import time
import urllib
import StringIO
from lxml import etree
from dateutil import parser as dateparser

import cPickle
import bsddb as bdb

# Lenient lxml HTML parser, shared by every page fetch below.
parser = etree.HTMLParser()
# Root of the Archive-It wayback service; its index page links to each
# collection as "<numeric-id>/" (matched by linkre further down).
topuri = "http://wayback.archive-it.org/"

try:
    fh = urllib.urlopen(topuri)
except:
    print "Couldn't retrieve IA data"

data = fh.read()
fh.close()

try:
    dom = etree.parse(StringIO.StringIO(data), parser)
except:
    print "Not parsable"

linkre = re.compile('^[0-9]+/$')
links = dom.xpath('//a/@href')
collections = []
for l in links:
    if linkre.match(l):
        collections.append(l[:-1])


linkCollHash = {}

        
for c in collections:
    print "Processing Collection: %s" % c

    uri = "http://www.archive-it.org/collections/" + c
    try:
        fh = urllib.urlopen(uri)
    except:
        print "Couldn't retrieve IA data"
        
    data = fh.read()
    fh.close()

    try:
        dom = etree.parse(StringIO.StringIO(data), parser)
    except:
        print "Not parsable"

    links = dom.xpath('//a/@href')
    for l in links:
        base = 'http://wayback.archive-it.org/%s/*/' % c
        if l.startswith(base):
            href = l[len(base):]
            try:
                linkCollHash[href].append(c)
            except:
                linkCollHash[href] = [c]
                

            
# Persist the uri -> collections mapping.  Pickle data is binary, so the
# file must be opened 'wb' (the original used text mode 'w' via the
# deprecated file() builtin); try/finally guarantees the handle is closed
# even if the write fails.
save = cPickle.dumps(linkCollHash)
fh = open('linkCollHash.pickle', 'wb')
try:
    fh.write(save)
finally:
    fh.close()

cxn = bdb.db.DB()
dbPath = "archiveItHash.bdb"
cxn.open(dbPath, dbtype=bdb.db.DB_BTREE, flags = bdb.db.DB_CREATE, mode=0660)
for (k,v) in linkCollHash.iteritems():
    try:
        cxn.put(k, cPickle.dumps(v))
    except:
        cxn.put(k.encode('utf8'), cPickle.dumps(v))
cxn.close()

    
