

import sys, os, re
from dateutil import parser as dateparser
import bsddb as bdb
from baseHandler import BaseProxyHandler, basehandler
import cPickle

import threading
import pprint
import time
import urllib
import socket

socket.setdefaulttimeout(60)


def parse_link(header):
    """Parse an HTTP ``Link`` header into ``{uri: {param: [values]}}``.

    A small character-driven state machine over RFC 5988 style headers,
    e.g.::

        <http://a>;rel="memento", <http://b>;rel="first last"

    ``rel`` values are split on spaces, so each relation type becomes a
    separate list entry; every other parameter keeps its raw value as a
    single entry.

    Raises ValueError on malformed input (missing '<', ';' or '=').
    """
    state = 'start'
    data = list(header)
    links = {}
    uri = ''   # URI of the link currently being parsed (set in 'uri')
    pt = ''    # name of the parameter currently being parsed ('linkparam')

    while data:
        if state == 'start':
            # Skip whitespace; the next significant char must open a URI.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d != "<":
                raise ValueError("Expected < in start, got %s" % d)
            state = "uri"
        elif state == "uri":
            # Accumulate everything up to the closing '>'.
            uri = []
            d = data.pop(0)
            while d != ">":
                uri.append(d)
                d = data.pop(0)
            uri = ''.join(uri)
            links[uri] = {}
            state = "paramstart"
        elif state == 'paramstart':
            # ';' introduces a link parameter, ',' starts the next link.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d == ";":
                state = 'linkparam'
            elif d == ',':
                state = 'start'
            else:
                raise ValueError("Expected ; in paramstart, got %s" % d)
        elif state == 'linkparam':
            # Parameter name runs up to an (optionally space-padded) '='.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            paramType = []
            while not d.isspace() and d != "=":
                paramType.append(d)
                d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d != "=":
                raise ValueError("Expected = in linkparam, got %s" % d)
            state = 'linkvalue'
            pt = ''.join(paramType)
            # (was links[uri].has_key(pt) -- removed in Python 3)
            if pt not in links[uri]:
                links[uri][pt] = []
        elif state == 'linkvalue':
            # Token or quoted-string; return to 'paramstart' afterwards.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            paramValue = []
            if d == '"':
                # Quoted string: consume up to the first '"' that is NOT
                # preceded by a backslash.  (The original condition
                # `d != '"' and pd != '\\'` stopped at ANY backslash,
                # truncating values containing one.)
                pd = d
                d = data.pop(0)
                while not (d == '"' and pd != '\\'):
                    paramValue.append(d)
                    pd = d
                    d = data.pop(0)
            else:
                # Bare token: ends at whitespace, ',' or ';'.  Push the
                # terminator back so 'paramstart' can see it.
                while not d.isspace() and d not in (',', ';'):
                    paramValue.append(d)
                    if data:
                        d = data.pop(0)
                    else:
                        break
                if data:
                    data.insert(0, d)
            state = 'paramstart'
            pv = ''.join(paramValue)
            if pt == 'rel':
                # Relation types are space-separated inside one value.
                links[uri][pt].extend(pv.split(' '))
            else:
                links[uri][pt].append(pv)
    return links

class FetchChangesThread(threading.Thread):
    """Fetch one archive's TimeMap in the background.

    The thread name (set via setName before start()) is the full TimeMap
    URI to fetch.  The parsed change list is posted into
    ``self.resultHash`` -- a dict shared with the spawning code, keyed by
    that URI -- so the caller can poll ``len(resultHash)`` to learn when
    every thread has finished.  On any fetch error an empty list is
    posted, so the poller still terminates.
    """

    def run(self):
        # call thr.start(), which calls this
        # NB if this raises, processing will never end

        changes = []
        ruri = self.getName() 

        try:
            fh = urllib.urlopen(ruri)
            data = fh.read();
            ct = fh.headers.dict['content-type']
            fh.close()
        except:
            # Deliberate best-effort: a dead/slow archive must not hang
            # the aggregator, so record "no results" and bail out.
            self.resultHash[ruri] = []
            return

        # Heuristic: treat the body as link-format TimeMap text if the
        # content-type or a ';rel=' marker suggests it.
        if (ct.startswith('text/plain') or ct.startswith('text/csv') or data.find(';rel=') > -1 or data.find('; rel=') > -1):
            # Fast and UGLY
            for line in data.split('\n'):
                line = line.strip()
                didx = line.find('datetime=')
                if (didx > -1):
                    # Memento URI is between the first '<' and '>'.
                    uri = line[line.find('<')+1:line.find('>')]
                    # 'datetime=' is 9 chars; +10 also skips the opening
                    # quote, and [:-1] drops the trailing delimiter
                    # (presumably ',' or ';' -- assumes one per line).
                    date = line[didx+10:-1]
                    if (date[-1] == '"'):
                        date = date[:-1]
                    dt = dateparser.parse(date)
                    changes.append((dt, uri, {}))
        else:
            # NOTE(review): non-TimeMap responses are ignored; `changes`
            # is still empty here, so this sort is a no-op -- looks like
            # leftover code.
            changes.sort()
        self.resultHash[ruri] = changes
    
class AggrHandler(BaseProxyHandler):
    """Aggregating Memento TimeGate.

    Fans a request out to several archive TimeMap services in parallel
    threads, merges and caches the resulting change lists, and redirects
    URIs that are already Mementos (or have a dedicated proxy) instead.
    """

    def __init__(self, d):
        BaseProxyHandler.__init__(self, d)
        # URIs matching any of these patterns are already Mementos, so
        # the request is redirected straight back to the original.
        # ('.' and '?' are escaped: the originals left these regex
        # metacharacters bare, so e.g. '?' made the preceding 'p'
        # optional instead of matching a literal '?'.)
        self.mementoRes = [re.compile(r'^http://web\.archive\.org/[0-9]+/.+'),
                           re.compile(r'^http://(www\.)?webcitation\.org/.*'),
                           re.compile(r'^http://wayback\.archive-it\.org/[0-9]+/.+'),
                           re.compile(r'^http://upload\.wikimedia\.org/.+'),
                           re.compile(r'^http://.+\.wikipedia\.org/w/index\.php\?.*oldid=.+')]
        # Known wiki base URIs (presumably host -> metadata; only the
        # keys are used) that are delegated to the smwiki proxy.
        self.smwiki = {}
        if os.path.exists('wikihash.pkl'):
            # was: file('wikihash.pkl') with no guaranteed close
            fh = open('wikihash.pkl', 'rb')
            try:
                self.smwiki = cPickle.load(fh)
            finally:
                fh.close()

    def fetch_changes(self, req, requri, dt=None):
        """Return the aggregated change list for *requri*, or a URI
        string when the request should simply be redirected (the URI is
        already a Memento, or a dedicated TimeGate handles it).

        req    -- mod_python request, or None when run from the CLI
        requri -- the original resource URI
        dt     -- accepted for interface compatibility; currently unused
        """
        try:
            self.cc = req.headers_in['cache-control']
        except (KeyError, AttributeError):
            # AttributeError: req is None on the command-line path;
            # the original `except KeyError` crashed there.
            self.cc = None

        # Already a Memento: redirect straight back to the original.
        for patt in self.mementoRes:
            if patt.search(requri):
                return requri

        # Resources with a dedicated TimeGate are delegated wholesale.
        if requri.startswith('http://en.wikipedia.org/wiki'):
            return 'http://mementoproxy.lanl.gov/wiki/timegate/%s' % requri
        elif requri.startswith('http://lanlsource.lanl.gov/'):
            return 'http://mementoarchive.lanl.gov/store/ta/timegate/%s' % requri
        elif requri.startswith('http://odusource.cs.odu.edu/'):
            return 'http://odusource.cs.odu.edu/store/ta/timegate/%s' % requri
        elif requri.find('//github.com/') > -1:
            return 'http://mementoproxy.lanl.gov/git/timegate/%s' % requri
        elif requri.find('.wikia.com/') > -1:
            return "http://mementoproxy.lanl.gov/wikia/timegate/%s" % requri
        else:
            for u in self.smwiki.keys():
                if requri.startswith(u):
                    return 'http://mementoproxy.lanl.gov/smwiki/timegate/%s' % requri

        # Cache lookup, skipped when the client sent Cache-Control: no-cache.
        changeStr = None
        if self.cc != 'no-cache':
            cxn = bdb.db.DB()
            cxn.open('aggregatorCache.bdb')
            try:
                changeStr = cxn.get(requri)
            finally:
                cxn.close()

        if changeStr:
            return cPickle.loads(changeStr)

        # Breaking style sheets etc.
        # 'http://mementoproxy.lanl.gov/bing/timemap/link/',   # Bing Cache
        # 'http://memento.web.archive.org/list/timemap/link/', # Internet Archive
        # 'http://mementoproxy.lanl.gov/ia/timemap/link/',  # Internet Archive

        baseURIs = [
            'http://api.wayback.archive.org/list/timemap/link/',   # Internet Archive
            'http://mementoproxy.lanl.gov/web/timemap/link/',  # Web Citation
            'http://mementoproxy.lanl.gov/ait/timemap/link/',  # Archive It
            'http://mementoproxy.lanl.gov/loc/timemap/link/',  # Lib of Congress
            'http://mementoproxy.lanl.gov/uk/timemap/link/',   # National Archives UK
            'http://mementoproxy.lanl.gov/aweu/timemap/link/', # ArchiefWeb of EU
            'http://www.webarchive.org.uk/waybacktg/ore/timemap/link/' # British Library
            ]

        # National archives that only cover their own TLDs.
        if requri.find('.gc.ca') > -1:
            baseURIs.append('http://mementoproxy.lanl.gov/can/timemap/link/')
        elif requri.find('.cn') > -1:
            baseURIs.append('http://mementoproxy.lanl.gov/cn/timemap/link/') # China Infomall

        # One fetch thread per archive; each posts into the shared dict.
        resHash = {}
        for b in baseURIs:
            t = FetchChangesThread()
            t.setName(b + requri)
            t.resultHash = resHash
            t.start()
        while (len(resHash) < len(baseURIs)):
            # XXX If threads die, this may never end...
            time.sleep(0.5)

        changes = []
        for v in resHash.values():
            changes.extend(v)
        changes.sort()

        # XXX Should really be transaction
        data = cPickle.dumps(changes)
        cxn = bdb.db.DB()
        cxn.open('aggregatorCache.bdb')
        try:
            cxn.put(requri, data)
        finally:
            cxn.close()

        return changes
        

def handler(req):
    """mod_python entry point: delegate *req* to a fresh AggrHandler."""
    return basehandler(req, AggrHandler('aggr'))

if __name__ == '__main__':
    # Command-line mode: aggregate changes for the URI given as argv[1].
    aggregator = AggrHandler('aggr')
    pprint.pprint(aggregator.fetch_changes(None, sys.argv[1]))

