

import time, sys, os, re
import pprint
import socket
import threading
import site
from dateutil import parser as dateparser

import urllib

site.addsitedir('/home/azaroth/aggregator')

import pycassa
from cassandra.ttypes import NotFoundException

from date_utils import int_to_dt, dt_to_int
from baseHandler import BaseProxyHandler

from lxml import etree
import StringIO

# Module-level setup: egg cache + local package path, then a single shared
# Cassandra connection used by every request handled in this process.
os.environ['PYTHON_EGG_CACHE'] = '/home/azaroth/.eggs'
site.addsitedir('/home/azaroth/archive')

client = pycassa.connect()
database = 'Memento'

# URI-R : Time -> URI-M
# Column family keyed by the original resource URI; each column name is a
# zero-padded 12-digit integer datetime and its value the memento URI
# (see AggrHandler.cleanup, which writes "%012d" % dt_to_int(...)).
UriTimes = pycassa.ColumnFamily(client, database, 'Uris')


def parse_link(header):
    """Parse an HTTP Link header into ``{uri: {param: [values, ...]}}``.

    A small character-by-character state machine.  'rel' parameter values
    are additionally split on spaces into individual relation tokens.

    Raises ValueError on structurally malformed input; a truncated header
    may raise IndexError from the underlying pop().
    """
    state = 'start'
    data = [d for d in header]
    links = {}

    while data:
        if state == 'start':
            # Skip whitespace; a link must open with '<'.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d != "<":
                raise ValueError("Expected < in start, got %s" % d)
            state = "uri"
        elif state == "uri":
            # Accumulate the URI-Reference up to the closing '>'.
            uri = []
            d = data.pop(0)
            while d != ">":
                uri.append(d)
                d = data.pop(0)
            uri = ''.join(uri)
            links[uri] = {}
            state = "paramstart"
        elif state == 'paramstart':
            # ';' introduces a link-param, ',' starts the next link.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d == ";":
                state = 'linkparam'
            elif d == ',':
                state = 'start'
            else:
                raise ValueError("Expected ; in paramstart, got %s" % d)
        elif state == 'linkparam':
            # Read the parameter name up to '=' (whitespace tolerated).
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            paramType = []
            while not d.isspace() and d != "=":
                paramType.append(d)
                d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            if d != "=":
                raise ValueError("Expected = in linkparam, got %s" % d)
            state = 'linkvalue'
            pt = ''.join(paramType)
            # 'has_key' replaced with 'in' (works on both py2 and py3).
            if pt not in links[uri]:
                links[uri][pt] = []
        elif state == 'linkvalue':
            # Value is either a quoted-string or a bare token; return to
            # paramstart afterwards.
            d = data.pop(0)
            while d.isspace():
                d = data.pop(0)
            paramValue = []
            if d == '"':
                # Quoted-string: consume until an UNESCAPED '"', dropping
                # the '\' escape characters themselves (quoted-pair).
                # BUGFIX: the old condition (d != '"' and pd != '\\')
                # terminated the value at the first backslash, so any
                # escaped character -- notably \" -- ended the string.
                escaped = False
                d = data.pop(0)
                while escaped or d != '"':
                    if d == '\\' and not escaped:
                        escaped = True
                    else:
                        paramValue.append(d)
                        escaped = False
                    d = data.pop(0)
            else:
                # Bare token: ends at whitespace, ',' or ';'.
                while not d.isspace() and not d in (',', ';'):
                    paramValue.append(d)
                    if data:
                        d = data.pop(0)
                    else:
                        break
                if data:
                    # Push the terminator back for paramstart to consume.
                    data.insert(0, d)
            state = 'paramstart'
            pv = ''.join(paramValue)
            if pt == 'rel':
                # rel can carry several space-separated relation types.
                links[uri][pt].extend(pv.split(' '))
            else:
                links[uri][pt].append(pv)
    return links



class FetchChangesThread(threading.Thread):
    """Fetch one TimeMap and collect its (datetime, uri) change pairs.

    Protocol (see AggrHandler.cleanup): before start() the spawner sets
    the thread *name* to the full TimeMap URI and assigns a shared dict
    to ``self.resultHash``.  On completion the sorted change list (or []
    on any fetch failure) is stored under resultHash[uri], which is what
    the spawner polls to detect completion.
    """

    def run(self):
        changes = []
        ruri = self.getName()
        try:
            fh = urllib.urlopen(ruri)
            data = fh.read()
            ct = fh.headers.dict['content-type']
            fh.close()
        except Exception:
            # Best effort: record an empty result rather than dying
            # silently, so the caller's completion poll still terminates.
            # (Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.)
            self.resultHash[ruri] = []
            return

        if (ct.startswith('text/csv') or data.find(';rel=') > -1):
            # Fast, ugly link parser: scrape <uri> and datetime="..."
            # out of each line instead of doing a full Link-header parse.
            # (The useless 'try: ... except: raise' wrapper was removed.)
            for line in data.split('\n'):
                line = line.strip()
                didx = line.find('datetime=')
                if didx > -1:
                    uri = line[line.find('<')+1:line.find('>')]
                    # 'datetime=' is 9 chars; +10 also skips the opening
                    # quote, and [:-1] drops the trailing ',' or quote.
                    date = line[didx+10:-1]
                    if (date[-1] == '"'):
                        date = date[:-1]
                    dt = dateparser.parse(date)
                    changes.append((dt, uri))

        if not changes:
            # TODO: fall back to a full (slow) Link-header parse.
            pass

        changes.sort()
        self.resultHash[ruri] = changes
        

class AggrHandler(BaseProxyHandler):
    """Memento aggregator handler.

    Resolves the change history ("mementos") of an original resource
    URI: known-archive URIs are short-circuited, otherwise the Cassandra
    cache (UriTimes) is consulted, and on a miss TimeMaps are fetched in
    parallel from a set of archive proxies and written back to the cache.
    """

    def __init__(self, d):
        BaseProxyHandler.__init__(self, d);

        # URIs matching these patterns are themselves archive snapshots,
        # so fetch_changes() returns a synthetic history for them.
        # NOTE(review): '.' and '?' are not regex-escaped here, so each
        # pattern matches slightly more than the literal URI it shows.
        self.mementoRes = [re.compile('^http://web.archive.org/[0-9]+/.+'),
                           re.compile('^http://(www.)?webcitation.org/.*'),
                           re.compile('^http://wayback.archive-it.org/[0-9]+/.+'),
                           re.compile('^http://upload.wikimedia.org/.+'),
                           re.compile('^http://.+.wikipedia.org/w/index.php?.*oldid=.+')]

        # TimeMap endpoints polled by cleanup(); the requested URI is
        # concatenated directly onto each base.
        self.baseURIs = ['http://memento.waybackmachine.org/list/timemap/link/',   # Internet Archive!
                        'http://www.webarchive.org.uk/waybacktg/ore/timemap/link/', # British Library
                        'http://mementoproxy.lanl.gov/ait/timemap/link/',  # Archive It
                        'http://mementoproxy.lanl.gov/loc/timemap/link/',  # Lib of Congress
                        'http://mementoproxy.lanl.gov/uk/timemap/link/',   # National Archives UK
                        'http://mementoproxy.lanl.gov/aweu/timemap/link/', # ArchiefWeb of EU
                        'http://mementoproxy.lanl.gov/web/timemap/link/'  # Web Citation
                        ]
        # Set when do_cassandra() fails, so cleanup() knows what to fetch.
        self.requri = None


    def fetch_changes(self, requri, dt=None):
        """Return memento information for requri.

        Returns one of:
          * a sorted list of (datetime, uri) tuples (no dt: full TimeMap);
          * a dict with loc/first/last/next/prev entries (dt supplied);
          * a string URI the caller should redirect to while the cache
            is being populated.
        """

        # Already-a-memento URIs get a fixed two-entry synthetic history.
        for patt in self.mementoRes:
            m = patt.search(requri)
            if m:
                return [(dateparser.parse('1996/01/01 00:00:00 UTC'), requri), (dateparser.parse('1996/01/02 00:00:00 UTC'), requri)]

        try:
            self.requri = None
            return self.do_cassandra(requri, dt)
        # NOTE(review): bare except -- ANY failure (including an expected
        # NotFoundException cache miss) is treated as "not cached yet".
        except:
            self.requri = requri                
            if dt:
                # New strategy:  Forward to IA and *then* load
                return "http://memento.waybackmachine.org/memento/timegate/" + requri
            else:
                # We need it now for timemap, so just call cleanup and rerun
                self.cleanup()
                return self.do_cassandra(requri, dt)

    def cleanup(self):
        """Populate the Cassandra cache for self.requri.

        Fetches TimeMaps from every base URI in parallel threads, merges
        and sorts the results, and writes them into UriTimes keyed by
        zero-padded integer datetimes.  A few sources with their own
        timegates return a redirect URI instead of being aggregated.
        """
        requri = self.requri

        if not requri:
            return

        # copy to new value
        baseURIs = self.baseURIs[:]

        # Special-cased sources redirect to a dedicated timegate;
        # region-specific archives are appended based on the TLD.
        if requri.startswith('http://en.wikipedia.org/wiki'):
            return 'http://mementoproxy.lanl.gov/wiki/timegate/%s' % requri
        elif requri.startswith('http://lanlsource.lanl.gov/'):
            return 'http://mementoarchive.lanl.gov/store/ta/timegate/%s' % requri
        elif requri.startswith('http://odusource.cs.odu.edu/'):
            return 'http://odusource.cs.odu.edu/store/ta/timegate/%s' % requri
        elif requri.find('.wikia.com/') > -1:
            return "http://mementoproxy.lanl.gov/wikia/timegate/%s" % requri
        elif requri.find('.gc.ca') > -1:
            baseURIs.append('http://mementoproxy.lanl.gov/can/timemap/link/')
        elif requri.find('.cn') > -1:
            baseURIs.append('http://mementoproxy.lanl.gov/cn/timemap/link/') # China Infomall

        # Fan out one fetch thread per archive; each thread reports into
        # the shared resHash (thread name carries the TimeMap URI).
        resHash = {}
        for b in baseURIs:
            t = FetchChangesThread()
            t.setName(b + requri)
            t.resultHash = resHash
            t.start()
        # Poll until every thread has reported (threads always store a
        # result, even on failure -- see FetchChangesThread.run).
        while (len(resHash) < len(baseURIs)):
            # XXX If threads die, this may never end...
            if __name__ == '__main__':
                print "%s/%s" % (len(resHash),len(baseURIs))
            time.sleep(0.5)

        changes = []
        for v in resHash.values():
            changes.extend(v)
        changes.sort()

        # write changes to Cassandra
        changeHash = {}
        for c in changes:
            # Column name: datetime as a zero-padded 12-digit integer so
            # lexicographic column order equals chronological order.
            i = dt_to_int(c[0])
            dti = "%012d" % i
            changeHash[dti] = c[1]

        changeHash['_updated'] = "%012d" % time.time()
        UriTimes.insert(requri, changeHash)
        self.requri = None
        

    def do_cassandra(self, uri, dt=None):
        """Read the cached history for uri from the UriTimes column family.

        Without dt, returns the full sorted [(datetime, uri), ...] list.
        With dt, slices only the columns around the requested time and
        returns {'loc', 'first', 'last', 'next', 'prev'} where loc is the
        memento closest to dt.  Raises NotFoundException (from pycassa)
        when the URI is not cached.
        """

        if dt:
            # Try to find only useful info, not every. single. memento.
            req = "%012d" % dt_to_int(dt)

            # First/last mementos: columns sort lexicographically, so
            # '0' and '9' bracket all 12-digit datetime column names.
            (t, mem) = UriTimes.get(uri, column_start='0', column_count=1).items()[0]
            first = (int_to_dt(t), mem)

            (t, mem) = UriTimes.get(uri, column_start='9', column_count=1, column_reversed=True).items()[0]
            last = (int_to_dt(t), mem)

            if first == last:
                # we only have one entry
                return {'loc' : first, 'first' : first, 'last' : first, 'next' : None, 'prev' : None}
                
            # Two mementos at/after dt (ascending)...
            if dt < last[0]:
                try:
                    nexts = [(int_to_dt(x), y) for (x,y) in UriTimes.get(uri, column_start=req, column_count=2).items()]
                except NotFoundException:
                    nexts = []
            else:
                nexts = []

            # ...and two at/before dt (descending).
            if dt > first[0]:
                try:
                    prevs = [(int_to_dt(x), mem) for (x,mem) in UriTimes.get(uri, column_start=req, column_count=2, column_reversed=True).items()]
                except:
                    prevs = []
            else:
                prevs = []

            # Now calculate next and prev
            # Absolute distance between two datetimes, in seconds.
            tdiff = lambda y,x: float(abs((y-x).days * 86400) + abs((y-x).seconds))

            prev = []
            next = []
            # next is ascending, prev is descending
            if (nexts and prevs):
                # find closest
                nd = tdiff(nexts[0][0], dt)
                pd = tdiff(prevs[0][0], dt)
                if (nd <= pd):
                    # next is closer
                    loc = nexts[0]
                    next = nexts[1] if len(nexts) > 1 else []
                    prev = prevs[0]
                else:
                    loc = prevs[0]
                    prev = prevs[1] if len(prevs) > 1 else []
                    next = nexts[0]
            elif nexts:
                # find closest next and first
                nd = tdiff(nexts[0][0], dt)
                pd = tdiff(first[0], dt)
                if nd <= pd:
                    loc = nexts[0]
                    next = nexts[1] if len(nexts) > 1 else []                        
                else:
                    loc = first
                    next = nexts[0]

            elif prevs:
                # find closest prev and last
                pd = tdiff(prevs[0][0], dt)
                nd = tdiff(last[0], dt)
                if nd <= pd:
                    loc = last
                    prev = prevs[0]
                else:
                    loc = prevs[0]
                    prev = prevs[1] if len(prevs) > 1 else []                        
            else:
                # find closest first and last
                pd = tdiff(first[0], dt)
                nd = tdiff(last[0], dt)
                loc = first if pd < nd else last

            # Suppress redundant neighbours (boundaries / loc itself).
            if prev == last or prev == loc:
                prev = None
            if next == first or next == loc:
                next = None

            changes = {'loc' : loc, 'first' : first, 'last' : last, 'next' : next, 'prev' : prev}
        else:
            # Full history: '0'..'09' spans all numeric columns while
            # excluding the '_updated' bookkeeping column.
            changes = [(int_to_dt(x), mem) for (x,mem) in UriTimes.get(uri, column_start='0', column_finish='09', column_count=999999).items()]
            changes.sort()
        return changes

# Module-level handler instance -- presumably the WSGI/mod_python entry
# point; verify against the deployment configuration.
application = AggrHandler('aggr')


if __name__ == '__main__':
    # Command-line harness: argv[1] is the original URI, optional
    # argv[2] a requested datetime (any format dateutil can parse).
    if len(sys.argv) > 2:
        dt = sys.argv[2]
        dt = dateparser.parse(dt)
        print "Request Date is: " + str(dt)
    else:
        dt = None

    c = application.fetch_changes(sys.argv[1], dt)
    pprint.pprint(c)





    
    
