
import cgitb
import wsgiref.util as wu
from wsgiref import headers
import StringIO
import pprint

from isodate import parse_duration

import os, time
from dateutil import parser as dateparser
from datetime import datetime

from foresite import *
from foresite import conneg
from foresite.utils import namespaces
from rdflib import Namespace, URIRef

def now():
    """Return the current UTC time as an ISO-8601 'YYYY-MM-DDTHH:MM:SSZ' string."""
    utc_struct = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", utc_struct)

try:
    import json
except:
    import simplejson as json

# Map of URL path prefixes (as used in /timemap/<prefix><uri>) to the
# foresite serializer producing the corresponding RDF/Atom representation.
srlzHash = {'rdf/' : RdfLibSerializer('pretty-xml'),
            'rdfxml/' : RdfLibSerializer('xml'),
            'nt/' : RdfLibSerializer('nt'),
            'turtle/' : RdfLibSerializer('turtle'),
            'n3/' : RdfLibSerializer('n3'),
            'atom/' : AtomSerializer(),
            'html/' : RdfLibSerializer('rdfa'),
            'json/' : RdfLibSerializer('pretty-json'),
            'rdfjson/' : RdfLibSerializer('json')
            }

# Override the advertised mime types for the two JSON flavours.
srlzHash['json/'].mimeType = 'application/json';
srlzHash['rdfjson/'].mimeType = 'application/rdf+json';

# Inverse map: mime type -> path prefix, used for Accept-header conneg in
# handle_aggr().  '*/*' defaults to RDF/XML; 'link/' (text/csv style) has no
# serializer object and is handled specially in handle_rem().
mimeHash = {}
for (k,v) in srlzHash.items():
    mimeHash[v.mimeType] = k
mimeHash['*/*'] = 'rdf/'
mimeHash['text/html'] = 'html/'
mimeHash['application/csv'] = 'link/'

# Pre-parsed list of every mime type we can serve, for conneg.best().
mimeStr = ', '.join(mimeHash.keys())
mimeList = conneg.parse(mimeStr)

# Register the Memento vocabulary namespace with foresite.
namespaces['mem'] = Namespace('http://www.mementoweb.org/terms/tb/')


class WsgiApp:
    """Minimal WSGI application base class.

    Subclasses provide a handle() method returning the response body (and
    may override cleanup()); __call__ wires handle() into the WSGI
    protocol, turning any uncaught exception into a 500 response whose
    body is a cgitb-rendered traceback page.
    """

    def __init__(self, d):
        """d: URL directory prefix the application is mounted under."""
        # HTTP status code -> reason phrase, used to build the status line.
        # (Fixes: 300 was misspelled 'Multiple Choies'; 501 was mislabelled
        # 'Internal Server Error' -- that phrase belongs to 500, while 501
        # means 'Not Implemented'.)
        self.codes = {
            200 : 'OK',
            201 : 'Created',
            202 : 'Accepted',
            300 : 'Multiple Choices',
            301 : 'Moved Permanently',
            302 : 'Found',
            303 : 'See Other',
            304 : 'Not Modified',
            400 : 'Bad Request',
            401 : 'Unauthorized',
            403 : 'Forbidden',
            404 : 'Not Found',
            405 : 'Method Not Allowed',
            406 : 'Not Acceptable',
            500 : 'Internal Server Error',
            501 : 'Not Implemented'
            }

        # Normalise the mount prefix to always start with '/'.
        if d[0] != '/':
            d = '/' + d
        self.urldir = d
        # HTML error page template; filled as error_tmpl % (status, message).
        self.error_tmpl = "<html><body><br/><center><table width='800px'><tr><td><div style='background-color: #e0e0e0; padding: 10px;'><br/><center><b>Error: %s</b></center>%s<br/><br/></div></td></tr></table></body></html>"

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to handle() and emit the response."""
        self.environ = environ
        self.start_response = start_response
        # NOTE(review): SCRIPT_URI / SCRIPT_URL are Apache-specific environ
        # variables, not part of the WSGI spec -- confirm the deployment
        # (mod_wsgi under Apache, presumably) provides them.
        self.full_uri = environ['SCRIPT_URI']
        self.path = environ['SCRIPT_URL'][len(environ['SCRIPT_NAME']):]
        self.host = environ['wsgi.url_scheme'] + "://" + environ['SERVER_NAME']
        self.method = environ['REQUEST_METHOD']

        # Collect the incoming HTTP_* headers under lower-case,
        # dash-separated names (e.g. HTTP_ACCEPT_DATETIME -> accept-datetime).
        h = {}
        for (k, v) in environ.items():
            if k.startswith('HTTP_'):
                name = k[5:].lower().replace('_', '-')
                h[name] = v
        self.in_headers = h

        self.status = 200
        self.out_headers = headers.Headers([])

        try:
            try:
                data = self.handle()
            except:
                # Render the traceback as an HTML page via cgitb.
                sio = StringIO.StringIO()
                cgitb.Hook(file=sio).handle()
                sio.seek(0)
                data = sio.read()
                self.out_headers['Content-type'] = 'text/html'
                # Bug fix: an internal failure is 500, not 501.
                self.status = 500

            self.out_headers['Content-length'] = str(len(data))
            status = "%s %s" % (self.status, self.codes[self.status])
            start_response(status, self.out_headers.items())

            # WSGI bodies must be iterables of byte strings.
            if type(data) == str:
                return [data]
            elif type(data) == unicode:
                return [data.encode('utf-8')]
            else:
                # see if response is iterable
                try:
                    iter(data)
                    return data
                except TypeError:
                    return [data]
        finally:
            self.cleanup()

    def error(self, msg, status=500):
        """Set up an HTML error response and return its body.

        Default changed from 501 to 500: every in-file caller passes an
        explicit status, and 500 is the correct generic server-error code.
        """
        self.status = status
        self.out_headers['Content-type'] = 'text/html'
        return self.error_tmpl % (status, msg)

    def send(self, data, status=200, ct='text/html'):
        """Set status and Content-type, then return data as the body."""
        self.status = status
        self.out_headers['Content-type'] = ct
        return data

    def cleanup(self):
        """Per-request teardown hook; subclasses may override."""
        pass




class BaseProxyHandler(WsgiApp):
    """Memento proxy base: TimeGate, TimeBundle and TimeMap endpoints.

    Routes /timegate/<uri>, /timebundle/<uri> and /timemap/<fmt>/<uri>.
    Subclasses supply fetch_changes() to look up the archived mementos.
    """

    def __init__(self, d):
        WsgiApp.__init__(self, d)
        # NOTE(review): hard-coded working directory; consider making this
        # configurable instead of baking in a home path.
        os.chdir('/home/azaroth/aggregator')

    def fetch_changes(self, requri, dt=None):
        """Return the known mementos for requri (optionally near datetime dt).

        Expected return values, inferred from the callers below:
          * None / empty -- resource not in archive
          * str          -- URL of a better TimeGate to redirect to
          * list         -- [(datetime, memento-uri[, info-dict]), ...],
                            sorted ascending by datetime
          * dict         -- {'first', 'last', 'next', 'prev', 'loc'} entries
        """
        raise NotImplementedError()

    def handle_dt(self):
        """Handle /timegate/<uri>: 302-redirect to the memento closest to the
        requested Accept-Datetime, optionally bounded by an ISO-8601 duration
        appended after ';' in the header value."""

        nowd = now()
        current = dateparser.parse(nowd)

        try:
            reqdate = self.in_headers['accept-datetime']
        except KeyError:
            # No Accept-Datetime: negotiate against "now".
            reqdate = nowd

        # Strip the silly {} wrapper characters some old clients send.
        # (startswith/endswith also guards against an empty header value,
        # which the previous reqdate[0] / reqdate[-1] indexing crashed on.)
        if reqdate.startswith("{"):
            reqdate = reqdate[1:]
        if reqdate.endswith("}"):
            reqdate = reqdate[:-1]

        # /xxx/timegate/(URL) -- use unparsed_uri to include ?bla
        urlOffset = len('/timegate/')
        requri = self.path[urlOffset:]

        if not requri:
            return self.error("Missing URI in path: %s" % self.path, status=404)

        if not requri.startswith('http'):
            requri = "http://" + requri

        # Duration handling: "<date>;<iso8601-duration>" restricts how far
        # the chosen memento may lie from the requested datetime.
        # durm stays False when no duration was supplied; None means a
        # duration was supplied but failed to parse (reported as 400 below).
        durm = False
        if reqdate.find(';') > -1:
            (reqdate, durn) = reqdate.split(';')
            reqdate = reqdate.strip()
            durn = durn.strip()
            try:
                durm = parse_duration(durn)
            except Exception:
                durm = None

        try:
            wanted = dateparser.parse(reqdate)
            if (wanted.tzinfo == None or wanted.tzinfo.utcoffset(wanted) == None):
                # Naive date: re-parse the *string* with an explicit zone.
                # (Bug fix: previously re-parsed the datetime object itself,
                # which always raised and threw away a perfectly good date.)
                reqdate += " GMT"
                wanted = dateparser.parse(reqdate)
        except Exception:
            # Unparseable date; reported as 400 below.
            wanted = None

        # Acceptable datetime window for the chosen memento.  Only computed
        # when the date parsed (bug fix: previously dereferenced
        # wanted.tzinfo when wanted was None, masking the 400 response with
        # an AttributeError); every path that reaches the window check at
        # the bottom has wanted set.
        if wanted:
            if durm:
                minDateTime = wanted - durm
                maxDateTime = wanted + durm
            elif durm is False:
                # No duration requested: accept any memento datetime.
                minDateTime = datetime(1, 1, 1, tzinfo=wanted.tzinfo)
                maxDateTime = datetime(9999, 12, 31, tzinfo=wanted.tzinfo)

        # Find times for requested URI
        changes = self.fetch_changes(requri, dt=wanted)

        nxt = None
        prv = None
        loc = None
        if not changes:
            first = None
            last = None
        elif isinstance(changes, dict):
            # Backend already chose the memento and its neighbours.
            first = changes['first']
            last = changes['last']
            nxt = changes['next']
            prv = changes['prev']
            loc = changes['loc']
        elif isinstance(changes, list):
            first = changes[0]
            last = changes[-1]

        ore = self.host + self.urldir + '/timebundle/' + requri
        tmap = self.host + self.urldir + '/timemap/link/' + requri

        links = ['<%s>;rel="timebundle"' % ore,
                 '<%s>;rel="original"' % requri,
                 '<%s>;rel="timemap";type="text/csv"' % tmap]

        # Process Error Conditions

        if isinstance(changes, str):
            # Redirection to a better TimeGate; no conneg so no Vary/Link.
            self.out_headers['Location'] = changes
            self.status = 302
            self.out_headers['Content-type'] = 'text/plain'
            return 'Redirecting to better TimeGate: %s' % changes
        else:
            self.out_headers['Vary'] = 'negotiate,accept-datetime'
            if changes:
                lnkhdr = self.construct_linkhdr(links, first, last, loc, nxt, prv)

            if not changes:
                return self.error('Resource not in archive<br/><i><blockquote><ul><li>Resource: %s</li></ul></blockquote></i>' % requri, status=404)
            elif self.method != "GET" and self.method != "HEAD":
                self.out_headers['Link'] = lnkhdr
                return self.error("Only GET or HEAD allowed", status=405)
            elif wanted == None:
                fmtstr = "%a, %d %b %Y %H:%M:%S GMT"
                self.out_headers['Link'] = lnkhdr
                # XXX Body must have list of resources too
                return self.error("Datetime format not correct<br/><i><blockquote><ul><li>Date: %s</li><li>Expected: %s</li></ul></blockquote></i>" % (reqdate, current.strftime(fmtstr)), status=400)
            elif durm == None:
                self.out_headers['Link'] = lnkhdr
                return self.error("Duration format not correct<br/><i><blockquote><ul><li>Got: %s</li><li>Expected: %s</li></ul></blockquote></i>" % (durn, "P[yY][mM][dD][T[hH][mM][sS]]"), status=400)

        # Setup headers
        self.out_headers['TCN'] = 'choice'

        if wanted == current or first == last:
            # Request is for "now" or there is only one memento: use the last.
            loc = last
            nxt = None
            prv = None
        elif not loc:
            # Scan for the memento whose datetime is closest to the request.
            tdiff = lambda y, x: float(abs((y - x).days * 86400) + abs((y - x).seconds))
            for c in range(len(changes)):
                this = changes[c]
                if wanted < this[0] or c == len(changes) - 1:
                    if c:
                        llast = changes[c - 1]
                        # Bug fix: compare against the *previous* change
                        # (llast), not the overall newest memento (last).
                        tdelta1 = tdiff(llast[0], wanted)
                        tdelta2 = tdiff(this[0], wanted)
                        # A 'valid' entry covers a time span, so the
                        # preceding change wins regardless of distance.
                        valid = len(this) == 3 and 'type' in this[2] and this[2]['type'] == 'valid'
                        if valid or tdelta1 < tdelta2:
                            # Closest memento to the request is the previous one.
                            loc = llast
                            if (c - 2 >= 0):
                                prv = changes[c - 2]
                            nxt = this
                        else:
                            loc = this
                            prv = llast
                            if (c < len(changes) - 1):
                                nxt = changes[c + 1]
                        break

        self.out_headers['Link'] = self.construct_linkhdr(links, first, last, loc, nxt, prv)
        if (loc[0] < minDateTime or loc[0] > maxDateTime):
            return self.error('Outside of request duration<br/><blockquote>Min/Max: %s / %s<br/>Location Time: %s</blockquote>' % (minDateTime, maxDateTime, loc[0]), status=406)
        else:
            self.out_headers['Content-type'] = 'text/plain'
            self.out_headers['Location'] = loc[1]
            self.status = 302
            return 'Redirect to Memento'

    def make_covers(self, t1, t2=None):
        """Build a mem:TimeSpan resource covering [t1, t2] (t2 optional).

        String datetimes are parsed; datetime objects pass straight through.
        """
        ts = ArbitraryResource()
        ts._rdf.type = namespaces['mem']['TimeSpan']
        if not isinstance(t1, datetime):
            t1 = dateparser.parse(t1)
        ts._mem.start = t1
        if t2:
            if not isinstance(t2, datetime):
                t2 = dateparser.parse(t2)
            ts._mem.end = t2
        return ts

    def make_obs(self, t1, t2=None, n=0):
        """Build a mem:TimeSpan of observations starting at t1.

        t2 == 'now'  -> open-ended span (no end recorded);
        t2 datetime  -> closed span ending at t2;
        t2 omitted   -> a single observation at t1.
        n, when given, is recorded as the observation count.
        """
        ts = ArbitraryResource()
        ts._rdf.type = namespaces['mem']['TimeSpan']
        if not isinstance(t1, datetime):
            t1 = dateparser.parse(t1)
        ts._mem.start = t1
        if t2 == 'now':
            if n:
                ts._mem.observations = n
        elif t2:
            if not isinstance(t2, datetime):
                t2 = dateparser.parse(t2)
            ts._mem.end = t2
            if n:
                ts._mem.observations = n
        else:
            # Point observation: span collapses to t1 itself.
            ts._mem.end = t1
            ts._mem.observations = 1
        return ts

    def handle_aggr(self):
        """Handle /timebundle/<uri>: 303-redirect to the TimeMap
        representation chosen by Accept-header content negotiation."""
        urlOffset = len('/timebundle/')
        requri = self.path[urlOffset:]
        if not requri:
            return self.error("Missing URI: %s???" % self.path, status=404)

        if not requri.startswith('http'):
            requri = "http://" + requri

        try:
            wanted = self.in_headers['accept']
        except KeyError:
            wanted = 'application/rdf+xml'
        mts = conneg.parse(wanted)
        mt = conneg.best(mts, mimeList)
        # Fall back to RDF/XML when nothing acceptable matched.
        if not mt:
            which = 'rdf/'
        else:
            which = mimeHash[str(mt)]
        location = self.host + self.urldir + '/timemap/%s%s' % (which, requri)
        self.out_headers['Location'] = location
        self.out_headers['Vary'] = "Accept"
        self.status = 303
        return ""

    def handle_rem(self):
        """Handle /timemap/<fmt>/<uri>: serialize the TimeMap either as an
        application/link-format-style text body ('link/') or via one of the
        foresite serializers in srlzHash."""
        urlOffset = len('/timemap/')
        requri = self.path[urlOffset:]
        if not requri:
            return self.error("Missing URI: %s???" % self.path, status=404)

        if requri.startswith('link/'):
            srlz = "LINK"
            requri = requri[5:]
        else:
            srlz = None
            for (k, v) in srlzHash.items():
                if requri.startswith(k):
                    srlz = v
                    requri = requri[len(k):]
                    break
            if not srlz:
                # unknown rem srlz, raise
                return self.error('Unknown time map serialization', status=404)

        if not requri.startswith('http'):
            requri = "http://" + requri

        changes = self.fetch_changes(requri)
        if not changes:
            return self.error('Resource not in archive<br/><blockquote>Resource: %s</blockquote>' % requri, status=404)
        elif isinstance(changes, str):
            # fetch_changes is trying to redirect... but to where?
            self.requri = requri
            return self.error('Resource not in archive<br/><blockquote>Resource: %s</blockquote>' % requri, status=404)

        if srlz == 'LINK':
            # Emit the link-header format as the response body.
            fmtstr = "%a, %d %b %Y %H:%M:%S GMT"
            ore = self.host + self.urldir + '/timebundle/' + requri
            links = ['<%s>;rel="timebundle"' % ore, '<%s>;rel="original"' % requri]
            if len(changes) == 1:
                links.append('<%s>;rel="first-memento last-memento";datetime="%s"' % (changes[0][1], changes[0][0].strftime(fmtstr)))
            else:
                links.append('<%s>;rel="first-memento";datetime="%s"' % (changes[0][1], changes[0][0].strftime(fmtstr)))
                for ch in changes[1:-1]:
                    links.append('<%s>;rel="memento";datetime="%s"' % (ch[1], ch[0].strftime(fmtstr)))
                links.append('<%s>;rel="last-memento";datetime="%s"' % (changes[-1][1], changes[-1][0].strftime(fmtstr)))
            data = ',\n '.join(links)
            self.status = 200
            self.out_headers['Content-type'] = 'text/csv'
            return data

        # Build the ORE aggregation (TimeBundle) and its ReM (TimeMap).
        aggr = Aggregation(self.host + self.urldir + '/timebundle/' + requri)
        rem = aggr.register_serialization(srlz, self.full_uri)
        rem._rdf.type = namespaces['mem']['TimeMap']

        aggr._dc.title = 'Memento Time Bundle for ' + requri
        aggr._rdf.type = namespaces['mem']['TimeBundle']

        # add base resource
        orig = AggregatedResource(requri)
        orig._rdf.type = namespaces['mem']['OriginalResource']
        aggr.add_resource(orig)

        # TimeGate resource, covering the whole first..last span.
        tg = AggregatedResource(self.host + self.urldir + '/timegate/' + requri)
        tg._rdf.type = namespaces['mem']['TimeGate']
        tg._mem.timeGateFor = orig._uri_
        covs = self.make_covers(changes[0][0], changes[-1][0])
        tg.add_triple(covs)
        tg._mem.covers = covs._uri_
        aggr.add_resource(tg)

        # One Memento resource per change, with its observation span.
        for c in changes:
            dtobj = c[0]
            loc = c[1]
            ar = AggregatedResource(loc)
            ar._rdf.type = namespaces['mem']['Memento']
            ar._mem.mementoFor = orig._uri_
            if len(c) == 2:
                span = self.make_obs(dtobj)
                ar.add_triple(span)
                ar._mem.observedOver = span._uri_
            else:
                if 'dcterms:creator' in c[2]:
                    ar._dcterms.creator = URIRef(c[2]['dcterms:creator'])
                span = self.make_obs(dtobj, t2=c[2]['last'], n=c[2]['obs'])
                ar.add_triple(span)
                if 'type' in c[2] and c[2]['type'] == 'valid':
                    ar._mem.validOver = span._uri_
                else:
                    ar._mem.observedOver = span._uri_
            aggr.add_resource(ar)
        rd = rem.get_serialization()

        self.status = 200
        self.out_headers['Content-type'] = srlz.mimeType
        return rd.data

    def construct_linkhdr(self, links, first, last, curr=None, next=None, prev=None):
        """Append memento navigation links (first/last/prev/next/memento) to
        links and return the joined Link header value.  Entries sharing a
        URI are merged into one link with a combined rel value."""
        fmtstr = "%a, %d %b %Y %H:%M:%S GMT"

        mylinks = []
        dt = first[0].strftime(fmtstr)
        uri = first[1]
        rel = "first-memento"
        if last and last[1] == uri:
            rel += " last-memento"
            last = None
        if prev and prev[1] == uri:
            rel += " prev-memento"
            prev = None
        elif curr and curr[1] == uri:
            rel += " memento"
            curr = None
        mylinks.append((uri, rel, dt))

        if last:
            dt = last[0].strftime(fmtstr)
            uri = last[1]
            rel = "last-memento"
            if curr and curr[1] == uri:
                rel += " memento"
                curr = None
            elif next and next[1] == uri:
                rel += " next-memento"
                next = None
            mylinks.append((uri, rel, dt))

        if prev:
            mylinks.append((prev[1], 'prev-memento', prev[0].strftime(fmtstr)))
        if next:
            mylinks.append((next[1], 'next-memento', next[0].strftime(fmtstr)))
        if curr:
            mylinks.append((curr[1], 'memento', curr[0].strftime(fmtstr)))

        lh = ['<%s>;rel="%s";datetime="%s"' % x for x in mylinks]

        links.extend(lh)

        return ','.join(links)

    def handle(self):
        """Route the request path to the matching endpoint handler."""
        if self.path.startswith('/timegate/'):
            return self.handle_dt()
        elif self.path.startswith('/timebundle/'):
            return self.handle_aggr()
        elif self.path.startswith('/timemap/'):
            return self.handle_rem()
        else:
            return self.error("404: Unknown proxy command in:  %s" % self.path, status=404)


