
import urllib2
import StringIO
from lxml import etree
from urlparse import urlparse

import os

import time
import datetime
from dateutil import parser as dateparser

from baseHandler import BaseProxyHandler, basehandler

# Shared GMT tzinfo object (parsed once via dateutil); attached to the
# timezone-aware datetimes built by iso_to_dt() below.
utz = dateparser.parse('2009-01-01 12:00:00 GMT').tzinfo

def iso_to_dt(date):
    """Convert a MediaWiki ISO-8601 timestamp string to an aware datetime.

    `date` looks like '2009-01-01T12:00:00Z' (fields are sliced by fixed
    position, so the exact separator characters do not matter).

    The previous implementation round-tripped through time.mktime(),
    which interprets a struct_time in the server's LOCAL timezone; the
    API's timestamps are UTC, so the result was skewed by the local UTC
    offset everywhere except on a UTC-configured host.  Building the
    datetime directly with the GMT tzinfo avoids that.
    """
    return datetime.datetime(int(date[:4]), int(date[5:7]), int(date[8:10]),
                             int(date[11:13]), int(date[14:16]),
                             int(date[17:19]), tzinfo=utz)
    

class WikiaHandler(BaseProxyHandler):
    """Memento proxy handler for MediaWiki wikis hosted on Wikia.

    Handles *.wikia.com hosts plus any extra hostnames listed (one per
    line) in wikiahosts.txt, and resolves a page's revision history via
    the MediaWiki API.
    """

    def __init__(self, loc):
        BaseProxyHandler.__init__(self, loc)
        # Browser-like request headers; presumably some wikis reject
        # non-browser clients -- TODO confirm which headers are required.
        self.hdrs = {
                'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language' : 'en-us,en;q=0.5',
                'Proxy-Connection' : 'keep-alive',
                'Pragma' : 'no-cache',
                'Cache-Control' : 'no-cache',
                'User-Agent' : 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2'}

        hdlr = urllib2.HTTPRedirectHandler()
        self.opener = urllib2.build_opener(hdlr)

        # Non-wikia.com hostnames this proxy should also serve.
        self.hosts = []
        if os.path.exists('wikiahosts.txt'):
            fh = open('wikiahosts.txt')
            try:
                # strip() removes the trailing newline that readlines()
                # would keep; without it the `host in self.hosts` test in
                # fetch_changes() could never match.  Blank lines are
                # skipped.
                self.hosts = [line.strip() for line in fh if line.strip()]
            finally:
                fh.close()


    def fetch_dom(self, wikiuri, host):
        """Fetch `wikiuri` (with an explicit Host header) and parse it as XML.

        Returns the root lxml element, or None when the request or the
        parse fails -- callers treat None as "no more data".
        """
        self.hdrs['Host'] = host
        try:
            ureq = urllib2.Request(wikiuri, headers=self.hdrs)
            fh = self.opener.open(ureq)
        except Exception:
            # Network/HTTP failures are expected (dead or moved wikis);
            # this handler is deliberately best-effort.
            return None

        try:
            data = fh.read()
        finally:
            # Close the response even if read() raises.
            fh.close()
        try:
            dom = etree.parse(StringIO.StringIO(data))
        except Exception:
            # Not well-formed XML (e.g. an HTML error page).
            return None
        return dom.getroot()


    def fetch_wikilist(self):
        """Return the custom (non-wikia.com) domains known to www.wikia.com.

        Pages through the wkdomains API list 5000 entries at a time until
        a request fails or an empty page comes back.  *.wikia.com hosts
        are excluded here because they are matched by suffix elsewhere.
        """
        baseuri = "http://www.wikia.com/api.php?format=xml&action=query&list=wkdomains&wkfrom=%s&wkto=%s"
        start = 1
        step = 5000
        domains = []
        while True:
            # NOTE(review): consecutive windows share the boundary id
            # (wkto == next wkfrom); if the API treats both ends as
            # inclusive this duplicates one entry per page -- confirm
            # against the wkdomains API before changing.
            uri = baseuri % (start, start + step)
            start += step
            dom = self.fetch_dom(uri, 'www.wikia.com')
            if dom is None:
                break
            entries = dom.xpath('//variable')   # renamed: `vars` shadowed a builtin
            if not entries:
                break
            for entry in entries:
                d = entry.attrib['domain']
                if 'wikia.com' not in d:
                    domains.append(d)
        return domains


    def fetch_changes(self, req, requri, dt=None):
        """Return the revision history of `requri`, oldest first.

        Result is a list of (datetime, revision-uri, {}) tuples.  `req`
        and `dt` are accepted for interface compatibility with the other
        proxy handlers and are not used here.

        Path handling:
          http://www.wowwiki.com/Cloth_armor              --> /api.php
          http://dragonage.wikia.com/wiki/Morrigan        --> /api.php
          http://memory-alpha.org/en/wiki/Fraggle_Rock    --> /en/api.php
        """
        p = urlparse(requri)
        host = p[1]
        upath = p[2]

        # Only serve *.wikia.com plus the explicitly whitelisted hosts.
        if '.wikia.com' not in host and host not in self.hosts:
            return []

        if '/' not in upath:
            # No title component at all (e.g. "http://host"); the
            # original rsplit unpacking would have raised ValueError.
            return []
        (pref, title) = upath.rsplit('/', 1)
        if pref:
            # Drop the /wiki script prefix but keep any language prefix
            # (e.g. /en/wiki -> /en).
            pref = pref.replace('/wiki', '')

        url = "http://%s%s/api.php?format=xml&action=query&prop=revisions&meta=siteinfo&rvprop=timestamp|ids&rvlimit=500&redirects=1&titles=%s" % (host, pref, title)

        changes = []
        base = "http://%s%s/index.php?oldid=" % (host, pref)

        dom = self.fetch_dom(url, host)
        # Follow the API's query-continue pagination until exhausted or
        # a fetch fails (fetch_dom returns None either way).
        while dom is not None:
            for rev in dom.xpath('//rev'):
                when = iso_to_dt(rev.attrib['timestamp'])
                changes.append((when, base + rev.attrib['revid'], {}))
            cont = dom.xpath('/api/query-continue/revisions/@rvstartid')
            if cont:
                dom = self.fetch_dom(url + "&rvstartid=" + cont[0], host)
            else:
                dom = None
        # Sort on the timestamp only: comparing whole tuples would fall
        # through to the dict in position 3 on timestamp ties.
        changes.sort(key=lambda change: change[0])
        return changes


def handler(req):
    """mod_python entry point: hand the request off to a WikiaHandler.

    The working directory is pinned first because WikiaHandler reads
    wikiahosts.txt relative to it.
    """
    os.chdir('/home/web/mementoproxy')
    return basehandler(req, WikiaHandler('wikia'))
