#!/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division

# PopCulture - A package to compute metrics on Wikipedia articles.
# Copyright (C) 2012  Nuno J. Silva
#
# This file is part of PopCulture
#
# PopCulture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# PopCulture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PopCulture.  If not, see <http://www.gnu.org/licenses/>.

import mwclient
import zlib
import bz2
import json
import re
import calendar
import datetime
import collections
import operator
import sys
import shelve
import difflib
import textmatcher
import threading
import time
import itertools

# Module-wide Differ instance, used by wiki_parse() to compute the
# per-section line diffs between consecutive revisions.
__differ = difflib.Differ()

class PageNotFound(Exception):
    """
    Signals that the requested article does not exist on the given
    Wikimedia instance.
    """

    # Template for the human-readable error text.
    _MESSAGE = 'The page {0} was not found on the Wikimedia instance running at {1}{2}.'

    def __init__(self, page_title, hostname, path):
        self.page_title = page_title
        self.hostname = hostname
        self.path = path

    def __str__(self):
        return self._MESSAGE.format(self.page_title, self.hostname, self.path)
                                                                                               

def get_stats_by_section(r, permutation, previous=None, *args):
    """
    Compute a RevisionStats object for every section of revision r.

    permutation aligns the sections of r with those of the previous
    revision (as produced by textmatcher.match_texts); previous is the
    list of per-section RevisionStats from the previous revision.
    Extra positional arguments are forwarded to RevisionStats.

    Returns a list with one RevisionStats (or None, for sections that
    are absent after permutation) per section.

    BUG FIX: `previous` used to default to a mutable `[]`; the list is
    extended in place below, so calls omitting the argument would have
    shared (and kept growing) a single list across invocations.
    """
    if previous is None:
        previous = []

    next_revs = textmatcher.permutate_list(split_revision_by_section(r), permutation)

    num_sections = len(next_revs)
    # Pad the previous stats so every current section has a counterpart
    # (None = the section did not exist in the previous revision).
    gap = num_sections - len(previous)
    if gap > 0:
        previous.extend([None] * gap)

    res = [RevisionStats(next_revs[i], previous[i], *args) if next_revs[i] is not None else None
           for i in range(num_sections)]

    return res

def get_stats_by_article(rev, *args):
    """
    Build and return a RevisionStats object for the given
    article-level revision, forwarding any extra arguments.

    NOTE(review): an earlier docstring claimed a cache avoided
    re-creating instances, but no caching happens here -- a fresh
    RevisionStats is constructed on every call.
    """

    return RevisionStats(rev, *args)

def split_revision_by_section(revision):
    """
    Given an mwclient revision object, covering the whole article
    length, split it in several objects, one for each section.

    Each returned object is a shallow copy of the revision whose u'*'
    entry holds only that section's text and whose u'secid' entry is
    the section's position.
    """
    # Heading markers from deepest (======) to shallowest (==), so a
    # deep heading is never consumed by a shallower alternative.
    # Sort-of inspired by
    # http://stackoverflow.com/questions/7795345/python-regex-mediawiki-section-parsing,
    # even if the regex is not the same.
    heading_pattern = ('======[^=]*======|=====[^=]*=====|====[^=]*===='
                       '|===[^=]*===|==[^=]*==')
    pieces = re.split(heading_pattern, revision[u'*'])

    result = []
    for index, body in enumerate(pieces):
        section_rev = revision.copy()
        section_rev[u'*'] = body
        section_rev[u'secid'] = index
        result.append(section_rev)
    return result


# Per-wiki word lists consumed by compute_word_ratios().  Keyed by the
# concatenated site arguments (hostname + script path), e.g.
# 'en.wikipedia.org/w/'.  Each wiki defines three families:
#   'good'      -- very common function words; overlap with the text's
#                  most frequent words feeds the quality score,
#   'offensive' -- profanity, used as a vandalism signal,
#   'bias'      -- peacock/weasel words, used as a controversiality
#                  signal.
__words = dict()
__words['en.wikipedia.org/w/'] = {
    'good': set(['the', 'of', 'and', 'in', 'to', 'a', 'is', 'for', 'on', 'was', 'as', 'with', 'by', 'that', 'at', 'from', 'his', 'he', 'it', 'are', 'an', 'be', 'which', 'were', 'this', 'or', 'has', 'first']),
    'offensive': set(['arse', 'asshole', 'asswipe', 'bitch', 
                      'bollocks', 'breeder', 'bugger', 
                      'bullshit', 'cunt', 'damnation', 
                      'faggot', 'feck', 'frak', 'fubar', 
                      'fuck', 'git', 'motherfucker', 
                      'nigger', 'pissant', 'pussy', 'shit', 
                      'slut', 'spastic', 'twat', 'wanker']),
    'bias': set(['legendary', 'great', 'eminent', 'visionary', 
                 'outstanding', 'leading', 'celebrated', 
                 'cutting-edge', 'extraordinary', 'brilliant', 
                 'famous', 'renowned', 'remarkable', 'prestigious', 
                 'world-class', 'respected', 'notable', 'virtuoso',
                 'cult', 'racist', 'perverted', 'sect', 
                 'fundamentalist', 'heretic', 'extremist', 
                 'denialist', 'terrorist', 'controversial', 
                 'supposed', 'purported', 'alleged', 'accused', 
                 'so-called', 'notably', 'interestingly', 
                 'clearly', 'certainly', 'fortunately', 'happily', 
                 'unfortunately', 'tragically', 'untimely', 
                 'reveal', 'expose', 'explain', 'find', 'note', 
                 'observe', 'insist', 'speculate', 'surmise', 
                 'claim', 'assert', 'admit', 'confess', 'deny'])}

# Portuguese Wikipedia word lists ('good' / 'offensive' / 'bias'),
# used by compute_word_ratios().
__words['pt.wikipedia.org/w/'] = {
    'good': set(['de', 'a', 'e', 'o', 'do', 'da', 'em', 'que', 'no', 'com', 'uma', 'um', 'para', 'na', 'os', 'por', 'dos', 'como', 'se', 'foi', 'as', 'mais', 'ao', 'sua', 'das', 'seu', 'ou', 'ser']),
    'offensive': set(['babaca','bacanal','bacurinha','badalo','badamerda','baranga','bardamerda','barrote','bedamerda','benga','berdamerda','bichana','bilha','bimba','bimbada','boceta','boiola','bolagato','brichote','broche','bruaca','buceta','bugiranga','bunda','burro','cabaço','cacete','cachorra','cadela','cagalhão','cagar','canhão','capô-de-fusca','caralho','catatau','catraia','chavasca','chuchu','chupeta','colhão','comua','cona','conanas','conaça','coxanga','crica','cu','cu-de-ferro','cu-de-mãe-joana','cunete','cunilíngua','cuzão','fiote','foda','foder','forunfar','fuampa','fufa','furico','furunfar','gaita','gajo','galinha','grelo','grão','gulosa','idiota','jeba','jumento','kiwi','lola','mamado','mangalho','maricas','mastro','meinha','merda','messalina','minete','moca','nhonhoca','pandeiro','paneleiro','patolar','pau-no-cu','peida','peidar','peido','pene','pentelho','pica','pindocar','pinto','pintudo','piranha','piroca','pirrola','pito','piupiu','pixa','piça','porra','potranca','punheta','puta','puto','queca','quenga','rabeta','racha','rapidinha','sapatona','sapatão','siririca','suruba','tabaca','tanajura','tesudo','tesão','teta','toba','troca-troca','vadia','xana','xarifa','xarola','xarolo','xavasca','xereca','xibungo','ximbica','xiranha','xiri','xota','xoxota','óbu']),
    'bias': set(['lendário', 'grande', 'eminente', 'visionário', 'notável', 'líder', 'célebre',
                 'extraordinário', 'brilhante', 'famoso', 'renomado', 'prestigioso', 'respeitado',
                 'virtuoso', 'culto', 'racista', 'perverso', 'seita', 'fundamentalista', 'herege',
                 'extremista', 'negacionista', 'terrorista', 'libertador', 'controverso',
                 'acredita-se', 'suposto', 'alegado', 'pretenso', 'acusado', 'chamado',
                 'notavelmente', 'interessantemente', 'claramente', 'certamente',
                 'afortunadamente', 'felizmente', 'infelizmente', 'tragicamente', 'precocemente',
                 'revelou', 'indicou', 'expôs', 'explicou', 'encontrou', 'notou', 'observou',
                 'insistiu', 'especulou', 'conjeturou', 'alegou', 'afirmou', 'admitiu', 'confessou',
                 'negou'])}

# French Wikipedia word lists ('good' / 'offensive' / 'bias'),
# used by compute_word_ratios().
__words['fr.wikipedia.org/w/'] = {
    'good': set(['de', 'la', 'le', 'et', 'en', 'du', 'des', 'les', 'est', 'dans', 'un', 'par', 'au', 'une', 'pour', 'il', 'sur', 'qui', 'que', 'a', 'son', 'avec', 'plus', 'se', 'sont', 'ou', 'ce', 'aux']),
    'offensive': set(['beauf','blueneck','boche','con','enfoiré','fif','gabacho','gouine','métèque','nègre','polard','poufiasse','pédé','pétroleuse','racaille','social-traître','travelo','wackes']),
    'bias': set(['souvent','généralement','inégalé','prétendre','soi-disant','naturellement','manifestement','essentiellement','principalement','extrême','terroriste','terrorisme','résistance','fondamentalisme','théorie','mythe','dictateur','auteur'])}

# Spanish Wikipedia word lists ('good' / 'offensive' / 'bias'),
# used by compute_word_ratios().
__words['es.wikipedia.org/w/'] = {
    'good': set(['de', 'la', 'en', 'el', 'y', 'a', 'que', 'del', 'los',
                 'se', 'por', 'con', 'las', 'un', 'su', 'una', 'al',
                 'como', 'para', 'es', 'no', 'fue', 'o', 'lo', 'sus',
                 'entre', 'este', 'esta']),

    'offensive': set(['alampar', 'batir', 'blanquillo', 'bola', 'bollo',
                      'cabronazo', 'cachada', 'cachar', 'cachero',
                      'cachucha', 'cacorro', 'cagar', 'cagarla',
                      'cagón', 'cajeta', 'callampa', 'calzar', 'caraja',
                      'carajo', 'cepillar', 'chele', 'chichi', 'chinga',
                      'chingadazo', 'chingadera', 'chingado', 'chingar',
                      'chocho', 'cholga', 'choro', 'chota', 'choto',
                      'chucha', 'chuchumeca', 'chupe', 'cimbrel',
                      'cipote', 'coco', 'cojón', 'concha', 'conchudo',
                      'correrse', 'coño', 'criatura', 'culero',
                      'culiado', 'culicagado', 'culo', 'cámara',
                      'difarear', 'difariar', 'diuca', 'empergeñar',
                      'empernar', 'enculado', 'escoñetar', 'fleto',
                      'follar', 'furcia', 'gilipollas', 'goma',
                      'gonorrea', 'güila', 'haiga', 'haigamos', 'haigan', 'haigas', 'haigáis', 'hijuna', 'hocico', 'hostia',
                      'hoyo', 'huevón', 'inflar', 'jalar', 'jarioso', 'joder', 'lagarta', 'leche', 'lumi',
                      'macaquero', 'machete', 'madrazo', 'madrear', 'madriza', 'maricón', 'marihuanero',
                      'mazo', 'mear', 'mes', 'mierda', 'mojar', 'mojarse', 'mojón', 'mostacero', 'nabo',
                      # BUG FIX: a missing comma made 'paja' 'mental'
                      # concatenate into the single entry 'pajamental'.
                      'nardo', 'orto', 'paja', 'paja mental', 'pajero', 'pajilla', 'pajillero', 'panocha',
                      'pechuga', 'pedar', 'peder', 'pedo', 'pedorrear', 'perra', 'petar', 'picha', 'picho',
                      'pichula', 'pico', 'pijo', 'pinche', 'pindonga', 'pinga', 'pisar', 'playo', 'polla',
                      'polludo', 'poronga', 'potorro', 'pucha', 'puta', 'putazo', 'putañero', 'putiza',
                      'puto', 'pájaro', 'quilombo', 'raja', 'reculiado', 'sapo', 'tango', 'torta',
                      'tragasable', 'tragasables', 'tripa', 'vacunar', 'verga', 'vergación', 'verijón',
                      'zuma', 'ñinga']),
    'bias': set(['afirmó', 'aseguró', 'indicó', 'descubrió', 'reveló', 'supuesto', 'pretendido',
                 'aunque', 'naturalmente', 'evidentemente', 'indudablemente', 'obviamente',
                 'claramente', 'indiscutiblemente', 'principalmente', 'básicamente',
                 'especialmente', 'irónicamente', 'sorprendentemente', 'desafortunadamente',
                 'afortunadamente', 'curiosamente', 'tristemente', 'trágicamente', 'escándalo',
                 'polémica', 'controversia', 'legendario', 'mito', 'teóricamente',
                 'fundamentalista', 'secta'])}





def compute_word_ratios(split_content, *siteargs):
    """
    Return the set of ratios for the occurence of several families of
    words in the given text: for each family configured for the wiki,
    the number of the text's 15 most frequent words that belong to the
    family, divided by 15.  Returns None when the wiki identified by
    siteargs has no word lists configured.
    """
    site_key = ''.join(siteargs)
    try:
        families = __words[site_key]
    except KeyError:
        return None

    counter = collections.Counter(map(str.lower, split_content))
    top_words = set(word for word, count in counter.most_common(15))

    return dict((family, len(top_words & members) / 15)
                for family, members in families.items())

class RevisionStats:
    """
    Class that holds a set of statistics (\"scores\") about a
    wikipedia revision: length, controversiality, quality and
    vandalism.

    The granularity of the data analyzed is not tied, as far as it is
    provided as a dictionary with the entries 'user', 'comment', '*'
    (content) and 'anon' (if made by an anonymous user).

    A previous RevisionStats object (previous_revision) may also be
    provided. The granularity is not enforced in any way, but of course
    that, in order to get meaningful results, the granularity of the
    current and previous revision data should be the *same*.

    Previous revision data is used to compute how much the content
    changed and who is the author that gets associated with the
    current revision (if there are no changes, the author is inherited
    from the previous revision). It is also used to detect deletions.

    In the case no previous revision is supplied, the current revision
    is assumed to be the very first one (previous revision gets the
    same author and empty content).
    """
    def __init__(self, revision, previous_revision=None, site='localhost', path='/mediawiki/'):
        # Python 2 module: content is the UTF-8 byte string of the
        # revision text.
        content = revision[u'*'].encode("utf-8")

        # Multiset of whitespace-separated tokens, kept on the instance
        # so that the *next* revision can diff against it.
        split_content = content.split()
        new = collections.Counter(split_content)
        self.split_counter = new
        self.author = revision[u'user']

        self.revid = revision[u'revid']

        self.number_of_words = len(split_content)

        if previous_revision is not None:
            original = previous_revision.split_counter
           
            # Words removed / added relative to the previous revision
            # (Counter subtraction never yields negative counts).
            # NOTE: itervalues() is Python-2-only.
            minus = sum((original - new).itervalues())
            plus = sum((new - original).itervalues())

            # Dirty hack here: keep the value as long as needed but
            # discard it as soon as it's not required anymore
            previous_revision.split_counter = None

            num_changes = plus + minus
            
            # Unchanged content (e.g. a section untouched by this edit)
            # inherits its author from the previous revision.
            if num_changes == 0:
                self.author = previous_revision.author

            # Fraction of the changed words that were removals; exactly
            # 1 means a pure deletion.
            deletion_impact = (minus / num_changes) if num_changes > 0 else 0
            
            combinated_number_of_words = self.number_of_words + previous_revision.number_of_words
            # Relative size of the edit: changed words over the summed
            # word counts of both revisions.
            self.impact = num_changes / combinated_number_of_words if combinated_number_of_words > 0 else 0
        else:
            # Very first revision: everything below treats it as fully
            # new content.
            num_changes = 0
            deletion_impact = 0
            self.impact = 1

        
        
        
        # small properties
        self.length = len(content)

        # Crude revert detection: any edit comment containing the
        # substring 'rv' (this also matches unrelated words that happen
        # to contain 'rv').
        if u'comment' in revision:
            self.is_revert = 'rv' in revision[u'comment']
        else:
            self.is_revert = 0
            
        self.is_deletion = (deletion_impact == 1)

        self.date = revision[u'unixtime']
        
        # Clamp non-monotonic timestamps: MediaWiki occasionally
        # reports a revision dated before its predecessor.
        if (previous_revision is not None
            and previous_revision.date > self.date):
            self.date = previous_revision.date
            print ' WARNING: Hit MediaWiki revision date bug for {0}{1}{2}! '.format(site, path, self.revid)

        self.url = revision[u'url']

        if ((num_changes == 0)
            and (previous_revision is not None)):           
            # Nothing changed: the scores carry over unchanged.
            self.quality = previous_revision.quality
            self.vandalism = previous_revision.vandalism
            self.controversiality = previous_revision.controversiality
        else:
            if self.length > 0:
                # Wikilink density, scored against a reference ratio;
                # the score drops to zero beyond twice the reference.
                links =  len(re.findall('\[\[[^\]]*\]\]', content))
                link_ratio = links / self.number_of_words if self.number_of_words > 0 else 0
                reference_link_ratio = 0.052
                link_score = ((1 - abs(link_ratio - reference_link_ratio) / reference_link_ratio)
                              if link_ratio < 2 * reference_link_ratio else 0)

                # Character frequency score: a score measuring how closely
                # the given text follows the common character frequency of
                # english texts.

                # map() here iterates the individual characters of the
                # byte string.
                text_top_chars = set([i for i,j in collections.Counter(map(str.lower, content)).most_common(10)])
                top_chars = set(['a', 'e', 'h', 'i', 'n', 'o', 'r', 's', 't'])
                char_frequency = len(text_top_chars & top_chars) / 9 # was 10 (WHY?)

                # Compressibility score: deviation from a reference
                # compressibility ratio.
                ratio = len(zlib.compress(content)) / self.length
                reference_ratio = 0.362
                compress_score = ((1 - abs(ratio - reference_ratio) / reference_ratio)
                                  if ratio < 2 * reference_ratio else 0)

                # Short texts give unreliable scores, so vandalism and
                # controversiality are scaled down below 1000 bytes.
                if self.length <= 1000:
                    handicap = 0.2 + self.length / 1250
                else:
                    handicap = 1
                
           
                # Compute final values
                word_ratios = compute_word_ratios(split_content, site, path)

                if word_ratios is not None:
                    # Averages of the individual [0, 1] sub-scores.
                    self.quality = sum([compress_score,
                                        word_ratios['good'],
                                        (1 - word_ratios['offensive']),
                                        (1 - word_ratios['bias']),
                                        char_frequency, 
                                        link_score]) / 6


                    self.vandalism = sum([(1 - compress_score),
                                          1 - word_ratios['good'],
                                          word_ratios['offensive'],
                                          (1 - char_frequency),
                                          (1 - link_score)]) / 5 


                    self.controversiality = sum([word_ratios['good'],
                                                 word_ratios['bias'],
                                                 char_frequency,
                                                 (1 - deletion_impact)]) / 4
                else:
                    # No word lists configured for this wiki: fall back
                    # to the language-independent sub-scores only.
                    self.quality = sum([compress_score,
                                        char_frequency, 
                                        link_score]) / 3


                    self.vandalism = sum([(1 - compress_score),
                                          (1 - char_frequency),
                                          (1 - link_score)]) / 3

                
                    self.controversiality = sum([char_frequency,
                                                 (1 - deletion_impact)]) / 2

                self.vandalism = self.vandalism * handicap
                self.controversiality = self.controversiality * handicap
            else:
                # Empty content: all scores are zero.
                self.quality = 0
                self.vandalism = 0
                self.controversiality = 0
            


    def debug(self):
        """
        Print the metrics and properties of the current object.
        """
        print 'P self.is_revert: ', self.is_revert
        print 'P self.is_deletion: ', self.is_deletion
        print 'P self.date: ', self.date
        print '* self.length: ', self.length
        print '* self.quality: ', self.quality
        print '* self.vandalism: ', self.vandalism
        print '* self.controversiality: ', self.controversiality

    def list_repr(self):
        """
        Return the statistics as a plain (JSON-serializable) list:
        [quality, vandalism, controversiality, length, impact, date,
        is_deletion (0/1), is_revert (0/1), author, revid, url].
        """
        return [self.quality,
                self.vandalism,
                self.controversiality,
                self.length,
                self.impact,
                self.date,
                1 if self.is_deletion else 0,
                1 if self.is_revert else 0,
                self.author,
                self.revid,
                self.url]

def section_stats(page_title, start, end, *siteargs):
    """
    Retrieves section-level statistics for an article, describing the
    revisions which occurred from start, up to (but not including)
    end.
    
    Statistics for sections at times they don't exist are filled with
    empty values (zeros and empty strings).
    
    """

    all_stats = page_stats(page_title, *siteargs)[1]
    start_index = 0
    end_index = 0
    # The article-level rows serve as the time index: entry [5] of
    # each list_repr() row is the revision date.
    article_level_stats = page_stats(page_title, *siteargs)[0]
    generator = enumerate(article_level_stats)
    
    # Find the first revision dated at or after `start` ...
    for i, rs in generator:
        if rs[5] >= start:
            break
    start_index = i
        
    # ... then, resuming the *same* enumeration, the last revision
    # before `end`.
    for i, rs in generator:
        if rs[5] >= end:
            i = i - 1
            break
    end_index = i + 1 if start <= article_level_stats[start_index][5] < end else start_index

    # Widen the window by one revision on each side of the range.
    if start_index > 0:
        start_index = start_index - 1

    end_index = min (len(article_level_stats), end_index + 2)

    # Apply the same revision window to every section's time series.
    res =  [section[start_index:end_index] for section in all_stats]

    if len(res) > 0 and(len(res[0])) == 0:
        return []
    else:
        return res

def revision_stats_json(*args):
    """
    JSON wrapper to retrieve section-level statistics (section_stats()).
    """
    stats = section_stats(*args)
    return json.dumps(stats)

# In-memory caches: total revision count per article
# (revision_count()) and number of revisions processed so far
# (increment_count()); both keyed by site args + page title.
__revcount_cache = dict()
__progress_cache = dict()

def increment_count(page_title, *siteargs):
    """
    Record that one more revision of the given article has been
    processed.
    """
    key = ''.join(siteargs).encode("utf8") + page_title.encode("utf8")
    __progress_cache[key] = __progress_cache.get(key, 0) + 1

def talk_pages(page_title, *siteargs):
    """
    Return the unix timestamps of every revision of the article's
    'Talk:' page on the given MediaWiki instance.
    """
    site = mwclient.Site(*siteargs)
    talk_page = site.Pages['Talk:' + page_title]
    timestamps = []
    for talk_revision in talk_page.revisions(prop='timestamp', limit='max'):
        timestamps.append(talk_revision[u'unixtime'])
    return timestamps

def revision_count(page_title, *siteargs):
    """
    Return the total number of revisions of the given article,
    memoizing the answer in __revcount_cache.
    """
    key = ''.join(siteargs).encode("utf8") + page_title.encode("utf8")
    try:
        return __revcount_cache[key]
    except KeyError:
        site = mwclient.Site(*siteargs)
        page = site.Pages[page_title]
        count = len(list(page.revisions(prop='', limit='max')))
        __revcount_cache[key] = count
        return count

def status(page_title, *siteargs):
    """
    Return a '<processed> of <total>' progress string for the article,
    where <processed> is 0 when no revision has been handled yet.
    """
    key = ''.join(siteargs).encode("utf8") + page_title.encode("utf8")
    processed = __progress_cache.get(key, 0)
    total = revision_count(page_title, *siteargs)
    return '{0} of {1}'.format(processed, total)

def retrieve_revisions(page_title, *siteargs):
    """
    Revision retriever: retrieves the URL and content, along with
    several other properties, from a MediaWiki instance (specified by
    siteargs) for the chosen article (page_title).  Revisions are
    returned oldest first.
    """
    wanted_properties = 'ids|content|user|comment|timestamp'
    wiki = mwclient.Site(*siteargs)
    article = wiki.Pages[page_title]
    return article.revisions(prop=wanted_properties, dir='newer')

def split_by_wiki_section(string):
    """
    Replace MediaWiki section headings with HTML header tags and split
    the text at each heading, returning the list of section bodies
    (the text before the first heading is the first element).
    """
    # Deepest headings first, so e.g. '===' is never consumed by the
    # '==' rule.
    heading_rules = (
        ('======([^=]*)======', '<section-boundary/>\n\n<h5>\\1</h5>\n\n'),
        ('=====([^=]*)=====', '<section-boundary/>\n\n<h4>\\1</h4>\n\n'),
        ('====([^=]*)====', '<section-boundary/>\n\n<h3>\\1</h3>\n\n'),
        ('===([^=]*)===', '<section-boundary/>\n\n<h2>\\1</h2>\n\n'),
        ('==([^=]*)==', '<section-boundary/>\n\n<h1>\\1</h1>\n\n'),
    )
    for pattern, replacement in heading_rules:
        string = re.sub(pattern, replacement, string)
    return re.split('<section-boundary/>', string)

def wiki_parse_string(string, *siteargs):
    """
    Translate a subset of MediaWiki markup in the given string into
    HTML.  Wikilinks and media links are resolved against the
    MediaWiki instance described by siteargs.
    """
    serverurl = ''.join(siteargs).encode("utf8")
    link_base = 'http://' + serverurl + 'index.php?title='

    # (pattern, replacement) pairs, applied in order: media links
    # first (so the generic wikilink rules do not swallow them), then
    # wikilinks, external links, emphasis, templates and newlines.
    substitutions = (
        ('\\n\\*', '\\n<li>'),
        ('\[\[(Image:[^\|\]]*)\|([^\|\]]*\|)*([0-9]*) ?px[^\]]*\]\]',
         '<a href="' + link_base + '\\1"><img width="\\3px" src="' + link_base + 'Special:Filepath/\\1"></a>'),
        ('\[\[(Image:[^\|\]]*)\|[^\]]*\]\]',
         '<a href="' + link_base + '\\1"><img src="' + link_base + 'Special:Filepath/\\1"></a>'),
        ('\[\[(Image:[^\]]*)\]\]',
         '<a href="' + link_base + '\\1"><img src="' + link_base + 'Special:Filepath/\\1"></a>'),
        ('\[\[(File:[^\|\]]*)\|[^\]]*\]\]',
         '<a href="' + link_base + '\\1"><object data="' + link_base + 'Special:Filepath/\\1"></object></a>'),
        ('\[\[(Media:[^\|\]]*)\|[^\]]*\]\]',
         '<a href="' + link_base + '\\1"><object data="' + link_base + 'Special:Filepath/\\1"></object></a>'),
        ('\[\[([^\]]*)\|([^\]]*)\]\]', '<a href="' + link_base + '\\1">\\2</a>'),
        ('\[\[([^\]]*)\]\]', '<a href="' + link_base + '\\1">\\1</a>'),
        ('\[([^ \]]*) ([^\]]*)\]', '<a href="\\1">\\2</a>'),
        ('\[([^\]]*)\]', '<a href="\\1">[&#x21d7;]</a>'),
        ('\'\'([^\']*)\'\'', '<em>\\1</em>'),
        ('\\{\\{', '<span class="wikitemplate" style="display: none">'),
        ('\\}\\}', '</span>'),
        ('\\n\\n', '<br/>'),
        ('\\n', ' '),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)

    paragraphs = [u'<p>{0}</p>'.format(chunk) for chunk in string.split('<br/>')]
    return u'\n'.join(paragraphs)

def wiki_parse(string, previous, comment, permutation, *siteargs):
    """
    Parse the given wikipedia content, replacing some of the markup by
    its HTML counterpart. The Wikimedia instance this content belongs
    to is also specified, in order to translate wikilinks.

    Returns a list
    [sections_html, section_diffs_html, comment_html,
     article_diff_summary_html, permutation],
    where the first two are per-section lists aligned by `permutation`
    against `previous` (the previous revision's parsed sections).
    """


    # Split into sections, render each one, then reorder so sections
    # line up with the previous revision's.
    tmp_res = split_by_wiki_section(string)
    res = [wiki_parse_string(string, *siteargs) for string in tmp_res]
    res = textmatcher.permutate_list(res, permutation)


    diffs = []

    # Article-wide totals of added and removed lines.
    diff_acc_plus = 0
    diff_acc_minus = 0

    # NOTE: izip_longest is the Python 2 spelling (zip_longest in 3).
    # Entries may be None for sections absent on one side.
    for pair in itertools.izip_longest(previous, res, fillvalue=''):
        line_diffs = __differ.compare(pair[0].split('\n') if pair[0] is not None else [''],
                                      pair[1].split('\n') if pair[1] is not None else [''])
        acc = []
        diff_counter_plus = 0
        diff_counter_minus = 0
        for line in line_diffs:
            # Differ prefixes each line with '+ ', '- ', '  ' or '? '.
            diff_move = line[0:2]
            if diff_move == u'+ ':
                diff_counter_plus = diff_counter_plus + 1
                diff_move_str = u'plus'
            elif diff_move == u'- ':
                diff_counter_minus = diff_counter_minus + 1
                diff_move_str = u'minus'
            else:
                diff_move_str = u''

            # '? ' hint lines are dropped; all other lines are wrapped
            # in diff-styled divs (inner <p> tags are stripped first).
            if diff_move != u'? ':
                acc.append(u'<div class="diff{2}"><p><span class="diffmarker{2}">{0}</span>{1}</p></div>\n'.format(diff_move,re.sub('</?p>','',line[2:]),diff_move_str))
        diff_acc_plus = diff_acc_plus + diff_counter_plus
        diff_acc_minus = diff_acc_minus + diff_counter_minus
        diffs.append(u'Section diffs: + {0}, - {1}'.format(diff_counter_plus, diff_counter_minus) + ''.join(acc))
    
    return [res, diffs, u'<p><strong>Rev. comment:</strong> ' + comment +'</p>', u'<p><strong>Article diffs: +{0} -{1}</strong></p>'.format(diff_acc_plus, diff_acc_minus), permutation]


def article_stats(page_title, start, end, *siteargs):
    """
    Retrieves article-level statistics for an article, describing the
    revisions which occurred from start, up to (but not including)
    end.
    """
    # Entry [5] of each list_repr() row is the revision date.
    all_stats = page_stats(page_title, *siteargs)[0]
    start_index = 0
    end_index = 0
    generator = enumerate(all_stats)
    
    # Find the first revision dated at or after `start` ...
    for i, rs in generator:
        if rs[5] >= start:
            break
    start_index = i
        
    # ... then, resuming the *same* enumeration, the last revision
    # before `end`.
    for i, rs in generator:
        if rs[5] >= end:
            i = i - 1
            break
    end_index = i + 1 if start <= all_stats[start_index][5] < end else start_index

    # Widen the window by one revision on each side of the range.
    if start_index > 0:
        start_index = start_index - 1

    end_index = min (len(all_stats), end_index + 2)

    res =  all_stats[start_index:end_index]

    return [res] 

def talk_stats(page_title, start, end, *siteargs):
    """
    Retrieves the timestamps of the article's talk-page revisions
    which occurred from start, up to (but not including) end.
    """
    # Element [2] of page_stats() is the list of talk-page revision
    # unix timestamps.
    all_stats = sorted(page_stats(page_title, *siteargs)[2])
    start_index = 0
    end_index = 0

    if len(all_stats) == 0:
        return []

    generator = enumerate(all_stats)

    # Find the first timestamp at or after `start` ...
    for i, rs in generator:
        if rs >= start:
            break
    start_index = i
        
    # ... then, resuming the *same* enumeration, the last timestamp
    # before `end`.
    for i, rs in generator:
        if rs >= end:
            i = i - 1
            break
    end_index = i + 1 if start <= all_stats[start_index] < end else start_index

    # Widen the window by one entry on each side of the range.
    if start_index > 0:
        start_index = start_index - 1

    if end_index < len(all_stats) - 1:
        end_index = end_index + 1

    res =  all_stats[start_index:end_index]

    return res 


# Disk-backed caches (pickle protocol 2): computed statistics keyed by
# site + page title, and parsed HTML content keyed by site + revid.
__stats_cache = shelve.open('stats_cache.db', protocol=2)
__content_cache = shelve.open('content_cache.db', protocol=2)

# One lock per MediaWiki instance, shared by all threads.
__wikimedia_locks = dict()
def retrieve_lock(*siteargs):
    """
    Return the lock serializing work on the MediaWiki instance
    described by siteargs, creating it on first use.

    BUG FIX: the previous check-then-insert was not atomic, so two
    threads racing on a new key could each create (and use) a
    different Lock, defeating the mutual exclusion.  dict.setdefault
    performs the check and insert as a single operation.
    """
    lock_key = ''.join(siteargs)
    return __wikimedia_locks.setdefault(lock_key, threading.Lock())

def page_stats(page_title, *siteargs):
    """
    Retrieves statistics for an article, both article- and
    section-level, describing all of its revisions.

    Returns a tuple (article_rows, section_rows, talk_timestamps).
    Results will be cached to a disk cache.

    Raises PageNotFound when the article has no revisions at all.
    """
        
    master_key = ''.join(siteargs).encode("utf8")
    key = master_key + page_title.encode("utf8")
    # Fast path: already present in the disk cache.
    if key in __stats_cache:
        return __stats_cache[key]
    else:
        # Serialize per MediaWiki instance so the same article is
        # never fetched twice concurrently.
        lock = retrieve_lock(*siteargs)
        lock.acquire()
        try:
            # Double-checked: another thread may have filled the cache
            # while we waited for the lock.
            # BUG FIX: this early return used to call lock.release()
            # explicitly *and* again in the finally clause; releasing
            # an already-released lock raises an error.  The finally
            # clause alone is sufficient.
            if key in __stats_cache:
                return __stats_cache[key]

            # Warm the revision counter so status() has a total.
            _ = revision_count(page_title, *siteargs)
            revs = retrieve_revisions(page_title, *siteargs)

            previous_revision = None
            article_res = []
            previous = []
            previous_content = []
            section_res = []

            num_sections = 0

            previous_split_content = []

            for revision in revs:
                increment_count(page_title, *siteargs)
                content_key = master_key + str(revision[u'revid'])

                # Wikipedia revisions may get their content deleted
                # from public archives.
                if u'*' not in revision:
                    revision[u'*'] = ''

                if u'user' not in revision:
                    revision[u'user'] = '(REDACTED)'

                # Align this revision's sections with the previous
                # revision's, so section statistics line up over time.
                split_content = split_by_wiki_section(revision[u'*'])
                permutation = textmatcher.match_texts(split_content, previous_split_content)

                split_content_ordered = textmatcher.permutate_list(split_content, permutation)

                # Sections missing from this revision keep the content
                # they had in the previous one.
                for i, token in enumerate(split_content_ordered):
                    if token is None:
                        split_content_ordered[i] = previous_split_content[i]

                previous_split_content = split_content_ordered

                # Render the HTML version (content + diffs) and store
                # it in the content cache for revision_content()/
                # revision_diff().
                parsed_text = wiki_parse(revision[u'*'] if u'*' in revision else '', previous_content, revision.get(u'comment', u''), permutation, *siteargs)
                __content_cache[content_key] = parsed_text
                previous_content = parsed_text[0]

                article_rev_stats = get_stats_by_article(revision, previous_revision, *siteargs)
                previous_revision = article_rev_stats
                article_res.append(article_rev_stats.list_repr())

                section_rev_stats = get_stats_by_section(revision, permutation, previous, *siteargs)
                num_sections = max(num_sections, len(section_rev_stats))
                previous = section_rev_stats
                section_res.append([rs.list_repr() if rs is not None else None for rs in section_rev_stats])



            if len(section_res) == 0:
                raise PageNotFound(page_title, *siteargs)


            # Append a synthetic "now" entry so plots extend up to the
            # present time.
            last_article_stats = article_rev_stats.list_repr()
            last_article_stats[5] = time.time()
            article_res.append(last_article_stats)

            # Transpose section_res (revision-major) into per-section
            # time series, padding revisions that had fewer sections.
            sections = []
            for i in range(num_sections):
                sections.append([rev[i] if len(rev) > i
                                 else None
                                 for rev in section_res])
                # Synthetic "now" entry, as at article level.
                # BUG FIX: the last real row itself used to be mutated
                # and re-appended, so the final *two* rows were one
                # shared list whose date had been overwritten with the
                # current time.  Copy the row before stamping it.
                last = sections[i][-1]
                if last is not None:
                    last = list(last)
                    last[5] = time.time()
                sections[i].append(last)

            talk_times = talk_pages(page_title, *siteargs)

            __stats_cache[key] = (article_res, sections, talk_times)
            return (article_res, sections, talk_times)
        finally:
            lock.release()


def revision_content(revid, host='', path='', section=None):
    """
    Retrieve the cached, HTML-rendered content of the specified
    revision from the specified WikiMedia instance, either for the
    entire article or for a specific section.

    Returns 'Revision not cached' / 'Invalid Section' strings on a
    cache miss or an out-of-range section index.
    """
    master_key = ''.join([host, path, str(revid)]).encode("utf8")
    try:
        # Cache entries are [content, diffs, comment, article_diff,
        # permutation] as produced by wiki_parse().
        entry = __content_cache[master_key]
        content = entry[0]
        comment = entry[2]

        if section is not None:
            return comment + content[section]
        else:
            permutation = entry[4]

            # Sections referenced by the permutation, in permuted
            # order.
            # BUG FIX: this previously used filter(None, permutation),
            # which drops every falsy value -- including the valid
            # section index 0 -- so the section permuted into slot 0
            # went missing.  Only None entries must be skipped.
            res = [content[i] for i in permutation if i is not None]

            # Sections with no counterpart in the permutation are
            # appended at the end.
            for (i, e) in enumerate(permutation):
                if e is None:
                    res.append(content[i])

            return comment + ''.join([el for el in res if el is not None])

    except KeyError:
        return 'Revision not cached'
    except IndexError:
        return 'Invalid Section'

def revision_diff(revid, host='', path='', section=None):
    """
    Retrieve the cached, HTML-rendered diff of the specified revision
    from the specified WikiMedia instance, either for the entire
    article or for a specific section.

    Returns 'Revision not cached' / 'Invalid Section' strings on a
    cache miss or an out-of-range section index.
    """
    master_key = ''.join([host, path, str(revid)]).encode("utf8")
    try:
        # Cache entries are [content, diffs, comment, article_diff,
        # permutation] as produced by wiki_parse().
        entry = __content_cache[master_key]
        content = entry[1]
        comment = entry[2]

        if section is not None:
            try:
                return comment + content[section]
            except IndexError:
                # The section is new in this revision: show its full
                # content instead of a diff.
                return '<p>(New section)</p>' + entry[0][section]
        else:
            article_diff = entry[3]
            permutation = entry[4]

            # BUG FIX: this previously used filter(None, permutation),
            # which drops every falsy value -- including the valid
            # section index 0 -- so that section's diff was misplaced.
            # Only None entries must be skipped.
            list_of_entries = [e for e in permutation if e is not None]

            # Sections with no counterpart get appended at the end.
            for (i, e) in enumerate(permutation):
                if e is None:
                    list_of_entries.append(i)

            res = [content[i] for i in list_of_entries]

            # Any remaining section diff is included too, unless it is
            # the canonical "empty diff" placeholder.
            for i in range(len(content)):
                if i not in list_of_entries:
                    candidate = content[i]
                    if candidate != 'Section diffs: + 0, - 0<div class="diff"><p><span class="diffmarker">  </span></p></div>\n':
                        res.append(candidate)

            return article_diff + comment + ''.join(res)

    except KeyError:
        return 'Revision not cached'
    except IndexError:
        return 'Invalid Section'

def article_stats_json(page_title, start, end, *siteargs):
    """
    JSON wrapper to retrieve article-level statistics (article_stats()).
    """
    stats = article_stats(page_title, start, end, *siteargs)
    return json.dumps(stats)

def talk_stats_json(page_title, start, end, *siteargs):
    """
    JSON wrapper to retrieve talk page statistics (talk_stats()).
    """
    stats = talk_stats(page_title, start, end, *siteargs)
    return json.dumps(stats)
