#!/usr/bin/env python3
"""wikiproc.py - Object-oriented interface to Wiki XML dumps.
Wikiproc converts relevant information from a Wikipedia XML
dump to JSON format.

Copyright Matthew Leon Grinshpun, 2009
"""

import json
import os
import os.path
import re
import sys
from multiprocessing import Pool, Manager
from optparse import OptionParser

import cleaner

def tag_re(tagname, flags=0):
    """Build a compiled pattern that captures the text enclosed by <tagname>...</tagname>."""
    pattern = "<{0}>(.*?)</{0}>".format(tagname)
    return re.compile(pattern, flags)

"Various regular expressions to locate features of articles"
titlere = tag_re('title') 
idre = tag_re('id') 
transblocre = re.compile('((?:\[\[[-a-z]{2,}:(.*?)]]\n?)+)</text>')
transre = re.compile('([-a-z]{2,}):(.*?)]]')
redirectre = re.compile('<text[^>]*>#REDIRECT \[\[(.*?)]](:?.*?)</text>', re.DOTALL)
langre = re.compile('xml:lang="(.*?)"')
textre = re.compile('<text[^>]*>(.+?)</text>', re.DOTALL)

def feed_queue(func, queue):
    """Pop one argument tuple from *queue* and apply *func* to it."""
    args = queue.get()
    return func(*args)

def process_article(text, trans_re, exclusive=False, redirects=False,
                    savedir=None, wc=False, cleaners=None):
    """Turn the raw XML of one <page> element into an Entry object.

    Returns a Redirect when the page is a redirect and *redirects* is set,
    an Article otherwise, or None when *exclusive* is set and the article
    carries no translation.  When *savedir* is given, the article body is
    (optionally cleaned through *cleaners* and) written to
    savedir/<title>.txt; with *wc* also set, the word count of the saved
    text is stored on the article as ``wc``.
    """
    redirectmatch = redirectre.search(text)
    if redirectmatch and redirects:
        return Redirect(text, redirectmatch.group(1))

    article = Article(text, trans_re)
    if exclusive and not article.translation:
        # Caller asked only for translated articles; drop this one.
        return None

    if savedir:
        match = textre.search(text)
        if match:
            articletext = match.group(1)
            if cleaners:
                articletext = cleaner.clean(articletext, cleaners)
            # NOTE(review): a title containing os.sep would escape savedir —
            # confirm upstream titles are safe or sanitize here.
            outpath = os.path.join(savedir, article.title + ".txt")
            # Explicit encoding so output does not depend on the locale.
            with open(outpath, 'w', encoding='utf-8') as outfile:
                outfile.write(articletext)
            if wc:
                article.wc = len(articletext.split())
    return article

class WikiDump:
    """Drives extraction of articles from a Wikipedia XML dump file.

    Keyword options (all optional):
      trans_lang  -- language code whose interlanguage links to record
      exclusive   -- keep only articles that have such a translation
      redirects   -- emit Redirect objects for redirect pages
      savedir     -- directory in which to save article bodies
      wordcount   -- store word counts of saved bodies
      cleanerfile -- file of cleaner definitions applied before saving
    """

    def __init__(self, dumpfile, **kwargs):
        self.dumpfile = dumpfile

        # Defaults first, then caller overrides: previously, omitting any of
        # these kwargs caused an AttributeError later in __init__/process().
        self.trans_lang = None
        self.exclusive = False
        self.redirects = False
        self.savedir = None
        self.wordcount = False
        self.cleanerfile = None
        for attr, val in kwargs.items():
            setattr(self, attr, val)

        self.articles = []  # filled by article_callback as workers finish
        self.lang = None    # set when the dump header line is read

        # Always define _cleaners; process() reads it unconditionally.
        self._cleaners = None
        if self.cleanerfile:
            self._cleaners = cleaner.import_cleaners(self.cleanerfile)

        self._trans_re = None
        if self.trans_lang:
            # Matches an interlanguage link line like "[[fr:Titre]]".
            trans_re_str = r'^\[\[{}:([^]]+)'.format(self.trans_lang)
            self._trans_re = re.compile(trans_re_str, re.MULTILINE)

    def get_article(self):
        """Yield the raw XML of each <page> element in the dump, one at a time."""
        with open(self.dumpfile, encoding='utf-8') as dumpf:
            # The first line carries the dump's xml:lang attribute.
            self.lang = langre.search(next(dumpf)).group(1)
            inarticle = False
            lines = []
            for line in dumpf:
                if not inarticle and "<page>" in line:
                    inarticle = True
                elif "</page>" in line:
                    inarticle = False
                    # Join once per page instead of quadratic str +=.
                    yield "".join(lines)
                    lines = []
                if inarticle:
                    lines.append(line)

    def process(self):
        """Fan article processing out over a process pool, collecting results."""
        if self.savedir and not os.path.isdir(self.savedir):
            os.mkdir(self.savedir)
        pool = Pool()
        manager = Manager()
        # Bound the queue (two items per worker, matching Pool's default
        # process count) so the reader cannot run far ahead of the workers;
        # avoids poking at the private pool._pool attribute.
        queue = manager.Queue((os.cpu_count() or 1) * 2)
        for article in self.get_article():
            queue.put((article, self._trans_re, self.exclusive, self.redirects,
                       self.savedir, self.wordcount, self._cleaners))
            pool.apply_async(feed_queue, (process_article, queue),
                             callback=self.article_callback)
        pool.close()
        pool.join()

    def article_callback(self, article):
        """Pool callback: keep every non-None result."""
        if article:
            self.articles.append(article)

    def __str__(self):
        # str() guards against lang still being None (dump not yet read).
        return ("Wikipedia language: " + str(self.lang) +
                "\nARTICLES:\n" +
                "\n".join(str(article) for article in self.articles))

class Entry:
    """Base class for anything extracted from the dump; holds the page title."""

    def __init__(self, articleText=None):
        self.title = ""
        if articleText:
            # Remember the offset just past the title for later parsing.
            self._progress = self.process_title(articleText)

    def process_title(self, text):
        """Store the page title found in *text*; return the match's end offset."""
        found = titlere.search(text)
        self.title = found.group(1)
        return found.end()

    def __str__(self):
        return "Article: " + self.title

class Redirect(Entry):
    """An abstraction of a wikipedia redirect page.

    Many pages in wikipedia simply redirect to another page, indicating
    that the page's title is a synonym of the title of the target page.
    """

    def __init__(self, articleText, target):
        super().__init__(articleText)
        self.target = target

    def __str__(self):
        return super().__str__() + "\nRedirect to: " + self.target

class Article(Entry):
    """An abstraction of a wikipedia article.

    Contains elements of a wikipedia article necessary to the task at hand.
    At this point, we deal with translations of an article and synonyms.
    Synonyms should be culled through redirects.
    """

    def __init__(self, articleText, trans_re=None):
        super().__init__(articleText)
        self._trans_re = trans_re
        self.translation = None
        self.synonyms = []
        self.process(articleText)

    def process(self, text):
        """Scan the article text for a translation link, if a pattern was given."""
        if not self._trans_re:
            return
        found = self._trans_re.search(text)
        if found:
            self.translation = found.group(1)

    def __str__(self):
        return super().__str__() + "\nTranslations: " + str(self.translation)

def json_encoder():
    """Return a JSON encoder that serializes objects via their public attributes."""

    def public_attrs(obj):
        """Map an object to a dict of its truthy, non-underscore attributes."""
        return {name: value for name, value in vars(obj).items()
                if not name.startswith('_') and value}

    return json.JSONEncoder(ensure_ascii=False, default=public_attrs)

def main():
    """Parse command-line options, process the dump, and print JSON to stdout."""
    parser = OptionParser(usage="usage: %prog [options] dumpfile")
    parser.add_option("-t", "--translation-language",
                        dest="trans_lang", default=None,
                        help="store translation for specified language")
    parser.add_option("-x", "--exclusive",
                        action="store_true", dest="exclusive", default=False,
                        help="only store those terms that are translated into language")
    parser.add_option("-s", "--save-pages",
                        dest="savedir", default=None,
                        help="save wikipedia pages to given directory \
                              (it will be created if it does not exist)")
    parser.add_option("-c", "--clean-pages",
                        dest="cleanerfile", default=None,
                        help="clean pages with specified cleaner file")
    parser.add_option("-w", "--wordcount",
                        action="store_true", dest="wordcount", default=False,
                        help="store wordcount of cleaned pages")
    parser.add_option("-r", "--process-redirects",
                        action="store_true", dest="redirects", default=False,
                        help="process synonym information from redirects \
                             (false by default)")
    (options, args) = parser.parse_args()

    # Fail with a usage message instead of a bare IndexError when the
    # positional dumpfile argument is missing.
    if not args:
        parser.error("a dumpfile argument is required")

    dump = WikiDump(args.pop(), **options.__dict__)
    dump.process()
    print(json_encoder().encode(dump))
    
if __name__ == "__main__":
    exit(main()) 
