#!/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import sys, urllib, traceback
import requests
from itertools import izip_longest
from time import time

def retrieve_titles(lang, apcontinue='', limit=500):
    """Fetch one batch of non-redirect page titles from {lang}.wikipedia.org.

    Parameters:
        lang: wiki language subdomain, e.g. 'en' or 'mi'.
        apcontinue: continuation token from a previous call ('' for the first batch).
        limit: maximum titles per request (the API caps anonymous clients at 500).

    Returns a (titles, apcontinue) pair: `titles` is a list of UTF-8 encoded
    title strings; `apcontinue` is the token for the next batch, or None when
    the listing is exhausted.
    """
    # Let requests build the query string: this percent-encodes apcontinue,
    # which the previous hand-formatted URL did not — continuation tokens can
    # contain '&', '=', spaces, etc., which would corrupt the query.
    params = {
        'action': 'query',
        'format': 'json',
        'list': 'allpages',
        'apfilterredir': 'nonredirects',
        'aplimit': limit,
        'apcontinue': apcontinue,
    }
    url = 'http://{lang}.wikipedia.org/w/api.php'.format(lang=lang)

    js = requests.get(url, params=params).json()

    if 'query-continue' in js:
        apcontinue = js['query-continue']['allpages']['apcontinue'].encode('utf-8')
    else:
        apcontinue = None  # no more batches

    titles = [page['title'].encode('utf-8') for page in js['query']['allpages']]
    return (titles, apcontinue)

def retrieve_page(lang, title):
    """Return the plain-text extract of one article, UTF-8 encoded.

    `title` must already be a UTF-8 byte string; it is URL-quoted here.
    Raises KeyError if the response carries no 'extract' (e.g. missing page).
    """
    quoted = urllib.quote_plus(title)

    url = 'http://{lang}.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&titles={title}&explaintext'
    url = url.format(lang=lang, title=quoted)

    pages = requests.get(url).json()['query']['pages']
    # The response maps page-id -> page object; exactly one entry is expected
    # since we asked for a single title.
    return list(pages.values())[0]['extract'].encode('utf-8')

def retrieve_langlinks(lang, titles, trg_lang):
    """Look up interlanguage links for a batch of titles in one API call.

    Returns (src_title, trg_title) pairs, UTF-8 encoded, in the same order as
    `titles`; titles without a link to `trg_lang` are silently omitted.
    `titles` must be a list (its .index() is used to restore ordering).
    """
    title_param = '|'.join(urllib.quote_plus(title) for title in titles)
    url = ('http://{lang}.wikipedia.org/w/api.php?action=query&format=json&prop=langlinks'
            '&lllang={trg_lang}&titles={titles}')
    url = url.format(lang=lang, trg_lang=trg_lang, titles=title_param)
    js = requests.get(url).json()

    langlinks = [
        (page['title'].encode('utf-8'), page['langlinks'][0]['*'].encode('utf-8'))
        for page in js['query']['pages'].values()
        if 'langlinks' in page
    ]

    # Pages come back keyed by page-id in arbitrary order; restore input order.
    return sorted(langlinks, key=lambda pair: titles.index(pair[0]))

def retrieve_all_titles(lang):
    """Print every non-redirect title of the given wiki to stdout, one per
    line, following API continuation tokens; return the total count."""
    token = ''
    total = 0
    # retrieve_titles yields None as the token once the listing is exhausted.
    while token is not None:
        batch, token = retrieve_titles(lang, token)
        total += len(batch)
        for title in batch:
            print(title)
    return total

def retrieve_all_articles(lang, titles):
    """Download each title and print it wrapped in <article title="..."> tags.

    Titles that fail to download (network error, missing extract) are
    reported on stderr and skipped. Returns the number of articles printed.
    """
    total = 0
    for title in titles:
        try:
            txt = retrieve_page(lang, title)
        # Catch only real errors: the previous bare `except:` also swallowed
        # KeyboardInterrupt, so Ctrl-C could not abort the crawl (the caller
        # in __main__ explicitly expects KeyboardInterrupt to propagate).
        except Exception:
            traceback.print_exc()
            sys.stderr.write('Title: {}\n'.format(title))
            continue
        # Escape every character that is illegal in an XML attribute value;
        # '&' must be replaced first so it does not re-escape the entities.
        attr = title.replace('&', '&amp;').replace('<', '&lt;').replace('"', '&quot;')
        print('<article title="{}">'.format(attr))
        print(txt)
        print('</article>')
        total += 1
    return total

def retrieve_all_langlinks(lang, titles, trg_lang):
    """Print 'src<TAB>trg' langlink pairs for every title, querying the API
    in batches of 10; return the number of titles processed (including ones
    that had no link in the target language)."""
    total = 0
    batch_size = 10

    # Standard "grouper" recipe: zipping batch_size references to a single
    # iterator yields fixed-size chunks, with the last chunk padded by None.
    chunks = izip_longest(*([iter(titles)] * batch_size), fillvalue=None)
    for chunk in chunks:
        batch = [t for t in chunk if t is not None]

        for src, trg in retrieve_langlinks(lang, batch, trg_lang):
            print('{}\t{}'.format(src, trg))

        total += len(batch)

    return total

if __name__ == '__main__':
    # Example pipeline:
    #   ./wikicrawler.py titles mi | head -n 100 | ./wikicrawler.py articles mi > wiki.mi
    try:
        action, lang = sys.argv[1:3]
        if action == 'langlinks':
            trg_lang = sys.argv[3]
    # ValueError: fewer/more than 2 values to unpack; IndexError: argv[3] missing.
    except (ValueError, IndexError):
        sys.exit('usage: {} ACTION LANG [TRG_LANG]'.format(sys.argv[0]))

    # rstrip('\n') rather than line[:-1]: the latter would eat the last
    # character of a final line that has no trailing newline.
    stdin_titles = (line.rstrip('\n') for line in sys.stdin)

    try:
        t = time()
        if action == 'titles':
            total = retrieve_all_titles(lang)
        elif action == 'articles':
            total = retrieve_all_articles(lang, stdin_titles)
        elif action == 'langlinks':
            total = retrieve_all_langlinks(lang, stdin_titles, trg_lang)
        else:
            # Previously an unknown action fell through and crashed with a
            # NameError on `total`; fail with the usage message instead.
            sys.exit('usage: {} ACTION LANG [TRG_LANG]'.format(sys.argv[0]))

        t = time() - t
        # Guard against a zero elapsed time (sub-resolution runs).
        rate = total / t if t else float(total)
        sys.stderr.write('Retrieved {} articles in {:.2f} sec, average: {:.1f} articles/sec\n'.format(total, t, rate))
    except (IOError, KeyboardInterrupt):
        # Broken pipe (e.g. piped into `head`) or Ctrl-C: exit quietly.
        pass
