#!/usr/bin/env python
""" Downloads the entire revision history of a Wikipedia article """

__licence__ = """
Copyright (C) 2011  Dennis Hoppe <dennis.hoppe(/\t)uni-weimar.de>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from cjson import decode
from thirdparty.elementtree.SimpleXMLWriter import XMLWriter
from getopt import getopt
from getopt import GetoptError
from IPy import IP
from logging import DEBUG
from logging import INFO
from logging import getLogger
from logging import Formatter
from logging import StreamHandler
from urllib import urlencode
from warnings import warn
from pprint import pprint
import urllib2
from sys import argv
from sys import exit
from sys import stdout

__author__ = 'Dennis Hoppe'
__copyright__ = 'Copyright 2011'
__credits__ = ['Dennis Hoppe']
__version__ = '$Id: wikidownloader.py,v 1.3 2011/03/06 19:32:13 hoppe Exp $'
__maintainer__ = 'Dennis Hoppe'
__email__ = 'dennis.hoppe(/\t)uni-weimar.de'
__status__ = 'Development'

class WikipediaDownloader():
  """
  Usage: python wikipedidownloader.py [options] [source]

  Options:
    -a ..., --article=...   select an article name for which the revision
                            history should be fetched
    -h, --help              show this help
    -v, --verbose           show debugging information while parsing

  Examples:
    wikipedidownloader.py -a 1992_Aloha_Bowl   downloads the revision
                                               history for the article
                                               '1992 Aloha Bowl'
  """

  def __init__(self, log_level=INFO):
    """Set up the MediaWiki API endpoint and a console logger.

    log_level -- a logging level constant (e.g. INFO or DEBUG) applied to
                 both the logger and its stream handler.
    """
    self.wikiapi_url = 'http://en.wikipedia.org/w/api.php?'
    # logging
    self.logger = getLogger('WikipediaDownloader')
    self.logger.setLevel(log_level)
    handler = StreamHandler()
    handler.setLevel(log_level)
    formatter = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)

  def download_article(self, article_name, rvlimit=50, rvstartid=None):
    """Fetch up to rvlimit revisions of an article from the MediaWiki API.

    article_name -- title of the Wikipedia article
    rvlimit      -- maximum number of revisions per request (API page size)
    rvstartid    -- revision id to continue from, or None to start at the
                    oldest revision ('rvdir':'newer')

    Returns the raw JSON response body as a string.
    """
    params = {
      'action': 'query',
      'prop': 'revisions',
      'titles': article_name,
      'rvprop': 'ids|timestamp|user|userid|comment|content',
      'rvlimit': rvlimit,
      'rvdir': 'newer',
      'format': 'json' }
    if rvstartid is not None:
      params['rvstartid'] = rvstartid

    data = urlencode(params)
    self.logger.info('downloading ' + str(rvlimit) +
      ' revision(s) of article ' + article_name +
      ' starting at revision ' + str(rvstartid))
    request = urllib2.Request(self.wikiapi_url, data)
    response = urllib2.urlopen(request)
    try:
      # ensure the HTTP connection is released even if read() fails
      json = response.read()
    finally:
      response.close()
    return json

  def decode_json(self, json):
    """Decode a JSON string into Python objects via cjson."""
    json_contents = decode(json)
    return json_contents

  def get_article_information(self, json_contents):
    """Extract (article_title, article_id) from a decoded API response.

    Raises IndexError if the response contains no pages.
    """
    pages = json_contents['query']['pages']
    try:
      # list(...) keeps this working on both Python 2 and 3 dict views
      article_id = list(pages.keys())[0]
    except IndexError:
      # raising a plain string is illegal in modern Python; raise a
      # real exception instance instead
      raise IndexError('no pages found in API response')
    article = pages[article_id]
    article_title = article['title']
    return (article_title, article_id)

  def process_json(self, json_contents, article_id):
    """Convert the revisions of one API response page into plain dicts.

    Returns (revision_objects, rvstartid) where rvstartid is the id to
    continue the download from, or -1 when no continuation is offered
    ('query-continue' absent).

    Raises KeyError if a revision carries no 'revid'.
    """
    revision_objects = []
    revisions = json_contents['query']['pages'][article_id]['revisions']
    for revision in revisions:
      # optional fields default to '' when the API omits them
      comment = revision.get('comment', '')
      timestamp = revision.get('timestamp', '')
      userid = revision.get('userid', '')
      user = revision.get('user', '')
      try:
        revid = revision['revid']
      except KeyError:
        # a revision without an id is unusable; fail loudly
        raise KeyError('revision id is not set')
      content = revision.get('*')
      if content is None:
        warn('revision content is empty', UserWarning)
        content = ''
      # escape non-ASCII characters as XML character references so the
      # values can be written into the XML export verbatim
      revision_obj = {
        'comment': comment.encode('ascii', 'xmlcharrefreplace'),
        'timestamp': timestamp,
        'user': user.encode('ascii', 'xmlcharrefreplace'),
        'userid': userid,
        'revid': revid,
        'content': content.encode('ascii', 'xmlcharrefreplace'),
      }
      revision_objects.append(revision_obj)
    try:
      rvstartid = json_contents['query-continue']['revisions']['rvstartid']
    except KeyError:
      rvstartid = -1
    return (revision_objects, rvstartid)

  def convert_revision_to_xml(self, writer, revision):
    """Write one revision dict as a MediaWiki-export <revision> element."""
    writer.start('revision') # open 'revision'
    writer.element('id', str(revision['revid']))
    writer.element('timestamp', str(revision['timestamp']))
    writer.start('contributor') # open 'contributor'
    user = revision['user']
    if user != '':
      # anonymous edits are recorded by IP address, registered ones by name
      if self.correct_ip(user):
        writer.element('ip', str(user))
      else:
        writer.element('username', revision['user'])
    userid = revision['userid']
    if userid != '':
      writer.element('id', str(userid))
    writer.end() # close 'contributor'
    writer.start('comment') # open 'comment'
    comment = revision['comment']
    if comment != '':
      writer.data(comment)
    writer.end() # close 'comment'
    writer.element('text', revision['content'], { 'xml:space':'preserve' })
    writer.end() # close 'revision'
    writer.flush()

  def correct_ip(self, ip_address):
    """Return True iff ip_address looks like a valid dotted-quad IPv4."""
    # require exactly four dot-separated parts before asking IPy, since
    # IP() also accepts other notations we do not want to treat as IPs
    if len(str(ip_address).split('.')) != 4:
      return False
    try:
      IP(ip_address)
      return True
    except ValueError:
      return False

  def recursive_download(self, name, writer, article_id, startid=None):
    """Download every revision of an article and stream it to the writer.

    Iterates page by page (50 revisions per request) following the API's
    'query-continue' ids. Implemented as a loop rather than recursion so
    articles with very long histories cannot exhaust the recursion limit.
    """
    rvstartid = startid
    while True:
      json = self.download_article(article_name=name, rvstartid=rvstartid)
      json_contents = self.decode_json(json)
      (revisions, rvstartid) = self.process_json(json_contents, article_id)
      for revision in revisions:
        self.convert_revision_to_xml(writer, revision)
      if rvstartid <= -1:
        break

def main(argv):
  """Parse command-line options, download the complete revision history of
  the selected article, and write it to '<article_id>.xml' in MediaWiki
  XML export format (version 0.3).

  argv -- command-line arguments without the program name
  """
  name = ''
  log_level = INFO
  try:
    # 'article=' (trailing '=') tells getopt the long option takes an
    # argument; without it '--article=NAME' raises GetoptError
    opts, args = getopt(argv, "ha:v", ["help", "article=", "verbose"])
  except GetoptError:
    usage()
    exit(2)
  for opt, arg in opts:
    if opt in ('-h', '--help'):
      usage()
      exit()
    elif opt in ('-a', '--article'):
      name = arg
    elif opt in ('-v', '--verbose'):
      log_level = DEBUG

  # download article
  instance = WikipediaDownloader(log_level)

  # fetch a single revision first, only to resolve the article's title
  # and numeric page id
  json = instance.download_article(article_name=name, rvlimit=1)
  json_contents = instance.decode_json(json)
  (article_title, article_id) = instance.get_article_information(json_contents)

  # output_stream = stdout
  output_stream = open(str(article_id) + '.xml', 'w')
  try:
    writer = XMLWriter(output_stream, 'utf-8')
    attributes = { 'xmlns':'http://www.mediawiki.org/xml/export-0.3/',
      'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance',
      'xsi:schemaLocation':'http://www.mediawiki.org/xml/export-0.3/ ' +
      'http://www.mediawiki.org/xml/export-0.3.xsd',
      'version':'0.3', 'xml:lang':'en' }
    mediawiki = writer.start('mediawiki', attributes)
    writer.start('siteinfo') # open 'siteinfo'
    writer.element('sitename', 'Wikipedia')
    writer.element('base', 'http://en.wikipedia.org/wiki/Main_Page')
    writer.element('generator', 'MediaWiki 1.18alpha')
    writer.element('case', 'first-letter')
    writer.start('namespaces') # open 'namespaces'
    writer.element('namespace', 'Media', { 'key':'-2' })
    writer.element('namespace', 'Special', { 'key':'-1' })
    # the main namespace (key 0) has no name, hence the empty element
    writer.start('namespace', { 'key':'0' }); writer.end()
    writer.element('namespace', 'Talk', { 'key':'1' })
    writer.element('namespace', 'User', { 'key':'2' })
    writer.element('namespace', 'User talk', { 'key':'3' })
    writer.element('namespace', 'Wikipedia', { 'key':'4' })
    writer.element('namespace', 'Wikipedia talk', { 'key':'5' })
    writer.element('namespace', 'Image', { 'key':'6' })
    writer.element('namespace', 'Image talk', { 'key':'7' })
    writer.element('namespace', 'MediaWiki', { 'key':'8' })
    writer.element('namespace', 'MediaWiki talk', { 'key':'9' })
    writer.element('namespace', 'Template', { 'key':'10' })
    writer.element('namespace', 'Template talk', { 'key':'11' })
    writer.element('namespace', 'Help', { 'key':'12' })
    writer.element('namespace', 'Help talk', { 'key':'13' })
    writer.element('namespace', 'Category', { 'key':'14' })
    writer.element('namespace', 'Category talk', { 'key':'15' })
    writer.element('namespace', 'Portal', { 'key':'100' })
    writer.element('namespace', 'Portal talk', { 'key':'101' })
    writer.end() # close 'namespaces'
    writer.end() # close 'siteinfo'
    writer.start('page') # open 'page'
    writer.element('title', article_title)
    writer.element('id', str(article_id))

    instance.recursive_download(name, writer, article_id)

    writer.end() # close 'page'
    writer.close(mediawiki)
  finally:
    # close the output file even when a download or write step fails
    output_stream.close()
  
def usage():
  print __doc__

if __name__ == "__main__":
  main(argv[1:])
