#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Mark Perry"
__date__ = "10/22/2008"
__license__ = "GPLv2"

"""
diff the wikipedia revision history and create a wikianalysis

uncompressed wikipedia dump input comes from STDIN 
"""

import os, sys
import re, string
import time
import wikianalysis, wikistrip

DEBUG = False            # when True, print a message for revisions that fail to parse
IGNORE_REDIRECTS = True  # when True, pages whose last text is "#redirect..." are not analysed

# string handling
# precompiled regexes for pulling fields out of the raw XML dump text
re_title = re.compile(r'<title>(.+?)</title>\s*?<id>(.*?)</id>', re.DOTALL)  # page <title> and following <id>
re_timestamp = re.compile(r'<timestamp>(.*?)</timestamp>')  # ISO revision timestamp
re_user = re.compile(r'<username>(.*?)</username>.*?<id>([0-9]*)</id>', re.DOTALL)  # registered contributor name + numeric id
re_text = re.compile(r'(<contributor>.*?)<text.*?>(.*)</text>', re.DOTALL)  # group 1: contributor blob; group 2: wiki text body
re_add = re.compile(r'\{\+(.*?)\+\}', re.DOTALL)  # wdiff "added" markup; NOTE(review): not used in this file's visible code
re_remove = re.compile(r'\[\-(.*?)\-\]', re.DOTALL)  # wdiff "removed" markup; NOTE(review): not used in this file's visible code
re_skip = re.compile(r'[^:]*?(Talk:|Image:|User:)', re.IGNORECASE)  # titles in non-article namespaces to skip


def diffText(last_words, words, text, num):
    """ diff a text with the previous and return add/removes """
    
    s1 = 'temp%s' % (num % 2)
    s2 = 'temp%s' % ((num + 1) % 2)
    
    # base case -- write a blank file
    if num == 0:
        f = open(s1,'w');
        f.close()
    
    # write the temp file
    f = open(s2,'w'); 
    f.write(text); 
    f.close()

    # diff the temp file with stdin
    pipe = os.popen('wdiff -w " [-" -x "-] " -y " {+" -z "+} " %s %s' % (s1, s2)); 
    diff = pipe.read()
    pipe.close()

    add_ranges, remove_ranges = [], []
    in_add, in_remove = False, False
    start, end = 0, 0
    add_offset, remove_offset = 0, 0
    
    # split diff into words to find add and removes
    # also keep track of add remove positions
    changes = diff.split()
    for (i, change) in enumerate(changes):
        
        if change.startswith('{+'):
            # start of a add
            in_add = True
            start = i - add_offset
            
            if in_remove: print '*error during diff: in remove but start of add'
            
        elif change.startswith('[-'):
            # start of a remove
            in_remove = True
            start = i - remove_offset
            
            # remove also counts as an add position as different words are together
            add_ranges.append( (i - add_offset, i - add_offset) )
            
            if in_add: print '*error durring diff: in an add but start of remove'
            
        if in_add:
            remove_offset += 1
            
            if change.endswith('+}'):
                # end of an add
                in_add = False
                end = i - add_offset + 1
                
                add_ranges.append( (start, end) )
        
        elif in_remove:
            add_offset += 1
            
            if change.endswith('-]'):
                # end of a remove
                in_remove = False
                end = i - remove_offset + 1
                
                remove_ranges.append( (start, end) )

    return add_ranges, remove_ranges


# initialize variables
# `page` buffers raw dump lines for the current <revision> element;
# the rest is per-page running state that carries across loop iterations
page, line = [], ''
title, page_id, revision_num, contrib = '', '', 0, ''
text, words, last_words = '', [], []
timestamp = ()
page_count, revision_count = 0, 0
skip = False
analysis = None
start_time = time.mktime(time.localtime())

# input comes from STDIN
for line in sys.stdin:

    if line.find("<page>") >= 0:
        # start of a new page
        # finish processing previous page
        # (here `text`/`timestamp`/`title` still hold the PREVIOUS page's
        # last parsed revision, which is what gets finalized below)
    
        # statistics
        page_count += 1
        revision_count += revision_num
    
        # redirect test is against the last revision text of the page
        if IGNORE_REDIRECTS and re.match("#redirect",text,re.IGNORECASE) != None:
            print 'ignoring redirect'
        elif not skip and title != '':
            # finish processing a page
            analysis.finish(text, revision_num, timestamp)
        
            # print statistics
            cur_time = time.mktime(time.localtime())
            diff_time = cur_time - start_time
            print "%s id=%s with %s revisions." % (title, page_id, revision_num) 
            print "Total pages processed: %s\tTotal revisions seen: %s" % (page_count, revision_count)
            if diff_time > 0:
                print "%s pages/sec, %s revisions/sec" % (page_count / diff_time, revision_count / diff_time)
        
        # reset variables
        page = []
        title, page_id, revision_num, contrib = '', '', 0, ''
        text, words, last_words = '', [], []
        analysis = None
        skip = False
    
    elif skip:
        # skipping unwanted page
        continue

    elif line.find("</revision>") >= 0:
        # end of revision, process it: join the buffered lines into one
        # string and pull the fields out with the precompiled regexes
        revision = ' '.join(page)
        
        # get the timestamp
        s_time = re_timestamp.search(revision)
        if s_time == None: 
            page = []
            if DEBUG: print '*error durring diff: no timestamp found'
            continue
        timestamp = time.strptime(s_time.group(1), "%Y-%m-%dT%H:%M:%SZ")
        
        if title == '':
            # start of a new page -- title/id appear only in the first
            # revision block after <page>, so parse them once here
            s = re_title.search(revision)
            if s == None:
                page = []
                if DEBUG: print '*error durring diff: no title/page_id found'
                continue
            title = s.group(1)
            page_id = s.group(2)
        
            # skip non-article namespaces (Talk:, Image:, User:)
            skip |= re_skip.match(title) != None
            ###skip |= int(page_id) % 9 != 0
            # NOTE(review): hard-coded cutoff -- processing stops entirely
            # once a page id above 100 is seen; looks like a debug limit
            if int(page_id) > 100: break
            if skip:
                print '\n' + title + " skipping..."                
                continue
        
            print '\n' + title + " processing..."
            # init a wikianalysis
            analysis = wikianalysis.WikiAnalysis(title, page_id, timestamp)
        
        # find the text (contributor blob in group 1, body in group 2)
        s_text = re_text.search(revision)
        if s_text == None:
            if DEBUG: print '*error durring diff: no text found'
            page = []
            continue
        
        contrib = s_text.group(1)
        text = s_text.group(2)
        
        # get the user: registered users have <username>/<id>; anonymous
        # edits carry only an <ip>, recorded as the username with id 0
        s_user = re_user.search(contrib)
        if s_user == None:
            s_contrib = re.search(r'<ip>(.*?)</ip>',contrib)
            if s_contrib == None: username = ""
            else: username = s_contrib.group(1)
            user_id = 0
        else:
            username = s_user.group(1)
            user_id = int(s_user.group(2))
        
        # strip wiki formatting before diffing so markup changes don't count
        stripped = wikistrip.strip(text)
        words = stripped.split()
        joined = ' '.join(words)
        
        # diff to get add remove chunks (revision_num picks the temp file)
        add_range, remove_range = diffText(last_words, words, joined, revision_num)
        
        # process revision
        analysis.process_age(revision_num, timestamp, username, user_id, add_range, remove_range, words, last_words, stripped)
        
        # clear the page and continue
        page = []
        revision_num += 1
        last_words = words
    
    # continue collecting page
    else:
        page.append(line)



