#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Mark Perry"
__date__ = "10/22/2008"
__license__ = "GPLv2"

"""
train using crm114 on a wikipedia article's diff history
also classifies most recent revision of the article text

article diffs come from stdin for Hadoop/MapReduce piping support
"""

import os, sys
import re, string
import crm
import time
import csv
import copy

import wikistrip

# directories
TEMP_DIR = '/dev/shm/'
OUTPUT_DIR = 'wiki/public/jp/'
csv_file = 'articles.csv'

def check_path(path):
    """ create path (including any missing parent directories) if absent

        BUG FIX: the original used os.mkdir, which cannot create
        intermediate directories -- OUTPUT_DIR ('wiki/public/jp/') would
        raise OSError on a fresh checkout.  os.makedirs handles the
        nested case and is a no-op-equivalent for single-level paths. """
    if not os.path.exists(path):
        os.makedirs(path)
check_path(TEMP_DIR)
check_path(OUTPUT_DIR)

# constants
WINDOW_SIZE = 25                    # revisions a diff is buffered before training (retained-content window)
CHUNKSIZE = 5                       # words per tracked chunk; odd so every chunk has a middle word
CLASSIFY_WINDOW_SIZE = 5            # sliding window classification size for: i-window:i+window

VERBOSE = True                      # print per-revision progress / training messages
DEBUG = True                        # print sanity-check warnings (bad chunk sizes, missing dict entries)

class Bunch:
    """ minimal attribute container: Bunch(a=1).a == 1 """
    def __init__(self, **kwds):
        # bind every keyword argument as an instance attribute
        for name, value in kwds.items():
            setattr(self, name, value)


class WikiAnalysis:
    """ collect and learn an article

        Incrementally consumes one Wikipedia article's revision diffs
        (driven externally -- see references to "wikidiff" below),
        trains a crm114 add/remove classifier on diff text, tracks
        per-chunk age/retention statistics in self.age_dict, and
        finally classifies the newest revision, writing a
        tab-delimited annotation file to OUTPUT_DIR. """
    

    ############
    ### INIT ###
    
    def __init__(self, title, page_id, timestamp):
        """ init paths and variables 
            called from wikidiff on the start of a new article

            title     -- article title (used only for logging)
            page_id   -- string article id; also names the classifier dir
                         and the output file
            timestamp -- article creation time; passed to time.mktime,
                         so presumably a time.struct_time (TODO confirm
                         against the wikidiff caller) """
        
        # remove any previous classifiers
        # (crm.Classifier keeps its state on disk under TEMP_DIR, so a
        # rerun for the same page must start from a clean slate)
        exists = os.path.exists(TEMP_DIR + "wiki" + page_id)
        if exists:
            from shutil import rmtree
            rmtree(TEMP_DIR + "wiki" + page_id)
        
        # variables
        self.title = title
        self.page_id = page_id
        self.creation_time = timestamp
        
        # add/remove variables
        # c_ar classifies text into the two categories 'sub' (removed)
        # and 'add' (added)
        self.c_ar = crm.Classifier(TEMP_DIR + "wiki" + page_id, ['sub', 'add'])
        # FIFO buffer of the last WINDOW_SIZE revisions' diff data;
        # a revision is only trained on once it is WINDOW_SIZE
        # revisions old (so we know whether its edits were retained)
        self.windowed_ar = []
        self.ar_adds, self.ar_removes = [], [] # batch training (NOTE(review): appears unused in this file)
        
        # age variables
        # age_dict: chunk text -> Bunch of per-chunk statistics
        # (first/last revision and timestamp, seconds/revisions seen,
        # occurrence count, times removed)
        self.age_dict = {}
        self.most_recent_time = int(time.mktime(timestamp))
        
        # stats
        self.add_trains, self.sub_trains = 0, 0
        
        
    ##################
    ### ADD/REMOVE ###
    
    def check_ar(self, add_indexes, remove_indexes, cur_words, last_words, revision_num, text):
        """ checks if windowed add/removes have reached the window size
            called from process_ar()

            Buffers the current revision's diff data; once the buffer
            holds WINDOW_SIZE revisions, pops the oldest one and trains
            the classifier on it (its retention can now be judged).

            NOTE(review): revisions still buffered when finish() runs
            are never trained -- there is no flush; appears intentional. """
        
        # window stores add and remove ranges
        self.windowed_ar.append( (add_indexes, remove_indexes, cur_words, last_words, text) )
        
        if len(self.windowed_ar) < WINDOW_SIZE:
            # dont process if window size hasn't been reached
            return
        
        # process old windowed revision
        w_add_indexes, w_remove_indexes, w_cur_words, w_last_words, text = self.windowed_ar.pop(0)
        
        # add classify/train text
        # ratio = fraction of that revision's added chunks still present
        # WINDOW_SIZE revisions later
        ratio = self.process_ar(w_add_indexes, w_cur_words, revision_num, add = True)
        if ratio > .5:
            # majority of the adds survived -> treat the whole revision
            # text as a positive 'add' example
            t, prob, pr = self.c_ar.classify(text)
            # NOTE(review): desired_pr uses len(cur_words) (the *current*
            # revision), not w_cur_words (the popped one) -- confirm
            # this is intentional
            desired_pr = min(len(cur_words)/2, 300.0)  # for small articles
            
            # train-on-error style: only learn if misclassified or the
            # classifier is not yet confident enough
            if t != "add" or pr < desired_pr:
                self.c_ar.learn("add", text)
                self.add_trains += 1
                if VERBOSE: print "TRAIN ADD - pr was %s" % pr
        
        # remove
        # (process_ar itself trains the 'sub' class from removed text)
        self.process_ar(w_remove_indexes, w_last_words, revision_num, add = False)
    
    
    def process_ar(self, index_array, words, revision_num, add):
        """ joins words based on indexes to train for add/remove 
            called from check_ar()

            index_array -- list of (middle word index, revisions_seen
                           snapshot) pairs recorded by process_age()
            words       -- word list the indexes refer to
            add         -- True: only tally retained adds and return the
                           ratio; False: also join non-retained removes
                           into a 'sub' training string

            returns the fraction of indexes within the retained
            threshold (0 if index_array is empty) """
        
        # variables
        prev_i, to_train = 0, ""
        count = 0
        
        # go through windowed indexes
        for (i, prev_revisions_seen) in index_array:
            
            # get the chunk (i = middle index of a chunks)
            # NOTE: Python 2 -- CHUNKSIZE/2 is integer division
            start = max(i - CHUNKSIZE/2, 0)
            end = i + CHUNKSIZE/2 + 1
            chunk = ' '.join(words[start : end])
            age_bunch = self.age_dict[chunk]
            
            # find the total number of revisions seen within the window
            revisions_seen = age_bunch.revisions_seen
            if age_bunch.count > 0:
                # chunk is still in the article: extend to current revision
                revisions_seen += revision_num - age_bunch.last_revision
            # subtract the snapshot taken when the index was recorded,
            # leaving only revisions seen during this window
            revisions_seen -= prev_revisions_seen
            retained_percent = revisions_seen/float(WINDOW_SIZE)
            
            # determine if retained for add/remove
            if add and retained_percent > .75:
                # add just tallies
                count += 1
                continue
            elif not add and retained_percent < .75:
                # remove joins everything
                if to_train == '':
                    # start - join left side of the chunk (for context)
                    to_train = ' '.join(words[start : i+1])
                else:
                    to_train += ' ' + words[i]
                
                # remember last position            
                prev_i = i
                count += 1
        
        # calculate ratio within retained threshold
        if len(index_array) == 0: ratio = 0
        else: ratio = count / float(len(index_array))
        
        # finish training
        if not add and to_train != "":
            # join words to the right (for context)
            to_train += ' ' + ' '.join(words[prev_i + 1 : prev_i + CHUNKSIZE/2 + 1])
            self.sub_trains += 1
            self.c_ar.learn("sub", to_train)
            #if ratio > .5: 
            #    # train extra if over half of the removes are kept removed
            #    self.c_ar.learn("sub", to_train)
        
        if VERBOSE and add:
            print "%s - revision %s - %s/%s adds" % (self.title, revision_num, count, len(index_array))
        elif VERBOSE and not add:
            print "%s - revision %s - %s/%s removes" % (self.title, revision_num, count, len(index_array))
        
        # return ratio
        return ratio
    
    
    ###############
    ##### AGE #####
    
    def process_age(self, revision_num, timestamp, username, user_id, add_range, remove_range, cur_words, last_words, text):
        """ process age by splitting text into chunks and keep track of statistics 
            stores add and removes to be processed at window_size revisions later
            called from wikidiff after finished diffing a revision

            add_range / remove_range -- lists of (start, end) word index
                ranges added to cur_words / removed from last_words
            Updates self.age_dict chunk statistics and then hands the
            recorded add/remove indexes to check_ar(). """        
        
        add_indexes, remove_indexes = [], []      # A/R list of indexes
        
        def process_block(block_range, words, add = False):
            # helper function to process by chunks
            
            trained = set()         # keep track of trained to ignore overlaps
            
            for (start, end) in block_range:
                
                # expand add/remove block to chunksize
                # so every chunk overlapping the edit is covered
                start = max(start - CHUNKSIZE + 1, 0)
                end = min(end + CHUNKSIZE - 1, len(words))
                size = end - start - CHUNKSIZE + 1
                
                for i in range(size):
                    # go through and join chunks

                    j = start + i
                    k = start + i + CHUNKSIZE
                    chunk = ' '.join(words[j:k])
                    
                    if DEBUG and len(words[j:k]) != CHUNKSIZE and len(words) > CHUNKSIZE:
                        print "* error during train: chunk not equal to chunksize"
                    
                    if j in trained:
                        # overlap so ignore
                        continue
                    trained.add( j )
                    
                    if add:
                        # add handling
                        
                        if chunk not in self.age_dict:
                            # initialize chunk
                            age_bunch = Bunch(  chunk = chunk, username = username, user_id = user_id
                                                , first_revision = revision_num, first_added = timestamp
                                                , last_revision = revision_num, last_added = timestamp
                                                , seconds_seen = 0, revisions_seen = 1
                                                , count = 1, times_removed = 0 )
                            
                            self.age_dict[chunk] = age_bunch
                            
                            # A/R add
                            # NOTE: Python 2 -- (j+k)/2 is integer division
                            middle_index = (j+k)/2
                            add_indexes.append( (middle_index, age_bunch.revisions_seen) )
                            
                        else:
                            # increment chunk
                            age_bunch = self.age_dict[chunk]
                            age_bunch.count += 1
                            
                            if age_bunch.count == 1:
                                # restored chunk to article
                                # (count had dropped to 0 on an earlier remove)
                                age_bunch.last_revision = revision_num
                                age_bunch.last_added = timestamp
                                
                                # A/R add
                                middle_index = (j+k)/2
                                add_indexes.append( (middle_index, age_bunch.revisions_seen) )
                        
                    else:
                        # remove handling
                        
                        if DEBUG and chunk not in self.age_dict:
                            print '*ERROR: during train -- chunk not in dict'
                            continue
                        
                        age_bunch = self.age_dict[chunk]
                        age_bunch.count -= 1
                        age_bunch.times_removed += 1
                        
                        if DEBUG and age_bunch.count < 0:
                            print '*ERROR: during train -- negative count'
                            age_bunch.count = 0
                            continue
                        
                        if age_bunch.count == 0:
                            # removing chunk from article
                            # accumulate how long / how many revisions the
                            # chunk lived since it was (last) added
                            
                            cur_seconds = int(time.mktime(timestamp))
                            self.most_recent_time = max(cur_seconds, self.most_recent_time)
                            last_seconds = int(time.mktime(age_bunch.last_added))
                            
                            if cur_seconds > last_seconds:
                                age_bunch.seconds_seen += cur_seconds - last_seconds
                            else:
                                # the new timestamp is older so use most recent timestamp seen
                                # not perfect but as good as it is going to get
                                age_bunch.seconds_seen += self.most_recent_time - last_seconds
                            
                            age_bunch.revisions_seen += revision_num - age_bunch.last_revision
                            
                            # A/R remove
                            middle_index = (j+k)/2
                            remove_indexes.append( (middle_index, age_bunch.revisions_seen) )
        
        # additions come from the new revision's words, removals from
        # the previous revision's words
        process_block(add_range, cur_words, add = True)
        process_block(remove_range, last_words, add = False)
        
        # A/R processing
        self.check_ar(add_indexes, remove_indexes, cur_words, last_words, revision_num, text) 
    
    
    ###################
    ### CLASSIFYING ###
        
    def finish(self, text, revision_num, timestamp):
        """ finish processing and then classify latest article revision

            text         -- raw wikitext of the newest revision
            revision_num -- its revision number
            timestamp    -- its timestamp (time.struct_time, per the
                            time.mktime call below) """
        
        if VERBOSE:
            print "add trains: %s remove trains: %s" % (self.add_trains, self.sub_trains)
        
        # calculate seconds in articles existence
        create = int(time.mktime(self.creation_time))
        final = int(time.mktime(timestamp))
        existence = final - create
        
        # format
        # wikistrip.format presumably converts wikitext to tagged text
        # consumed by classify() -- TODO confirm against wikistrip
        text = wikistrip.format(text)
        
        # classify
        self.classify(text, revision_num, timestamp, existence)
        
        # write csv
        #TODO: replace with hadoop output
        writer = csv.writer(open(csv_file, "a"))
        writer.writerow([self.title, self.page_id])


    def classify(self, text, revision_num, timestamp, seconds_in_existence):
        """ classify a revision

            Runs a sliding-window add/remove classification over the
            words of `text`, looks up per-chunk age statistics from
            self.age_dict, tallies per-user contribution points, and
            writes everything as a tab-delimited, line-per-field file
            to OUTPUT_DIR/<page_id>. """
        
        # split apart html tags and words    
        tags, tag_index = [], []
        index = 0
        words = []
        
        text_split = re.split("(<[^>]*>)", text)
        for part in text_split:
            if part.startswith("<"):
                # tag
                # tags are held aside (with their word position) and
                # re-inserted after classification
                tags.append(part)
                tag_index.append(index)
                index += 1
            else:
                # word
                # words keep their surrounding whitespace so they can be
                # re-joined with ''.join below
                w = re.findall('\s*[^\s]+\s*', part)
                if len(w) == 0 and len(part) > 0 and len(words) > 0:
                    # space! add onto the end of last word
                    words[len(words)-1] += part
                else:
                    # add words
                    index += len(w)
                    words.extend(w)
        
        # ADD/REMOVE OUTPUT
        # NOTE(review): t_add_conf is collected but never written to the
        # output below -- only t_add_pr is
        t_add_conf, t_add_pr = [], []
        
        # sliding window classify
        # NOTE(review): conf_list/conf2_list are never used
        conf_list, conf2_list = [], []
        for i in xrange(len(words)):
            
            # join words
            start = max(i - CLASSIFY_WINDOW_SIZE - 1, 0)
            end = min(i + CLASSIFY_WINDOW_SIZE, len(words))
            s = ''.join(words[start:end])
            
            # add/remove
            # normalize so conf/pr always express "add-ness":
            # 'sub' results are mirrored to the other side of the scale
            t, conf, pr = self.c_ar.classify(s)
            if t == 'sub': 
                conf = 1.0 - conf
                pr = -pr
            t_add_conf.append(str(conf))
            t_add_pr.append(str(pr))
        
        # AGE OUTPUT
        # split text into chunks

        # per-word output columns (parallel lists, one entry per word)
        t_first_revision = []
        t_first_added = []
        t_username = []
        t_seconds_seen = []
        t_revisions_seen = []
        t_times_removed = []
        
        # build chunks
        chunks = []
        detagged = re.sub('<[^>]*>','',text).split()
        for i in xrange(len(detagged)):
            # TODO: double check these...
            # chunk centered on word i, clamped so a full CHUNKSIZE
            # window fits inside the text (Python 2 integer division)
            start = min(i-CHUNKSIZE/2, len(detagged)-CHUNKSIZE)
            if start < 0: start = 0
            end = start + CHUNKSIZE
            
            chunk = ' '.join(detagged[start:end])
            chunks.append(chunk)
            
            if DEBUG and len(detagged) > CHUNKSIZE:
                if start < 0: print '*DEBUG: start less than zero'
                if len(detagged[start:end]) != CHUNKSIZE: print '*DEBUG: chunk size not correct'
            
            # get chunk data            
            if chunk in self.age_dict:
                age_bunch = self.age_dict[chunk]
                
                t_first_revision.append(str(age_bunch.first_revision))
                t_first_added.append(str(age_bunch.first_added))
                t_username.append(age_bunch.username)
                
                seconds = age_bunch.seconds_seen
                revisions = age_bunch.revisions_seen
                
                if age_bunch.count > 0:
                    # chunk is still live: extend its lifetime up to the
                    # revision being classified
                    # increment seconds seen
                    cur_seconds = int(time.mktime(timestamp))
                    last_seconds = int(time.mktime(age_bunch.last_added))
                    seconds += cur_seconds - last_seconds
                
                    # increment revisions seen
                    revisions += revision_num - age_bunch.last_revision
                
                t_seconds_seen.append(str(seconds))
                t_revisions_seen.append(str(revisions))
                t_times_removed.append(str(age_bunch.times_removed))
                
            elif len(t_first_revision) > 0:
                # unseen chunk: repeat the previous word's stats so the
                # output columns stay aligned with the word list
                if DEBUG: print '*DEBUG: chunk not seen before during classify -- %s' % chunk
                t_first_revision.append(t_first_revision[-1:][0])
                t_first_added.append(t_first_added[-1:][0])
                t_username.append(t_username[-1:][0])
                
                t_seconds_seen.append(t_seconds_seen[-1:][0])
                t_revisions_seen.append(t_revisions_seen[-1:][0])
                t_times_removed.append(t_times_removed[-1:][0]) 

        # tally points
        ln_name, ln_num_chunks, ln_total_time, ln_total_revisions = self.tally_points()
        
        # put tags back in
        # (tag_index positions were recorded against the growing word
        # list, so inserting in original order restores placement)
        for (i, tag) in enumerate(tags):
            words.insert(tag_index[i], tag)
        
        # JOINING
        # join the delimited text output
        # output format: one line per field, tab-delimited within a line
        out = '\t'.join(words) + '\n'
        # add
        out += '\t'.join(t_add_pr) + '\n'

        # age
        out += '\t'.join(t_first_revision) + '\n'
        out += '\t'.join(t_first_added) + '\n'
        out += '\t'.join(t_username) + '\n'
        out += '\t'.join(t_seconds_seen) + '\n'
        out += '\t'.join(t_revisions_seen) + '\n'
        out += '\t'.join(t_times_removed) + '\n'
        
        # user
        out += '\t'.join(ln_name) + '\n'
        out += '\t'.join(ln_num_chunks) + '\n'
        out += '\t'.join(ln_total_time) + '\n'
        out += '\t'.join(ln_total_revisions) + '\n'
        
        # misc
        out += str(seconds_in_existence) + '\n'
        out += str(revision_num) + '\n'
        out += time.asctime(time.localtime()) + '\n'
        
        # TODO: replace with s3
        f = open(OUTPUT_DIR + self.page_id, 'w')
        f.write(out)
        f.close()


    ########################
    ### HELPER FUNCTIONS ###
    
    def tally_points(self):
        """ aggregate age_dict statistics per registered user and return
            four parallel string lists (name, chunk count, days lived,
            revisions lived) sorted by total seconds, descending """
        # user id : [name, total chunks, total time lived]
        user_dict = {}
        
        # tally up user points
        for (chunk,age_bunch) in self.age_dict.iteritems():
            if age_bunch.user_id != 0:
                # not dealing with an ip
                # (anonymous edits presumably carry user_id 0 -- TODO
                # confirm against the wikidiff caller)
                if age_bunch.user_id in user_dict:
                    # increment counts
                    [name, num_chunks, total_time, total_revisions] = user_dict[age_bunch.user_id]
                    num_chunks += 1
                    total_time += age_bunch.seconds_seen
                    total_revisions += age_bunch.revisions_seen
                    user_dict[age_bunch.user_id] = [name, num_chunks, total_time, total_revisions]
                else:
                    # create counts
                    user_dict[age_bunch.user_id] = [age_bunch.username, 1
                                , age_bunch.seconds_seen, age_bunch.revisions_seen]
        
        # sort by total seconds
        # point dict = {total seconds : user_array}
        # (dict of lists so users with equal totals are all kept)
        point_dict = {}
        for (user_id, user_array) in user_dict.iteritems():
            [name, num_chunks, total_time, total_revisions] = user_array
            if total_time in point_dict:
                point_dict[total_time].append(user_array)
            else:
                point_dict[total_time] = [user_array]
        keys = point_dict.keys()
        keys.sort(reverse=True)
          
        # output
        ln_name, ln_num_chunks, ln_total_time, ln_total_revisions = [], [], [], []
        for key in keys:
            for user_array in point_dict[key]:
                [name, num_chunks, total_time, total_revisions] = user_array
                
                # convert to days
                # NOTE: Python 2 integer division -- truncates to whole days
                total_time = total_time / 60 / 60 / 24
                
                ln_name.append(name)
                ln_num_chunks.append(str(num_chunks))
                ln_total_time.append(str(total_time))
                ln_total_revisions.append(str(total_revisions))
        
        return ln_name, ln_num_chunks, ln_total_time, ln_total_revisions
    

