'''
Created on May 20, 2013

@author: a.renduchintala
'''
import multiprocessing
from cluster import computeTrigramQuality
'''
Created on May 13, 2013

@author: arenduchintala
'''
import re
import math
import itertools
import datetime as dt
import sys
import copy
from pprint import pprint
from collections import Counter
from multiprocessing import Pool

'''
Ideas:
make 'clustering' a matrix of terms Vs cluster ids
this will speed the look up for whether a term belongs to a particular cluster

'''
TRIGRAM = 'trigram'  # METHOD value: score merges with trigram cluster quality
BIGRAM = 'bigram'  # METHOD value: score merges with bigram cluster quality
INPUT = '../data/test-short.txt'  # default input corpus path (override with -input)
SEED_RATIO = 0.1  # fraction of vocabulary used as seed clusters when -seedSize is not given
SEED_SIZE = 0  # explicit seed-cluster count; 0 means "derive from SEED_RATIO"
METHOD = BIGRAM  # default quality measure (override with -mode)
OUTPUT = '../data/simple-output.txt'  # default output path (override with -output)
START = '*'  # sentinel token inserted at line starts in the token stream

# merge candidates keyed by quality score; reset before each round of
# parallemMerge and filled by the accumilateQualities pool callback
mergeQuality = {}

def setArgs(args):
    """Parse flag/value pairs from the command line into the config globals.

    Recognized flags (each followed by its value):
      -input PATH, -output PATH, -mode METHOD, -seedSize INT

    Unknown tokens are ignored. Values are whitespace-stripped before
    assignment. A flag appearing as the very last token (with no value)
    is ignored rather than raising IndexError as the original did.
    """
    global INPUT, OUTPUT, METHOD, SEED_SIZE
    # stop one short of the end: every flag needs a following value token
    for a in range(len(args) - 1):
        flag = args[a]
        value = args[a + 1].strip()
        if flag == '-input':
            INPUT = value
        elif flag == '-output':
            OUTPUT = value
        elif flag == '-mode':
            METHOD = value
        elif flag == '-seedSize':
            SEED_SIZE = int(value)

            
def computeTrigramQuality(lhs, rhs, clustering, membership, tokens_sequence, start='*'):
    '''
    Computes the trigram quality of a candidate clustering:
        Q(C) = sum over (c1,c2,c3) of p(c1c2c3) x log( p(c1c2c3) / (p(c1c2) p(c3)) )
    Tokens absent from membership fall into the default cluster '0'.
    start: the sentence-start sentinel (defaults to '*', matching the module
    START constant); positions holding it are skipped as n-gram endpoints.
    Returns a dict carrying the quality plus the candidate clustering,
    membership and the two merged branches, so the pool callback can compare
    candidates.
    '''
    cluster_sequence = [membership[token] if token in membership else '0' for token in tokens_sequence]
    bigram_cluster_sequence = [(cluster_sequence[x - 1], cluster_sequence[x]) for x in range(1, len(cluster_sequence)) if cluster_sequence[x] != start]
    trigram_cluster_sequence = [(cluster_sequence[x - 2], cluster_sequence[x - 1], cluster_sequence[x]) for x in range(2, len(cluster_sequence)) if cluster_sequence[x] != start]

    quality = 0.0
    # NOTE(review): permutations() never yields tuples with a repeated cluster
    # id (e.g. (c,c,c)), so same-cluster n-grams are excluded — confirm intended
    for (c1, c2, c3) in itertools.permutations(sorted(clustering.keys()), 3):
        # BUG FIX: trigram counts must be taken from the trigram sequence; the
        # original counted the 3-tuple in the bigram (2-tuple) sequence, which
        # is always 0, so trigram quality always came out 0.0.
        prob_c1c2c3 = float(trigram_cluster_sequence.count((c1, c2, c3))) / float(len(trigram_cluster_sequence))
        prob_c1c2 = float(bigram_cluster_sequence.count((c1, c2))) / float(len(bigram_cluster_sequence))
        # true sequence length is len(bigram_cluster_sequence), not
        # len(cluster_sequence), because start symbols have been inserted
        prob_c3 = float(cluster_sequence.count(c3)) / float(len(bigram_cluster_sequence))
        if prob_c1c2c3 > 0.0 and prob_c1c2 > 0.0:
            t = math.log(prob_c1c2c3) - math.log(prob_c1c2) - math.log(prob_c3)
            quality = quality + (prob_c1c2c3 * t)

    return {'q': quality, 'c': clustering, 'm': membership, 'lhs': lhs, 'rhs': rhs}

def computeBigramQuality(lhs, rhs, clustering, membership, tokens_sequence, start='*'):
    '''
    Computes the bigram quality of a candidate clustering:
        Q(C) = sum over (c1,c2) of p(c1c2) x log( p(c1c2) / (p(c1) p(c2)) )
    Tokens absent from membership fall into the default cluster '0'.
    start: the sentence-start sentinel (defaults to '*', matching the module
    START constant); positions holding it are skipped as bigram endpoints.
    Returns a dict carrying the quality plus the candidate clustering,
    membership and the two merged branches, so the pool callback can compare
    candidates.
    '''
    cluster_sequence = [membership[token] if token in membership else '0' for token in tokens_sequence]
    bigram_cluster_sequence = [(cluster_sequence[x - 1], cluster_sequence[x]) for x in range(1, len(cluster_sequence)) if cluster_sequence[x] != start]
    quality = 0.0
    # NOTE(review): permutations() never yields (c,c), so same-cluster bigrams
    # are excluded from the sum — confirm intended
    for (c1, c2) in itertools.permutations(sorted(clustering.keys()), 2):
        prob_c1c2 = float(bigram_cluster_sequence.count((c1, c2))) / float(len(bigram_cluster_sequence))
        # unigram probabilities are normalized by the bigram-sequence length,
        # not len(cluster_sequence), because start symbols have been inserted
        prob_c1 = float(cluster_sequence.count(c1)) / float(len(bigram_cluster_sequence))
        prob_c2 = float(cluster_sequence.count(c2)) / float(len(bigram_cluster_sequence))
        if prob_c1c2 > 0.0:
            t = math.log(prob_c1c2) - math.log(prob_c1) - math.log(prob_c2)
            quality = quality + (prob_c1c2 * t)

    return {'q': quality, 'c': clustering, 'm': membership, 'lhs': lhs, 'rhs': rhs}


def accumilateQualities(returnDict):
    """Pool callback: record a merge candidate keyed by its quality score.

    Only the first result seen for a given quality is kept (ties broken by
    arrival order, as in the original has_key check). Callbacks run in the
    parent process, so mutating the module global here is safe.
    """
    global mergeQuality
    # setdefault performs the "insert only if absent" in one step, replacing
    # the Python-2-only has_key() test
    mergeQuality.setdefault(returnDict['q'], returnDict)

    
    
def parallemMerge(a_clustering, membership, tokens_sequence):
    """Evaluate every pairwise cluster merge in parallel.

    For each unordered pair of cluster ids (excluding START), builds the
    clustering and membership that would result from merging the pair, then
    submits a quality computation (trigram or bigram per METHOD) to a worker
    pool. Results flow into the module-level mergeQuality dict through the
    accumilateQualities callback. Blocks until all workers finish.
    """
    pool = Pool(processes=multiprocessing.cpu_count())
    for (m1, m2) in itertools.combinations(sorted(a_clustering.keys()), 2):
        # nothing merges with START; compare against the constant rather than
        # a hard-coded '*' literal for consistency with the rest of the file
        if m1 != START and m2 != START:
            merged_id = m1 + ',' + m2
            new_clustering = copy.deepcopy(a_clustering)
            lhs = a_clustering[m1]
            rhs = a_clustering[m2]
            new_clustering[merged_id] = new_clustering.pop(m1) + new_clustering.pop(m2)

            new_membership = copy.deepcopy(membership)
            for merged_token in new_clustering[merged_id]:
                new_membership[merged_token] = merged_id
            '''
            passing tokens_sequence because windows wont work with out it
            see: http://stackoverflow.com/questions/6596617/python-multiprocess-diff-between-windows-and-linux
            '''
            if METHOD == TRIGRAM:
                pool.apply_async(computeTrigramQuality, args=(lhs, rhs, new_clustering, new_membership, tokens_sequence), callback=accumilateQualities)
            else:
                pool.apply_async(computeBigramQuality, args=(lhs, rhs, new_clustering, new_membership, tokens_sequence), callback=accumilateQualities)
    pool.close()
    pool.join()
    
def mergeResult(bts):
    """Apply the best merge candidate accumulated by the last parallel round.

    Picks the entry with the highest quality score from the module-level
    mergeQuality dict, prefixes the branch bits onto the affected words'
    bitstrings, and returns (clustering, bitstrings, membership) for the
    winning candidate. Raises ValueError if mergeQuality is empty.
    """
    global mergeQuality
    best = mergeQuality[max(mergeQuality)]
    updated_bts = updateBitStrings(bts, best['lhs'], best['rhs'])
    return best['c'], updated_bts, best['m']
      

def updateBitStrings(bts, left_branch_words, right_branch_words):
    """Prepend a branch bit to each merged word's bitstring code.

    Words from the left branch get a '1' prefixed to their existing code,
    words from the right branch get a '0'; a word not yet in bts starts
    with just that single bit. Mutates bts in place and returns it.
    """
    for w1 in left_branch_words:
        # dict.get with a '' default collapses the Python-2-only has_key()
        # branch and the new-word branch into one statement
        bts[w1] = '1' + bts.get(w1, '')
    for w2 in right_branch_words:
        bts[w2] = '0' + bts.get(w2, '')
    return bts

    
if __name__ == "__main__":
    setArgs(sys.argv[1:])
    print  'config:', '\n' , INPUT, '\n' , OUTPUT, '\n' , METHOD, '\n' , SEED_SIZE
    clustering = {}
    bitstrings = {}
    token_membership = {}
    # lowercase the corpus and strip everything except whitespace/alphanumerics
    text = re.sub('[^\sA-Za-z0-9]+', '', open(INPUT, 'r').read().lower())
    # flatten to a single token stream, inserting START sentinels at each line
    # break (two sentinels in trigram mode so every token has two predecessors)
    if (METHOD == TRIGRAM):
        lines = START + ' ' + START + ' ' + str(' ' + START + ' ' + START + ' ').join(re.split('\r+|\n+', text))
    else:
        lines = START + ' ' + str(' ' + START + ' ').join(re.split('\r+|\n+', text))
    
    tokens_sequence = re.split('\s+', lines.strip())
    counter = Counter(tokens_sequence)
    seed = []
    # vocabulary size, printed for inspection
    print len(set(list(counter.elements())))
    '''
    This cutoff should be set manually 
    cutoff controls initial seed clusters
    default is set to half the full vocabulary
    '''
    vocab_size = len(set(list(counter.elements())));
    if (SEED_SIZE > 0 and SEED_SIZE < vocab_size):
        cutoff = SEED_SIZE
    else:
        cutoff = int(vocab_size * SEED_RATIO)  
    # the most frequent tokens (excluding the START sentinel) become seeds
    for (s, c) in  counter.most_common(cutoff):
        if (s != START):
            seed.append(s)
    non_seed = set(list(counter.elements()))
    non_seed = non_seed - set(seed) - set([START])
    non_seed = list(non_seed)

    # START lives in its own cluster; each seed word gets a singleton cluster
    # with a numeric string id
    clustering[START] = [START]
    token_membership[START] = START
    for token in seed:
            cluster_id = str(len(clustering))
            clustering[cluster_id] = [token]
            token_membership[token] = cluster_id
    
    ns_label = len(clustering) + 1
    '''
    Initial clustering with seed clusters
    '''
    start_time = dt.datetime.now()
    
    # add each non-seed word as its own cluster, then immediately apply the
    # single best merge (quality of all pairs evaluated in parallel)
    while len(non_seed) > 0:
        new_word = non_seed.pop()
        cluster_id = str(ns_label)
        clustering[cluster_id] = [new_word]
        token_membership[new_word] = cluster_id
        mergeQuality = {}
        parallemMerge(clustering, token_membership, tokens_sequence)
        (clustering, bitstrings, token_membership) = mergeResult(bitstrings) 
        ns_label += 1
     
    '''
    Complete the full hireachy by merging all clusters untill 
    there are only 2 clusters 
    '''   
    print 'reducing'
    while(len(clustering) > 2):
        mergeQuality = {}
        parallemMerge(clustering, token_membership, tokens_sequence)
        (clustering, bitstrings, token_membership) = mergeResult(bitstrings)

        
    end_time = dt.datetime.now()
    print 'final'
    # invert word -> bitstring so the output can be sorted by the code
    inverted_bitstrings = dict([[v, k] for k, v in bitstrings.items()])
    pprint(inverted_bitstrings)
   
    writer = open(OUTPUT, 'w')
    for k in sorted(inverted_bitstrings.keys()):
        writer.write(k + '\t\t' + inverted_bitstrings[k] + '\n')
    writer.flush()
    writer.close()
    
    # NOTE(review): .microseconds is only the sub-second component of the
    # elapsed timedelta, not the total runtime — .total_seconds() was
    # probably intended; verify before relying on this timing output
    print (end_time - start_time).microseconds


