#!/usr/bin/python

# BigData course project
# SOM encoding (semantic maps #3)

import sys
import datetime
import re
import nltk
from nltk import word_tokenize, trigrams, pos_tag
from math import sqrt

#the one below needs to use interactive nltk.download() from terminal
#nltk.download("tokenizers/punkt/english.pickle")

# first and second components of a pair
def fst(w):
    return w[0]

def snd(w):
    return w[1]

# pos tag of a histogram entry of the form ((word, tag), count)
def hist_tag(w):
    return fst(w)[1]

# word index of a vocab entry of the form ((word, tag), (index, count))
def word_idx(w):
    return snd(w)[0]

# sentence tokenizer: pre-trained punkt model loaded from the nltk data
# directory (requires a prior interactive nltk.download, see header note)
sentok = nltk.data.load("tokenizers/punkt/english.pickle")

# regular expression to detect english words (lowercase letters and
# apostrophes only; anything else is discarded by sent2trigs)
word_re = re.compile("^[a-z']+$")

# numeric weight for left, right contexts (word "attributes")
left_w = float(1)
right_w = float(1)

# numeric weight for the word symbol itself
sym_w = float(0.2)

# norm for all encoded vectors (trigrams and vocab)
# BUGFIX: was left_w*2 (doubling) instead of left_w**2 (squaring);
# the Euclidean norm requires every component squared, as already done
# for sym_w and right_w
norm   = sqrt(left_w**2 + sym_w**2 + right_w**2)
norm_v = sqrt(sym_w**2)

# current unix time in integer milliseconds
def get_currtime():
    epoch = datetime.datetime(1970, 1, 1)
    delta = datetime.datetime.utcnow() - epoch
    return int(delta.total_seconds() * 1000)

# write a %-formatted message to stdout and flush immediately so
# progress output shows up even without a trailing newline
def log(fmt, *args):
    msg = fmt % args
    sys.stdout.write(msg)
    sys.stdout.flush()

# periodic progress reporting: accumulates item count and duration in
# the module globals g_t/g_dur and logs a line every g_batch_size items,
# resetting the duration accumulator afterwards
def show_progress(start, end, ctx_label, item_label):
    global g_t
    global g_dur
    global g_batch_size
    g_t += 1
    g_dur += end - start
    if g_t % g_batch_size != 0:
        return
    log("%s: processed %d %s so far (last %d in %.3f secs)\n",
        ctx_label, g_t, item_label, g_batch_size, g_dur/1000.0)
    g_dur = 0

# tally every word from the list into the histogram dict (in place)
def words2hist(words, hist):
    for w in words:
        hist[w] = hist.get(w, 0) + 1

# true if the penn treebank pos tag denotes a noun
def is_noun(tag):
    return tag in {"NN", "NNS", "NNP", "NNPS"}

# true if the penn treebank pos tag denotes a pronoun
def is_pronoun(tag):
    return tag in {"PRP", "PRP$", "WP", "WP$"}

# true if the penn treebank pos tag denotes an adjective
def is_adjective(tag):
    return tag in {"JJ", "JJR", "JJS"}

# true if the penn treebank pos tag denotes a verb (modals included)
def is_verb(tag):
    return tag in {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "MD"}

# true if the penn treebank pos tag denotes an adverb
def is_adverb(tag):
    return tag in {"RB", "RBR", "RBS", "WRB"}

# keep only histogram entries whose pos tag falls in one of the
# categories of interest: noun, pronoun, adjective, verb, adverb
def filter_hist(w):
    tag = hist_tag(w)
    checks = (is_noun, is_pronoun, is_adjective, is_verb, is_adverb)
    return any(chk(tag) for chk in checks)
    
# map one sentence to its word trigrams; also returns the per-sentence
# histogram of (lowercased word, pos tag) pairs. only tokens matching
# word_re (lowercase letters/apostrophes) survive the filtering.
def sent2trigs(s):
    t0 = get_currtime()
    tagged = pos_tag(word_tokenize(s))
    words = [(tok.lower(), tag) for (tok, tag) in tagged
             if word_re.match(tok.lower()) != None]
    hist = {}
    words2hist(words, hist)
    t1 = get_currtime()
    show_progress(t0, t1, "sent2trigs", "sentences")
    return (hist, trigrams(words))

# encode one trigram as a sparse vector of three (word_index, weight)
# pairs: the middle-word symbol, then the left and right context words.
# vocab maps each (word, tag) key to (index, count); the index is
# shifted by the vocabulary size n per trigram slot so the three parts
# occupy disjoint coordinate ranges.
def encode(vocab, trig):
    n = len(vocab)
    (left, word, right) = trig
    sym_part   = (vocab[word][0],        sym_w/norm)
    left_part  = (n + vocab[left][0],    left_w/norm)
    right_part = (2*n + vocab[right][0], right_w/norm)
    return [sym_part, left_part, right_part]

# encode one vocab entry w = ((word, tag), (index, count)) with only its
# "symbol" part -- no (left, right) context attributes. used to build
# the classification input once the som has been trained.
def encode_vocab(vocab, w):
    key = w[0]
    return [(vocab[key][0], sym_w/norm_v)]

# parse and validate the command line; exits with a usage message when
# the argument count is wrong. returns the 6-tuple
# (text_file, n, m, vocab_file, enc_file_learn, enc_file_class)
# with n and m converted to int.
def get_args(argv):
    # BUGFIX: the function accepted argv but read sys.argv, silently
    # ignoring its parameter; it now uses argv consistently (the caller
    # passes sys.argv, so behavior at the call site is unchanged)
    if len(argv) != 7:
        print("Usage: %s <text_file> <n> <m> <vocab_file> <encoding_file_learn> <encoding_file_class>\n" % argv[0])
        sys.exit(1)
    else:
        return (argv[1], int(argv[2]), int(argv[3]),
                argv[4], argv[5], argv[6])

# read and return the full contents of text_file.
# the context manager guarantees the handle is closed even if read()
# raises (the original leaked the handle on error)
def get_text(text_file):
    with open(text_file, "r") as f:
        return f.read()

# write the vocabulary to `file`, one " idx word count tag" row per
# entry, ordered by word index. entries of vocab.items() have the
# shape ((word, tag), (index, count)).
def dump_vocab(vocab, file):
    reorder_w = lambda w: (w[1][0], w[0][0], w[1][1], w[0][1])
    pp_voc = lambda w: " %6d %-30s %7d %-10s" % reorder_w(w)
    # BUGFIX: use a context manager so the handle is closed even if a
    # write raises; the sort key (entry -> index) is inlined, equal to
    # the module-level word_idx helper
    with open(file, "w") as f:
        f.write("\n".join(map(pp_voc, sorted(vocab.items(), key=lambda w: w[1][0]))))
        f.write("\n")

# write one line per encoded item: space-separated "index weight" pairs
# ("%6d %.6f" each), items sorted lexicographically.
# note: the parameter named `file` shadows a builtin; kept for
# call-site compatibility
def dump_encoding(encoding, file):
    pp_enc = lambda e: " ".join(map(lambda t: "%6d %.6f" % t, e))
    # BUGFIX: context manager closes the handle even if a write raises
    with open(file, "w") as f:
        f.write("\n".join(map(pp_enc, sorted(encoding))) + "\n")

# merge histogram h2 into h1; mutates and returns h1 (reuse for speed)
def add_hists(h1, h2):
    for (w, cnt) in h2.items():
        h1[w] = h1.get(w, 0) + cnt
    return h1

# append all of l2 onto l1; mutates and returns l1 (reuse for speed)
def add_lists(l1, l2):
    l1 += l2
    return l1

# fold two (histogram, trigram-list) pairs into one, reusing the first
# pair's containers; reports progress via the module globals
def accum_hist_trigs(ht1, ht2):
    t0 = get_currtime()
    merged_hist = add_hists(ht1[0], ht2[0])
    merged_trigs = add_lists(ht1[1], ht2[1])
    t1 = get_currtime()
    show_progress(t0, t1, "accum_hist_trigs", "sentences")
    return (merged_hist, merged_trigs)

#main
# driver script. NOTE(review): this is Python 2 code -- it relies on
# the builtin reduce (accumulation step) and xrange (reshape step),
# and on map/filter returning lists (len() is taken on their results).

# parse command line: input text, vocab size n, sentence count m, and
# the three output file paths
(text_file, n, m, voc_file, enc_file_learn, enc_file_class) = get_args(sys.argv)

log("\nreading text file ... ")
g_start = get_currtime()
text = get_text(text_file)
g_end = get_currtime()
log("done (%d bytes, %.3f secs)\n", len(text), (g_end-g_start)/1000.0)

# split raw text into sentences (punkt) and keep only the first m
log("\ntokenizing %d sentences ... ", m)
g_start = get_currtime()
ss = sentok.tokenize(text)[:m]
g_end = get_currtime()
log("done (%d sentences, %.3f secs)\n", len(ss), (g_end-g_start)/1000.0)

# per-sentence histograms and trigram lists; g_t/g_dur/g_batch_size
# are the progress-reporting globals consumed by show_progress
log("\ncalculating histograms and trigrams per sentence ...\n")
g_start = get_currtime()
g_t = 0
g_dur = 0
g_batch_size = 100
hist_trigs = [sent2trigs(s) for s in ss]
g_end = get_currtime()
log("done (%d sentences proc, %.3f secs)\n", 
    len(hist_trigs), (g_end-g_start)/1000.0)

# fold all per-sentence (histogram, trigrams) pairs into a single pair.
# NOTE(review): reduce raises TypeError on an empty hist_trigs list
# (i.e. an empty input text) -- confirm that case cannot happen.
log("\naccumulating histograms and trigrams per sentence ...\n")
g_start = get_currtime()
g_t = 0
g_dur = 0
g_batch_size = 1000
(hist, trigs) = reduce(accum_hist_trigs, hist_trigs)
g_end = get_currtime()
log("done (%d sentences proc, %d histogram words, %d trigrams, %.3f secs)\n", 
    len(hist_trigs), len(hist), len(trigs), (g_end-g_start)/1000.0)

# keep only noun/pronoun/adjective/verb/adverb entries
# (note: this section uses start/end rather than g_start/g_end)
log("\nfilter the categories we care about ... ")
start = get_currtime()
hist = dict(filter(filter_hist, hist.items()))
end = get_currtime()
log("done (%d words, %.3f secs)\n", len(hist), (end-start)/1000.0)

# NOTE(review): the log reports n even when fewer than n words survive
log("\nsort vocab and take first %d ... ", n)
g_start = get_currtime()
# sort by occurrences and word itself
cmp_word = lambda w: (w[1], w[0][0])
vocab = sorted(hist.items(), key=cmp_word, reverse=True)[:n]
g_end = get_currtime()
log("done (%d words, %.3f secs)\n", n, (g_end-g_start)/1000.0)

# reshape the sorted [( (word,tag), count )] list into a dict
# (word, tag) -> (index, count), index being the rank just computed
log("\nreshaping vocab ... ")
g_start = get_currtime()
vocab = dict(zip(map(fst, vocab), zip(xrange(len(vocab)), map(snd,vocab))))
g_end = get_currtime()
log("done (%d words, %.3f secs)\n", len(vocab), (g_end-g_start)/1000.0)

# discard trigrams containing any out-of-vocabulary word
log("\nfiltering trigrams ... ")
g_start = get_currtime()
in_vocab = lambda t: all(map(lambda w: w in vocab, t))
trigs = filter(in_vocab, trigs)
g_end = get_currtime()
log("done (%d trigrams, %.3f secs)\n", len(trigs), (g_end-g_start)/1000.0)

# sparse-encode every surviving trigram (som training input)
log("\nencoding trigrams ... ")
g_start = get_currtime()
encoding = map(lambda t: encode(vocab,t), trigs)
g_end = get_currtime()
log("done (%d trigrams, %.3f secs)\n", len(encoding), (g_end-g_start)/1000.0)

# symbol-only encoding of the vocabulary (som classification input),
# ordered by word index
log("\nencoding vocab ... ")
g_start = get_currtime()
vocab_enc = map(lambda w: encode_vocab(vocab,w), 
                sorted(vocab.items(), key=word_idx))
g_end = get_currtime()
log("done (%d words, %.3f secs)\n", len(vocab_enc), (g_end-g_start)/1000.0)

log("\ndumping trigrams encoding ... ")
g_start = get_currtime()
dump_encoding(encoding, enc_file_learn)
g_end = get_currtime()
log("done (%d trigrams, %.3f secs)\n", len(encoding), (g_end-g_start)/1000.0)

# write the classification-stage encoding (one entry per vocab word)
log("\ndumping vocab encoding ... ")
g_start = get_currtime()
dump_encoding(vocab_enc, enc_file_class)
g_end = get_currtime()
# BUGFIX: the count here is vocabulary words, not trigrams -- the label
# was copy/pasted from the trigram-dump section
log("done (%d words, %.3f secs)\n", len(vocab_enc), (g_end-g_start)/1000.0)

# human-readable vocabulary dump (" idx word count tag" rows)
log("\ndumping vocab preclass to ... ")
g_start = get_currtime()
dump_vocab(vocab, voc_file)
g_end = get_currtime()
log("done (%d words, %.3f secs)\n", len(vocab), (g_end-g_start)/1000.0)

log("\n")
