#!/usr/bin/python

import sys
import re
import nltk
from nltk import word_tokenize, trigrams, pos_tag
from math import sqrt

# NOTE: this script needs the punkt model, installed once by running the
# interactive nltk.download() from a terminal:
# nltk.download("tokenizers/punkt/english.pickle")

# first / second component accessors for 2-tuples
def fst(w):
    return w[0]

def snd(w):
    return w[1]

# extracts the pos tag from a histogram entry ((word, tag), count)
def hist_tag(w):
    return fst(w)[1]

# extracts the word index from one vocab entry ((word, tag), (index, count))
def word_idx(w):
    return snd(w)[0]

# sentence tokenizer (punkt model must already be downloaded, see note above)
sentok = nltk.data.load("tokenizers/punkt/english.pickle")

# regular expression to detect english words (lowercase ascii letters only)
word_re = re.compile("^[a-z]+$")

# numeric weight for left, right contexts (word "attributes")
left_w = float(1)
right_w = float(1)

# numeric weight for the word symbol itself
sym_w = float(0.2)

# Euclidean norm for all encoded vectors (trigrams and vocab).
# BUG FIX: the first term was `left_w*2` (doubling) where the symmetric
# formula clearly intends `left_w**2` (squaring), matching the other terms.
norm   = sqrt(left_w**2 + sym_w**2 + right_w**2)
# norm for vocab-only vectors: sqrt(sym_w**2) == sym_w, so the single
# symbol entry gets weight 1.0 (see encode_vocab)
norm_v = sqrt(sym_w**2)

def log(fmt, *args):
    """Write a printf-style message to stdout and flush it immediately."""
    message = fmt % args
    sys.stdout.write(message)
    sys.stdout.flush()

# adds a list of words to a histogram (dictionary), mutating it in place
def words2hist(words, hist):
    """Accumulate one occurrence count per item of words into hist."""
    for key in words:
        hist[key] = hist.get(key, 0) + 1
    
# sentence to trigrams mapping (also returns vocabulary found)
def sent2trigs(s):
    """Tokenize and POS-tag one sentence; keep only purely-alphabetic words
    (lowercased). Returns (word histogram, trigrams of (word, tag) pairs)."""
    tagged = pos_tag(word_tokenize(s))
    # lowercase each token and keep it only if it is a plain english word
    words = [(tok.lower(), tag) for (tok, tag) in tagged
             if word_re.match(tok.lower()) is not None]
    hist = {}
    words2hist(words, hist)
    return (hist, trigrams(words))

# tells if a pos tag is a noun (Penn Treebank tagset)
def is_noun(tag):
    return tag in {"NN", "NNS", "NNP", "NNPS"}

# tells if a pos tag is a (possibly possessive or wh-) pronoun
def is_pronoun(tag):
    return tag in {"PRP", "PRP$", "WP", "WP$"}

# tells if a pos tag is an adjective (base, comparative or superlative)
def is_adjective(tag):
    return tag in {"JJ", "JJR", "JJS"}

# tells if a pos tag is a verb form (or a modal)
def is_verb(tag):
    return tag in {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "MD"}

# tells if a pos tag is an adverb (including wh-adverbs)
def is_adverb(tag):
    return tag in {"RB", "RBR", "RBS", "WRB"}

# filter only the histogram words with a pos tag category in:
# noun, pronoun, adjective, verb, adverb
def filter_hist(w):
    """True iff histogram entry w = ((word, tag), count) has an accepted tag."""
    tag = hist_tag(w)
    for accept in (is_noun, is_pronoun, is_adjective, is_verb, is_adverb):
        if accept(tag):
            return True
    return False

# encode each trigram as a sparse vector, having symbol, left and right
# encodings; the 3 of them represented by entries of the form
# (word_index, normal_weight). so this function returns a list of 3 tuples.
# the first parameter is a hash mapping each word to its (index, count). the
# indexes are shifted right by len(vocab) per part of the trigram considered
def encode(vocab, trig):
    n = len(vocab)
    left, word, right = trig
    sym_idx   = vocab[word][0]
    left_idx  = n + vocab[left][0]
    right_idx = 2 * n + vocab[right][0]
    return [(sym_idx,   sym_w / norm),
            (left_idx,  left_w / norm),
            (right_idx, right_w / norm)]

# encode a word in the vocabulary in same way as training trigrams,
# but just puts the "symbol" part, that is do not use a context of
# (left,right) words as attributes. this is to be used for the
# classification stage of the som (once it is trained).
# since norm_v == sym_w, the single entry's weight is 1.0
def encode_vocab(vocab, w):
    index = vocab[w[0]][0]
    return [(index, sym_w / norm_v)]

def get_args(argv):
    """Parse command-line arguments.

    argv -- full argument vector (argv[0] is the program name)

    Returns (text_file, n, vocab_file, encoding_file_learn,
    encoding_file_class) with n converted to int; prints usage and exits
    with status 1 on arity mismatch.

    BUG FIX: the function previously ignored its argv parameter and read
    sys.argv directly; it now uses argv (the visible caller passes sys.argv,
    so behavior is unchanged for it).
    """
    if len(argv) != 6:
        print("Usage: %s <text_file> <n> <vocab_file> <encoding_file_learn> <encoding_file_class>\n" % argv[0])
        sys.exit(1)
    else:
        return (argv[1], int(argv[2]), argv[3], argv[4], argv[5])

def get_text(text_file):
    """Return the whole contents of text_file as one string."""
    with open(text_file, "r") as handle:
        return handle.read()

def dump_vocab(vocab, file):
    """Write the vocabulary to `file`, one entry per line, sorted by index.

    Each line holds: word index, word, occurrence count, pos tag.
    vocab maps (word, tag) -> (index, count).
    """
    lines = []
    for ((word, tag), (index, count)) in sorted(vocab.items(),
                                                key=lambda w: w[1][0]):
        lines.append(" %6d %-30s %7d %-10s" % (index, word, count, tag))
    with open(file, "w") as out:
        out.write("\n".join(lines))
        out.write("\n")

def dump_encoding(encoding, file):
    """Write one encoded sparse vector per line to `file`.

    Each vector is a list of (index, weight) pairs, printed as
    "index weight" fields separated by single spaces.
    """
    def fmt_vector(vec):
        return " ".join("%6d %.6f" % pair for pair in vec)
    with open(file, "w") as out:
        out.write("\n".join(fmt_vector(v) for v in encoding) + "\n")

# adds two word histograms (dictionaries); h1 is mutated and returned
def add_hists(h1, h2):
    """Merge the counts of h2 into h1 in place, then return h1."""
    for key in h2:
        h1[key] = h1.get(key, 0) + h2[key]
    return h1

# adds two lists, mutating the first one in place for speed
def add_lists(l1, l2):
    """Append every item of l2 onto l1 and return l1."""
    l1 += l2
    return l1

# take most common "n" words, but filtering if they have trigrams
# histogram is assumed to be sorted (most frequent first)
def take_n_words(wanted_n, hist, hist_rest, trigs):
    """Draw up to wanted_n entries from hist_rest, keeping only words that
    appear as the middle of some usable trigram.

    hist      -- already-accepted histogram entries ((word, tag), count)
    hist_rest -- remaining sorted histogram entries to draw from
    trigs     -- trigrams of (word, tag) pairs

    Returns (hist_n, trigs_n, new_hist_rest): the accepted entries, the
    trigrams fully covered by accepted words, and the untouched rest.

    PERF FIX: membership tests now use sets instead of lists (was an O(n)
    scan per lookup inside the filters); list comprehensions replace the
    Python-2-only `map(...) + map(...)` / list-returning `filter(...)`.
    Behavior is unchanged.
    """
    log("requesting %d words in hist of size %d, with %d trigrams\n",
        wanted_n, len(hist_rest), len(trigs))
    # wanted n may be bigger than hist, take the min
    n = min(wanted_n, len(hist_rest))
    hist_n = hist_rest[:n]
    new_hist_rest = hist_rest[n:]
    # consider words in both hist_n and hist
    words = set(fst(h) for h in hist_n) | set(fst(h) for h in hist)
    # a trigram qualifies when all three of its (word, tag) members are known
    in_hist = lambda t: all(w in words for w in t)
    trigs_n = [t for t in trigs if in_hist(t)]
    log("got %d trigs after filtering against hist\n", len(trigs_n))
    middle = set(snd(t) for t in trigs_n)
    # eliminate words in hist_n that never occur as a trigram middle
    hist_n = [h for h in hist_n if h[0] in middle]
    log("got %d words after filtering against trigs\n", len(hist_n))
    # filter again the trigrams against the reduced hist
    # (in_hist closes over `words`, so rebinding it updates the filter)
    words = set(fst(h) for h in hist_n) | set(fst(h) for h in hist)
    trigs_n = [t for t in trigs_n if in_hist(t)]
    log("got %d trigs after filtering again\n", len(trigs_n))
    return (hist_n, trigs_n, new_hist_rest)

# take most common "n" words, but ensuring they have trigrams
# histogram is assumed to be sorted
def ensure_n_words(n, hist, trigs):
    """Repeatedly call take_n_words until n words are collected or the
    histogram is exhausted. Returns (hist_n, trigs_n)."""
    hist_n, trigs_n, rest = take_n_words(n, [], hist, trigs)
    while len(hist_n) < n and len(rest) > 0:
        missing = n - len(hist_n)
        log("did not make it, will seek for %d more words ...\n", missing)
        more_hist, more_trigs, rest = take_n_words(missing, hist_n,
                                                   rest, trigs)
        hist_n = hist_n + more_hist
        trigs_n = trigs_n + more_trigs
    return (hist_n, trigs_n)
        
# main: read a text, keep the n most frequent open-class words that occur as
# trigram middles, and dump the vocabulary plus two sparse encodings (one for
# SOM training, one for classification).
# NOTE(review): bare `reduce` and `xrange` are Python 2 builtins, so this
# script targets Python 2 — confirm before porting.
(text_file, n, voc_file, enc_file_learn, enc_file_class) = get_args(sys.argv)
text = get_text(text_file)
# fold the per-sentence (histogram, trigram list) pairs into a single pair
accum = lambda ht1,ht2: (add_hists(ht1[0],ht2[0]), add_lists(ht1[1],ht2[1]))
(hist, trigs) = reduce(accum, [sent2trigs(s) for s in sentok.tokenize(text)])
# filter the categories we care about (noun/pronoun/adjective/verb/adverb)
hist = dict(filter(filter_hist, hist.items()))
# sort histogram by occurrences, most frequent first
hist = sorted(hist.items(), key=snd, reverse=True)
# ensure we get "n" words (most common), but with trigrams
(hist, trigs) = ensure_n_words(n, hist, trigs)
# reshape hist as vocab: a map of (word,pos_tag) => (index,#occurrs)
vocab = dict(zip(map(fst, hist), zip(xrange(len(hist)), map(snd,hist))))
# proceed to encode trigrams (with context) and vocab (symbol-only)
encoding = map(lambda t: encode(vocab,t), trigs)
vocab_enc = map(lambda w: encode_vocab(vocab,w), 
                sorted(vocab.items(), key=word_idx))
dump_vocab(vocab, voc_file)
dump_encoding(encoding, enc_file_learn)
dump_encoding(vocab_enc, enc_file_class)
