#!/usr/bin/python

import sys
import re
import nltk
from nltk import word_tokenize, trigrams, pos_tag
from math import sqrt

#the one below needs to use interactive nltk.download() from terminal
#nltk.download("tokenizers/punkt/english.pickle")

def fst(pair):
    """Return the first component of a 2-tuple (or any indexable pair)."""
    return pair[0]

def snd(pair):
    """Return the second component of a 2-tuple (or any indexable pair)."""
    return pair[1]

# Punkt sentence tokenizer: splits raw text into individual sentences.
# Requires the "punkt" model to have been fetched via nltk.download()
# (see the note at the top of the file).
sentok = nltk.data.load("tokenizers/punkt/english.pickle")

# regular expression to detect english words: lower-case letters and
# apostrophes only, so tokens must be lower-cased before matching
word_re = re.compile("^[a-z']+$")

# numeric weight for left, right contexts (word "attributes")
left_w = float(1)
right_w = float(1)

# numeric weight for the word symbol itself
sym_w = float(0.2)

# Euclidean norm of the (sym, left, right) weight vector; dividing each
# weight by it in encode() makes every encoded trigram vector unit-length.
# bug fix: was `left_w*2` — a typo for `left_w**2` — which inflated the
# norm (sqrt(3.04) instead of sqrt(2.04)) and broke the normalization.
norm = sqrt(left_w**2 + sym_w**2 + right_w**2)

def sent2trigs(s):
    """Map a sentence to (vocabulary items, word trigrams).

    Tokenizes and POS-tags the sentence, keeps only the lower-cased
    tokens that look like English words (per word_re), and returns the
    surviving (word, tag) pairs together with the trigrams over the
    surviving word sequence.
    """
    tagged = pos_tag(word_tokenize(s))
    # lower-case every token and drop anything that is not a plain word
    kept = [(tok.lower(), tag) for (tok, tag) in tagged
            if word_re.match(tok.lower())]
    trigs = trigrams([w for (w, _) in kept])
    return (dict(kept).items(), trigs)

def encode(vocab, trig):
    """Encode one trigram as a sparse vector of (word_index, weight) entries.

    `vocab` is a hash mapping each word to a (index, pos_tag) pair and
    `trig` is a (left, word, right) triple.  Returns a list of three
    (word_index, normal_weight) tuples: the middle word's symbol entry
    first, then the left- and right-context entries.
    """
    (left, word, right) = trig
    index_of = lambda w: vocab[w][0]
    return [(index_of(word), sym_w / norm),
            (index_of(left), left_w / norm),
            (index_of(right), right_w / norm)]

def get_args(argv):
    """Parse the command-line argument vector.

    Expects exactly three positional arguments and returns them as the
    tuple (text_file, vocab_file, encoding_file).  On a wrong argument
    count, prints a usage message and exits with status 1.
    """
    # bug fix: previously read sys.argv directly, silently ignoring the
    # argv parameter the caller passed in
    if len(argv) != 4:
        print("Usage: %s <text_file> <vocab_file> <encoding_file>\n" % argv[0])
        sys.exit(1)
    else:
        return(argv[1], argv[2], argv[3])

def get_text(text_file):
    """Return the full contents of text_file as a single string."""
    # context manager guarantees the handle is closed even if read() raises
    with open(text_file, "r") as f:
        return f.read()

def dump_vocab(vocab, file):
    """Write the vocabulary to `file`, one word per line, sorted by index.

    `vocab` maps word -> (index, pos_tag); each output line holds the
    index, the word and its POS tag in fixed-width columns.
    """
    pp_voc = lambda v: " %6d %-30s %-10s" % (v[1][0], v[0], v[1][1])
    # sort entries by their assigned word index (the first element of the
    # (index, tag) value pair)
    entries = sorted(vocab.items(), key=lambda v: v[1][0])
    # context manager guarantees the handle is closed even if a write fails
    with open(file, "w") as f:
        f.write("\n".join(map(pp_voc, entries)))
        f.write("\n")

def dump_encoding(encoding, file):
    """Write the trigram encoding to `file`, one trigram per line.

    Each line holds the trigram's sparse entries as space-separated
    "index weight" pairs.
    """
    pp_enc = lambda e: " ".join(map(lambda t: "%6d %.6f" % t, e))
    # context manager guarantees the handle is closed even if a write fails
    with open(file, "w") as f:
        f.write("\n".join(map(pp_enc, encoding)) + "\n")

#main: read the input text, build the vocabulary and trigram encoding,
# and dump both to the two output files given on the command line.
(text_file, voc_file, enc_file) = get_args(sys.argv)
text = get_text(text_file)
# merge two (vocab_items, trigram_list) pairs; on duplicate words the
# later entry's POS tag wins, because dict() keeps the last occurrence
accum = lambda t1,t2: (dict(t1[0]+t2[0]).items(), t1[1]+t2[1])
# NOTE(review): `reduce` and `xrange` below are Python 2 builtins, and the
# code relies on map/filter/items returning lists — this script will not
# run unchanged on Python 3. Also, reduce() raises TypeError if the text
# contains no sentences — TODO confirm that is acceptable.
(vocab,trigs) = reduce(accum, [sent2trigs(s) for s in sentok.tokenize(text)])
# re-key the vocabulary: word -> (running index, pos_tag), assigning each
# word a consecutive integer index
vocab = dict(zip(map(fst, vocab), zip(xrange(len(vocab)), map(snd,vocab))))
# encode every trigram as a sparse 3-entry vector against that index map
encoding = map(lambda t: encode(vocab,t), trigs)
dump_vocab(vocab, voc_file)
dump_encoding(encoding, enc_file)

