# BigData course project
# Encoding algorithm based on the WebSOM paper

#NOTE: This code should be improved. It was just used as a prototype
#NOTE: We need to compare this encoding with the one Dario already has

#The algorithm works as follows:
# 1) Preprocess the documents: get rid of stop-words, get histograms, get the most frequent words across documents
# 2) Word encoding: each word has an associated vector Xi = [E(xi-1|xi), xi, E(xi+1|xi)]

from os import listdir
from os.path import isfile, join
import nltk
import operator
import re
import random
import itertools

def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ...

    Return consecutive overlapping pairs from *iterable*.

    Fix: itertools.izip exists only on Python 2; the builtin zip is
    equivalent here (callers only ever iterate the result in a for loop,
    so eager-vs-lazy makes no observable difference) and works on both
    Python 2 and Python 3.
    """
    a, b = itertools.tee(iterable)
    next(b, None)  # advance the second iterator by one element
    return zip(a, b)

def get_files(my_path):
  """Return the paths of the regular files directly inside my_path.

  Directories and other non-file entries are skipped; paths are returned
  in the order listdir() yields them.
  """
  file_paths = []
  for entry in listdir(my_path):
    full_path = join(my_path, entry)
    if isfile(full_path):
      file_paths.append(full_path)
  return file_paths

def get_words_document(document):
  """Return the entire text content of the file at path *document*.

  Fix: the original opened the file and never closed it, leaking the
  handle; a context manager closes it deterministically.
  """
  with open(document, 'r') as doc_file:
    return doc_file.read()

def get_important_words(words):
  """Tokenize raw text and drop English stop-words (case-insensitive).

  Returns the list of surviving tokens in their original order and case.

  Fix: the stop-word list is converted to a set once, so the per-token
  membership test is O(1) instead of O(len(stopwords)).
  """
  tokens_in_file = nltk.word_tokenize(words)
  stop_words = set(nltk.corpus.stopwords.words('english'))
  return [w for w in tokens_in_file if w.lower() not in stop_words]

 
# Script configuration and accumulator state for the preprocessing pass.
my_dir = './data'
files_list = get_files(my_dir)
# A word counts as "popular" only if it appears in at least this many
# documents; len(files_list) means it must appear in EVERY document.
threshold = len(files_list)
common_words = {}           # word -> total occurrence count across all documents
tokens_wo_stop_words = []   # per-document token lists, parallel to files_list
i = 0                       # running index of the document being processed

##########################
# DOCUMENT PREPROCESSING
##########################
# For each document: strip stop-words, remember its token list, and fold
# its tokens into the global word histogram.
for doc_path in files_list:
  # Fix: renamed loop variable from `file`, which shadowed the builtin.
  words = get_words_document(doc_path)
  doc_tokens = get_important_words(words)
  tokens_wo_stop_words.append(doc_tokens)

  # Histogram update; dict.get replaces the manual if/else membership test.
  for word in doc_tokens:
    common_words[word] = common_words.get(word, 0) + 1

  i += 1

#Get the top (at most) 50 words by global frequency.
#Fixes: iteritems() is Python-2-only (items() works on both); the original
#if/else was redundant (the else branch re-assigned the same value) and a
#plain slice already handles fewer than 50 distinct words.
sorted_common_words = sorted(common_words.items(), key=operator.itemgetter(1), reverse=True)
top_common_words = sorted_common_words[:50]

#Get popular words across all documents
useful_words = []
for word,_ in top_common_words:
  appear = 0
  for i in xrange(len(files_list)):
    if word in tokens_wo_stop_words[i]:
      appear += 1

  if appear >= threshold:
    useful_words.append(word) 

print useful_words


##########################
# WORD ENCODING
##########################
#Create the encoding record for each useful word:
#  'xi'       — this word's own random code vector
#  'prev'/'next' — running sums of neighbouring words' code vectors
#  'num_prev'/'num_next' — how many neighbours were summed (for averaging)
#Fixes: redundant float() around random.random(); [0 for i in xrange(10)]
#replaced by [0.0] * VEC_DIM; the hard-coded dimensionality named once.
VEC_DIM = 10  # dimensionality of each random code vector
word_encoding = {}
for word in useful_words:
  word_encoding[word] = {
    'prev': [0.0] * VEC_DIM,
    'num_prev': 0,
    'xi': [random.random() for _ in range(VEC_DIM)],
    'next': [0.0] * VEC_DIM,
    'num_next': 0,
  }
  
#Analyze the 3-gram
#Flatten every per-document token list into one continuous word stream.
huge_list = list(itertools.chain.from_iterable(tokens_wo_stop_words))

#Obtain the Xi dictionary: for every adjacent pair of useful words,
#accumulate each neighbour's code vector into the other's context sum.
pairs = pairwise(huge_list)
for left_word, right_word in pairs:
  if left_word in word_encoding and right_word in word_encoding:
    left_enc = word_encoding[left_word]
    right_enc = word_encoding[right_word]

    # left word's 'next' context gains the right word's code vector
    left_enc['next'] = [p + q for (p, q) in zip(left_enc['next'], right_enc['xi'])]
    left_enc['num_next'] += 1

    # right word's 'prev' context gains the left word's code vector.
    # BUG FIX: the original incremented pair[0]'s num_prev here, so the
    # right word's 'prev' counter never advanced.
    right_enc['prev'] = [p + q for (p, q) in zip(right_enc['prev'], left_enc['xi'])]
    right_enc['num_prev'] += 1

# Average the accumulated context sums into conditional expectations.
# BUG FIX: the original loop read `for words in ...` but indexed with the
# stale variable `word`, so only one word was (repeatedly) normalized.
# Also guard num_prev/num_next == 0 to avoid ZeroDivisionError for words
# that were never adjacent to another useful word.
for word in word_encoding:
  enc = word_encoding[word]
  if enc['num_prev'] > 0:
    enc['prev'] = [p / enc['num_prev'] for p in enc['prev']]
  if enc['num_next'] > 0:
    enc['next'] = [p / enc['num_next'] for p in enc['next']]

#Convert Xi to a list
final_word_encoding = {}
for word in word_encoding:
  final_word_encoding[word] = word_encoding[word]['prev'] + word_encoding[word]['xi'] + word_encoding[word]['next']

#This is the final vector of words that need to be trained
print final_word_encoding
