# BigData course project
# Encoding algorithm based on the WebSOM paper

#NOTE: This code should be improved. It was just used as a prototype

#The algorithm works as follows:
# 1) Preprocess the documents: get rid of stop-words
# 2) For each document get a histogram of words that are in the word map


from os import listdir
from os.path import isfile, join
import nltk
import operator
import re
import random
import itertools

def get_words_document(document):
  """Return the full text of the file at path *document*.

  Uses a context manager so the handle is always closed (the original
  opened the file and never closed it — a resource leak).
  """
  with open(document, 'r') as f:
    return f.read()

def get_important_words(words):
  """Tokenize *words* and return the tokens that are not English stop-words.

  Stop-word comparison is case-insensitive; the returned tokens keep
  their original casing and order.

  NOTE(review): requires the NLTK 'punkt' and 'stopwords' data to be
  downloaded — confirm the deployment environment has them.
  """
  tokens_in_file = nltk.word_tokenize(words)
  # Build the stop-word set once: O(1) membership test per token instead
  # of an O(n) scan of the list nltk returns.
  stop_words = set(nltk.corpus.stopwords.words('english'))
  return [w for w in tokens_in_file if w.lower() not in stop_words]

 
# Path of the document to encode. Renamed from `file`, which shadowed the
# builtin of the same name.
input_path = './data/doc_00.txt'

##########################
# DOCUMENT PREPROCESSING
##########################
#Get rid of stop-words
words = get_words_document(input_path)
filtered_words = get_important_words(words)

#########################
# DOCUMENT ENCODING
#########################
# Histogram over the word map: count, per map slot, how many of the
# document's filtered words landed there.
# NOTE(review): `som` (word -> slot index) and `som_size` are not defined
# anywhere in this file — they must be provided before this point; verify
# against the rest of the project.
# Fixed here: the original line had a stray ']' (SyntaxError) and used the
# Python-2-only `xrange`.
encoded_document = [0] * som_size
for word in filtered_words:
  if word in som:
    encoded_document[som[word]] += 1


