import nltk
# coding: utf-8
from copy import deepcopy
import os
import sys
sys.path.append("..")
import sys
import string
from tokenizer import Tokenizer
from random import shuffle
from entropy_calculator import EntropyCalculator

def get_tokens(text):
  """Tokenize *text* into words, dropping tokens that are punctuation.

  Note: the filter uses substring membership against string.punctuation,
  so only tokens that appear verbatim inside that string (single
  punctuation chars, or runs like "!?") are removed.
  """
  tokenizer = Tokenizer(text)
  # `w not in` is the idiomatic spelling of `not w in`; same semantics.
  tokenizer.add_filter(lambda w: w not in string.punctuation)
  return tokenizer.get_words()


def split_into_chapters(tokens, window_size):
  """Split *tokens* into consecutive "chapters" of at most *window_size* tokens.

  Returns a list of dicts of the form {'text': [token, ...]} — this format
  is used as the "standard" chapter representation elsewhere in the project.

  Fixes two defects of the original:
  - `len(tokens) / window_size + 1` appended an empty trailing chapter
    whenever the token count was an exact multiple of window_size (and
    produced one empty chapter for empty input); ceiling division avoids it.
  - `//` keeps the arithmetic integral on both Python 2 and 3 (`/` yields
    a float on Python 3, breaking `range`).
  """
  no_of_chapters = (len(tokens) + window_size - 1) // window_size
  chapters = []
  for i in range(no_of_chapters):
    start = i * window_size
    chapters.append(tokens[start:start + window_size])

  return [{'text': chapter} for chapter in chapters]

def get_entropy(tokens, words, window_size):
  """Compute the per-word entropies of *words* over *tokens* split into
  fixed-size windows of *window_size* tokens each."""
  windowed_chapters = split_into_chapters(tokens, window_size)
  return EntropyCalculator(windowed_chapters).calculate_entropies(words)

def main():
  """Rank words of origin.txt by information value.

  Information value of a word = frequency * |H_normal - H_shuffled|,
  where H is the word's entropy over fixed-size windows of the normal
  vs. the shuffled token stream. Prints the maximum and the top 20.
  """
  # `with` guarantees the file is closed even if read() raises.
  with open("origin.txt") as f:
    text = f.read()

  tokens = get_tokens(text)
  fdist = nltk.FreqDist(tokens)
  shuffled_tokens = deepcopy(tokens)
  shuffle(shuffled_tokens)
  words = list(set(tokens))

  window_size = 1000

  entropies_of_normal_text = get_entropy(tokens, words, window_size)
  entropies_of_shuffled_text = get_entropy(shuffled_tokens, words, window_size)

  information_values = [(word, fdist[word] * abs(entropies_of_normal_text[word] - entropies_of_shuffled_text[word])) for word in words]

  # Bug fix: the original used "%.s", a precision-0 string conversion that
  # always formats the word as "" — "%s" prints the word itself.
  print("Maximum entropy = %s - %.7f" % max(information_values, key=lambda x: x[1]))

  # `cmp=` was removed in Python 3; key + reverse gives the same
  # descending-by-value ordering. Slicing replaces the manual break counter.
  top_20 = sorted(information_values, key=lambda x: x[1], reverse=True)[:20]
  for word, iv in top_20:
    print("%s = %.7f" % (word, iv))

if __name__ == "__main__":
  main()