# coding: utf-8
import urllib2
import json
from BeautifulSoup import BeautifulStoneSoup
from numpy import array
from numpy.random import standard_normal, rand
import sys
sys.path.append("..")
from tokenizer import Tokenizer
from unique import unique
import math
import copy
import numpy
import nltk
import re

def print_prob(str, prob):
  """Print the given message with probability `prob` (one uniform draw).

  NOTE(review): the parameter shadows the builtin `str`; kept for
  interface compatibility with existing callers.
  """
  should_print = rand() < prob
  if should_print:
    print(str)

def add_fdist(f1, f2):
  """Accumulate every count of `f2` into `f1`, in place.

  Uses the pre-NLTK-3 FreqDist API (`samples()` / `inc()`), matching the
  NLTK version the rest of this file targets.
  """
  for sample in f2.samples():
    count = f2[sample]
    f1.inc(sample, count)


class EntropyCalculator:
  """Computes, per word, the entropy of its distribution across chapters.

  Each chapter is a dict with at least a "text" key holding a token
  sequence; get_tokens() attaches an nltk FreqDist under "fdist".
  Entropies are normalized by log(#chapters) so they lie in [0, 1]:
  0 = the word is concentrated in one chapter, 1 = spread uniformly.
  """

  def __init__(self, chapters):
    # chapters: list of dicts, each with a "text" token sequence.
    self.chapters = chapters
    self.tokens = None
    self.reverse_tokens = None

    self.get_tokens()

  def get_tokens(self):
    """Build each chapter's frequency distribution and the flat token list.

    Idempotent: returns immediately once self.tokens is populated.
    """
    if self.tokens:
      return

    self.tokens = []
    for chapter in self.chapters:
      chapter["fdist"] = nltk.FreqDist(chapter["text"])
      self.tokens.extend(chapter["text"])

    # NOTE(review): self.tokens keeps duplicates, so a repeated token's
    # later occurrence overwrites the earlier index here -- presumably a
    # unique vocabulary was intended; confirm against callers.
    self.reverse_tokens = dict(zip(self.tokens, range(len(self.tokens))))

  def get_chapter_probabilities_for(self, word):
    """Return P(chapter | word): the word's relative frequency in each
    chapter, normalized to sum to 1.

    For a word that never occurs (or yields NaN frequencies) a warning is
    printed and a list of zeros is returned; the original fell through to
    a ZeroDivisionError right after printing the same warning.
    """
    # List comprehension instead of map(): under Python 3 a lazy map would
    # be exhausted by sum() before the second pass below.
    frequencies = [c["fdist"][word] / float(c["fdist"].N())
                   for c in self.chapters]
    sum_freq = sum(frequencies)
    if sum_freq == 0 or any(numpy.isnan(x) for x in frequencies):
      print(frequencies)
      print(word)
      print(u'Algo está jodido acá para %s' % word)
      # Nothing meaningful to normalize; avoid dividing by zero.
      return [0.0] * len(self.chapters)

    return [f / sum_freq for f in frequencies]

  def get_entropy_for(self, token):
    """Normalized Shannon entropy of the token's chapter distribution.

    Returns a value in [0, 1]. For fewer than two chapters the normalizer
    log(len(chapters)) would be zero (or undefined), so 0.0 is returned.
    """
    probabilities = self.get_chapter_probabilities_for(token)
    if len(self.chapters) < 2:
      return 0.0

    # sum of p * log(p), skipping zero probabilities (lim p->0 p*log p = 0).
    sum_prob = 0.0
    for pi in probabilities:
      if pi != 0:
        sum_prob += pi * math.log(pi)

    return -(1 / math.log(len(self.chapters))) * sum_prob

  def calculate_entropies(self, words):
    """Return {word: entropy} for every word in `words`."""
    return dict((word, self.get_entropy_for(word)) for word in words)
