# coding: utf-8
import urllib2
import json
from BeautifulSoup import BeautifulStoneSoup
from numpy import array
from numpy.random import standard_normal, rand
import sys
sys.path.append("..")
from tokenizer import Tokenizer
from unique import unique
import math
import copy
import numpy
import nltk
import re
from scipy.sparse import *
from scipy import *

def print_prob(str, prob):
  """Print *str* with probability *prob* — random sampling of log lines
  so that huge loops only emit an occasional progress message."""
  should_print = rand() < prob
  if should_print:
    print(str)

def add_fdist(f1, f2):
  """Merge frequency distribution *f2* into *f1* in place.

  Uses the old-style nltk FreqDist API: samples() to enumerate tokens,
  inc(token, n) to bump a count by n.
  """
  for sample in f2.samples():
    count = f2[sample]
    f1.inc(sample, count)


class OccurrenceMatrixConstructor:
  """Builds a (token x chapter) occurrence matrix from tokenized chapters,
  plus an entropy-regularized (log-entropy weighted) variant.

  Each chapter is a dict with at least a 'text' key; get_tokens() adds a
  'fdist' key holding that chapter's frequency distribution.
  """

  def __init__(self, chapters):
    self.chapters = chapters
    # 0 acts as a "not computed yet" sentinel for both attributes.
    self.tokens = 0
    self.reverse_tokens = 0

  def get_tokens(self):
    """Tokenize every chapter, keep the 1000 most frequent tokens and
    build the token -> row-index lookup table. Idempotent."""
    if self.tokens != 0:
      return
    freq_dist = nltk.FreqDist()
    for chapter in self.chapters:
      tokenizer = Tokenizer(chapter['text'])
      tokenizer.add_filter(lambda t: t.isalpha())
      chapter["fdist"] = tokenizer.get_fdist()
      add_fdist(freq_dist, chapter["fdist"])
    # (The original also accumulated tokenizer.get_words() into a list
    # that was unconditionally overwritten below — dead work, removed.)

    # Old-style nltk FreqDist: keys() is sorted by decreasing frequency,
    # so this keeps the 1000 most frequent tokens. Newer nltk versions
    # would need freq_dist.most_common(1000) instead — TODO confirm.
    self.tokens = freq_dist.keys()[:1000]

    self.reverse_tokens = dict(zip(self.tokens, range(len(self.tokens))))

  def get_matrix(self):
    """Return (and cache in self.matrix) the raw occurrence matrix.

    Cell (i, j) holds the number of occurrences of token i in chapter j,
    stored as a scipy sparse dok_matrix of floats.
    """
    try:
      return self.matrix
    except AttributeError:
      self.get_tokens()
      self.matrix = dok_matrix((len(self.tokens), len(self.chapters)), dtype = float)
      print("Regularizing %d tokens for %d chapters" % (len(self.tokens), len(self.chapters)))
      # For each chapter, look up the words produced by the tokenizer
      # and record their occurrence counts in the matrix.
      for j, day in enumerate(self.chapters):
        print_prob("Iterating on chapter no %d / %d" %(j, len(self.chapters)), 0.003)
        fdist = day["fdist"]
        for token in fdist.samples():
          # Only the 1000 kept tokens have a row; `in` replaces the
          # py3-removed dict.has_key.
          if token in self.reverse_tokens:
            i = self.reverse_tokens[token]  # row index of this token
            self.matrix[i, j] = fdist[token]
      return self.matrix

  def get_chapter_probabilities_for(self, token):
    """Return, for each chapter, the probability that an occurrence of
    *token* falls in that chapter (per-chapter relative frequencies,
    normalized to sum to 1).

    Prints a warning for tokens that never occur or produce NaNs; a
    never-occurring token then raises ZeroDivisionError (as before).
    """
    # Relative frequency of the token inside each chapter. List
    # comprehensions instead of map(): the result is consumed more than
    # once below, which only worked with py2's eager map.
    frequencies = [c["fdist"][token] / float(c["fdist"].N()) for c in self.chapters]
    sum_freq = sum(frequencies)
    if sum_freq == 0 or any(numpy.isnan(x) for x in frequencies):
      print(u'Algo está jodido acá para %s' % token)

    probabilities = [f / sum_freq for f in frequencies]

    return probabilities

  def get_entropy_for(self, token):
    """Return the normalized entropy (in [0, 1]) of *token* across chapters:
    1 means evenly spread over all chapters, 0 means concentrated in one.
    """
    probabilities = self.get_chapter_probabilities_for(token)
    # Shannon sum, skipping zero probabilities (lim p->0 of p*log p is 0).
    # Same left-to-right accumulation as the original reduce().
    sum_prob = sum(pi * math.log(pi) for pi in probabilities if pi != 0)

    # Normalize by log(#chapters) so the value lands in [0, 1].
    # NOTE(review): len(self.chapters) == 1 gives log(1) == 0 -> ZeroDivisionError.
    ret_val = -(1 / math.log(len(self.chapters))) * sum_prob

    # BUG FIX: the original interpolated the undefined name `frequencies`
    # here, raising NameError whenever one of these sampled lines fired.
    if ret_val > 0.01:
      print_prob("Entropy non null for %s = %f \n probabilities %s" % (token, ret_val, probabilities), 0.00003)
    elif ret_val == 0:
      print_prob("Entropy NULL for %s = %f \n probabilities %s" % (token, ret_val, probabilities), 0.00003)
    return ret_val

  def calculate_entropies(self):
    """Compute and cache the entropy of every kept token in self.entropies."""
    self.entropies = {}
    for i, token in enumerate(self.tokens):
      self.entropies[token] = self.get_entropy_for(token)
      print_prob("Getting Entropy for token %s (%d / %d) = %f" % (token, i, len(self.tokens), self.entropies[token]), 0.03)

  def get_regularized_matrix(self):
    """Return (and cache) the log-entropy weighted matrix.

    Cell (i, j) becomes log(1 + count) / entropy(token_i); rows whose
    token has zero entropy keep their raw counts.

    NOTE(review): self.regularized_matrix aliases self.matrix (same
    dok_matrix object), so the raw matrix is modified in place.
    """
    try:
      return self.regularized_matrix
    except AttributeError:
      self.get_tokens()
      print("Getting Regular Matrix")
      self.regularized_matrix = self.get_matrix()
      print("Getting entropies..")
      self.calculate_entropies()

      for i, token in enumerate(self.tokens):
        print_prob("Regularizing %s" % token, 0.003)
        entropy = self.entropies[token]  # loop-invariant, hoisted out of the j loop
        if entropy != 0:
          for j in range(len(self.chapters)):
            self.regularized_matrix[i,j] = numpy.log(1+self.regularized_matrix[i,j])/entropy

      return self.regularized_matrix

