import nltk
import sys
import multiprocessing
sys.path.append("..")
from tokenizer import Tokenizer
import math

class ProcessToken:
  """Callable worker that tallies word and near-co-occurrence counts.

  Intended for use with a multiprocessing pool mapped over
  enumerate(tokens): `frequency` and `conjunct_frequency` are expected to
  be shared mappings (e.g. Manager dicts) guarded by `lock` and `lock2`
  respectively.
  """

  def __init__(self, tokens, fdist, lock, lock2, frequency, conjunct_frequency, words):
    self.tokens = tokens                          # full token sequence; used for the lookahead window
    self.fdist = fdist                            # kept for constructor compatibility; not read here
    self.lock = lock                              # guards `frequency`
    self.lock2 = lock2                            # guards `conjunct_frequency`
    self.conjunct_frequency = conjunct_frequency  # (w1, w2) -> count of w2 within 5 tokens after w1
    self.frequency = frequency                    # word -> occurrence count (pre-seeded by caller)
    self.words = words                            # set of tracked words

  def __call__(self, token_pair):
    """Process one (index, token) pair; returns it unchanged for map()."""
    index, token = token_pair
    print(index)  # progress trace; parenthesized form works in Python 2 and 3
    if token in self.words:
      # `with` releases the lock even if the shared-dict update raises;
      # the original bare acquire()/release() pair leaked the lock on error.
      with self.lock:
        self.frequency[token] = self.frequency[token] + 1
      # Count tracked words appearing within the 5 tokens after this one.
      for token2 in self.tokens[index + 1:index + 6]:
        if token2 in self.words:
          with self.lock2:
            pair = (token, token2)
            # Direct dict membership test replaces the O(n) `.keys()` scan.
            if pair not in self.conjunct_frequency:
              self.conjunct_frequency[pair] = 0
            self.conjunct_frequency[pair] += 1
    return token_pair

class ProcessTokenNonParallel:
  """Sequential counterpart of ProcessToken: same counting, no locking.

  `lock` and `lock2` are accepted (and stored) only so the constructor
  signature stays interchangeable with ProcessToken; they are never
  acquired here.
  """

  def __init__(self, tokens, fdist, lock, lock2, frequency, conjunct_frequency, words):
    self.tokens = tokens                          # full token sequence; used for the lookahead window
    self.fdist = fdist                            # kept for constructor compatibility; not read here
    self.lock = lock                              # unused in the sequential path
    self.lock2 = lock2                            # unused in the sequential path
    self.conjunct_frequency = conjunct_frequency  # (w1, w2) -> count of w2 within 5 tokens after w1
    self.frequency = frequency                    # word -> occurrence count (pre-seeded by caller)
    self.words = words                            # set of tracked words

  def __call__(self, token_pair):
    """Process one (index, token) pair; returns it unchanged."""
    index, token = token_pair
    if token in self.words:
      self.frequency[token] = self.frequency[token] + 1
      # Count tracked words appearing within the 5 tokens after this one.
      for token2 in self.tokens[index + 1:index + 6]:
        if token2 in self.words:
          pair = (token, token2)
          # dict.get collapses the O(n) `.keys()` scan + init + increment
          # of the original into one O(1) read-modify-write.
          self.conjunct_frequency[pair] = self.conjunct_frequency.get(pair, 0) + 1
    return token_pair




class AssociationMapCalculator:
  """Builds a word-association map from raw news text.

  The association score for (w1, w2) is a pointwise-mutual-information
  style quantity: log2(count(w1, w2) * corpus_size / (count(w1) * count(w2))),
  where count(w1, w2) is the number of times w2 occurs within the 5
  tokens following an occurrence of w1.
  """

  def __init__(self, raw_news, stopwords):
    self.raw_news = raw_news    # mapping: document key -> raw text
    self.stopwords = stopwords  # words excluded from the frequency distribution

  def tokenize_and_get_fdist(self):
    """Tokenize every document, accumulating self.tokens and self.fdist."""
    self.fdist = nltk.FreqDist()
    self.tokens = []

    for key, raw_text in self.raw_news.items():
      print("Tokenizing %s" % key)
      tokens = Tokenizer(raw_text).get_words()
      self.tokens += tokens
      for token in tokens:
        if token not in self.stopwords:
          # NOTE(review): FreqDist.inc() is the NLTK 2 API (removed in
          # NLTK 3, where this becomes `self.fdist[token] += 1`) —
          # confirm the project's NLTK version before changing.
          self.fdist.inc(token)

  def get_conjunct_frequency(self, word_set):
    """Count, for each ordered pair of words in `word_set`, how often the
    second occurs within 5 tokens after the first.

    Also (re)builds self.frequency (per-word counts over self.tokens).
    Requires self.tokens to be populated first.
    """
    print("Getting conjunct freq")
    self.conjunct_frequency = {}
    self.frequency = {}
    self.words = word_set

    for word in self.words:
      self.frequency[word] = 0

    for index, token in enumerate(self.tokens):
      if token in word_set:
        self.frequency[token] = self.frequency[token] + 1
        for token2 in self.tokens[index + 1:index + 6]:
          if token2 in self.words:
            pair = (token, token2)
            # dict.get replaces the original's O(n) `.keys()` scan.
            self.conjunct_frequency[pair] = self.conjunct_frequency.get(pair, 0) + 1

    return self.conjunct_frequency

  def get_association_map(self, words):
    """Return {(w1, w2): score} for pairs co-occurring more than 6 times.

    score = log2(pair_count * corpus_size / (count(w1) * count(w2))).
    """
    self.get_conjunct_frequency(words)

    print("Calculating association")  # fixed typo: was "Calculing"
    corpus_size = len(self.tokens)
    self.assoc_map = {}
    for (w1, w2), pair_count in self.conjunct_frequency.items():
      if pair_count > 6:  # drop rare pairs to reduce noise
        # Renamed: the original called the numerator `denom` and the
        # denominator `quot`, inverting their meaning.
        numerator = float(pair_count) * corpus_size
        denominator = self.frequency[w1] * self.frequency[w2]
        # math.log with an explicit base replaces log(x) / log(2).
        self.assoc_map[(w1, w2)] = math.log(numerator / denominator, 2)

    return self.assoc_map