import re
import sys

class Indexer(object):
  """Builds per-chapter word-frequency indexes from a text file and ranks
  chapters for a search term, weighted by accumulated user "likes".

  Chapter boundaries are lines that exactly match "Chapter <1-2 digits>".
  """

  def __init__(self, file_to_index):
    """Reads `file_to_index` and builds the per-chapter indexes.

    Args:
      file_to_index: path to the text file to index.
    """
    # Chapter name -> {word: frequency} for that chapter's text.
    self.chapter_index = {}
    # Total token count, plus one extra count per unique word per chapter
    # (Laplace smoothing — see _finish_chapter).
    self.total_words = 0
    # Chapter name -> number of word tokens in that chapter.
    self.chapter_word_count = {}
    # Chapter name -> accumulated like weight; seeded to 1 per chapter so
    # every chapter keeps a non-zero like probability.
    self.chapter_like = {}

    # Lines like "Chapter 7" (one or two digits, whole-line match) start a
    # new chapter.
    chapter_matcher = re.compile(r'^Chapter [0-9]{1,2}$')

    current_chapter = None
    current_chapter_data = ''

    # Read line by line, flushing the accumulated chapter text whenever a
    # chapter boundary is reached.
    with open(file_to_index) as in_file:
      for line in in_file:
        line = line.strip()
        if chapter_matcher.match(line):
          self._finish_chapter(current_chapter, current_chapter_data)
          current_chapter_data = ''
          current_chapter = line
        else:
          current_chapter_data += ' ' + line

    # Flush the final chapter.
    self._finish_chapter(current_chapter, current_chapter_data)

    for chapter in self.chapter_word_count:
      self.chapter_like[chapter] = 1
    self.total_likes = len(self.chapter_word_count)

  def _finish_chapter(self, chapter, data):
    """Indexes accumulated chapter text.

    No-op for empty text and for any preamble appearing before the first
    chapter heading (chapter is None).
    """
    if chapter is None or not data.strip():
      return
    (num_words, index) = self.build_index(data)
    self.chapter_index[chapter] = index
    self.chapter_word_count[chapter] = num_words
    # Add the number of unique words once more. This will take care of
    # having non zero probability for missing words. Laplace estimator.
    self.total_words += num_words + len(index)

  def build_index(self, text):
    """Returns (token_count, unigram_frequency_dict) for the given text."""
    # split() with no argument collapses runs of whitespace, so blank lines
    # never produce empty-string "words".
    words = text.lower().split()
    index = {}
    self.update_index(words, index)
    # TODO(rohith): Use bigram frequencies if required.
    return (len(words), index)

  def update_index(self, words, index):
    """Accumulates frequency counts for `words` (lowercase) into `index`."""
    for word in words:
      index[word] = index.get(word, 0) + 1

  def add_like(self, chapter, word, like_value):
    """Adds a like by modifying the chapter like weight.

    When the term occurs in the chapter, its in-chapter frequency is also
    boosted by one.
    """
    print('Add like - %s Term: "%s" Value:%f' % (chapter, word, like_value))
    self.chapter_like[chapter] += like_value
    self.total_likes += like_value

    # TODO(varun): How do we increment the word count? by 1 or by like_value
    # Index keys are lowercase, so normalize the term before the lookup.
    chapter_index = self.chapter_index[chapter]
    word = word.lower()
    if word in chapter_index:
      chapter_index[word] += 1
      self.chapter_word_count[chapter] += 1

  def get_chapter_like_probability(self, chapter):
    """P(chapter), estimated from accumulated likes."""
    return float(self.chapter_like[chapter]) / self.total_likes

  def retrieve_chapters(self, search_term):
    """Returns [(chapter, probability)] sorted by descending relevance.

    Score per chapter:
      P(chapter) * sum over words of P(search_term|word) * P(word|chapter)
    """
    search_term = search_term.lower()
    output = []
    for chapter, chapter_index in self.chapter_index.items():
      chapter_word_count = self.chapter_word_count[chapter]
      chapter_prob = self.get_chapter_like_probability(chapter)

      num_words = len(chapter_index)
      current_prob = 0.0
      for word, word_chapter_freq in chapter_index.items():
        # Near-certain match for the term itself; spread the remaining mass
        # thinly over the other words. max(..., 1) guards a one-word chapter
        # against division by zero.
        if search_term == word:
          search_term_given_word = 0.999
        else:
          search_term_given_word = 0.001 / max(num_words - 1, 1)

        word_given_chapter = float(word_chapter_freq) / chapter_word_count
        current_prob += search_term_given_word * word_given_chapter

      output.append((chapter, chapter_prob * current_prob))

    return sorted(output, key=lambda value: value[1], reverse=True)

def update_likes(like_results, results, indexer, term):
  """Update the likes."""
  if like_results:
    like_value = round(1.0 / len(like_results), 2)
    print
    for result_num in like_results:
      indexer.add_like(results[int(result_num) - 1][0], term, like_value)
    print

def main():
  """Interactive search loop.

  Prompts for a term, shows the top results, collects likes, and feeds
  them back into the indexer. Runs until interrupted.
  """
  indexer = Indexer(sys.argv[1])
  result_count = 5
  while True:
    term = input('Term: ')
    results = indexer.retrieve_chapters(term)
    for count, result in enumerate(results[:result_count], start=1):
      print('%d\t%s\n' % (count, result))

    print('================')
    like_results = []
    response = input('Like? (y/n): ')
    while response == 'y':
      user_input = input('<res-num>: ')
      # Ignore non-numeric input instead of crashing the loop.
      try:
        result_num = int(user_input)
      except ValueError:
        result_num = 0
      # Accept only result numbers that were actually displayed.
      if 0 < result_num <= result_count:
        like_results.append(result_num)

      response = input('Like? (y/n): ')

    update_likes(like_results, results, indexer, term)
   
    
if __name__ == "__main__":
  main()
