import nltk
from nltk.corpus import brown
from readability_tool import *
import pickle
from sax import *

# Interface for feature generators. This interface
# has to be implemented by the special generators.
# This is the expectation from the FeatureManager.
class FeatureGenerator:
  """Interface for feature generators.

  Every specialized generator must subclass this and implement generate();
  this is the contract the FeatureManager relies on.
  """

  def __init__(self, name):
    # Human-readable generator name, used to identify the generator.
    self.name = name

  def generate(self, text):
    """Returns a dict of feature name -> value for the given text.

    Subclasses must override this. Raising NotImplementedError (the
    exception class) rather than NotImplemented (a singleton constant,
    which is not an exception and would produce a TypeError in Python 3).
    """
    raise NotImplementedError


# Specialized FeatureGenerators
#
# Baseline feature:
# Top 200 stemmed discretized trigram feature generator
class StemmedTrigramGenerator(FeatureGenerator):
  """Baseline feature: discretized frequencies of the most frequent
  stemmed word n-grams (trigrams by default) in a text.
  """

  def __init__(self, n = 3, top_k = 100):
    # n: size of the n-grams.
    # top_k: how many of the most frequent n-grams to keep.
    # NOTE(review): the feature-name prefix says 'top_200' but the code has
    # always kept only 100 n-grams; top_k defaults to 100 to preserve that,
    # and the stored key strings are left unchanged for compatibility.
    FeatureGenerator.__init__(self, "StemmedTrigramGenerator")
    self.n = n
    self.top_k = top_k
    self.stemmer = nltk.PorterStemmer()

  def generate(self, text):
    """Returns a dict mapping n-gram feature names to discretized scores."""
    # Segment the text into words.
    tokens = nltk.word_tokenize(text)

    # Stem and lowercase the words.
    stemmed_tokens = [self.stemmer.stem(token).lower() for token in tokens]

    # Create ngrams (empty when the text has fewer than n tokens).
    last_start = len(stemmed_tokens) - self.n
    stemmed_ngrams = [' '.join(stemmed_tokens[i:i + self.n])
                      for i in range(last_start + 1)]

    # Calculate the frequency of ngrams.
    fdist = nltk.FreqDist(stemmed_ngrams)

    features = {}
    if not fdist:
      return features

    # Median frequency. The original code indexed an *unsorted*
    # FreqDist.values() list, so sort explicitly before taking the middle.
    frequencies = sorted(fdist.values())
    median = frequencies[len(frequencies) // 2]

    # Keep the top_k most frequent ngrams from the text; sorting by count
    # avoids relying on the deprecated frequency-ordered FreqDist.keys().
    ranked = sorted(fdist.items(), key=lambda item: item[1], reverse=True)
    for ngram, ngram_freq in ranked[:self.top_k]:
      # Discretize into [0, 10); the epsilon guards against division by zero.
      features['top_200_ngrams_%s' % ngram] = int(
          float(ngram_freq) * 10 / float(ngram_freq + median + 0.00000001))

    return features

class PosNTagGenerator(FeatureGenerator):
  """Discretized frequencies of the most frequent part-of-speech tag
  n-grams (trigrams by default), tagged with a Brown-corpus unigram tagger.
  """

  def __init__(self, n = 3, top_k = 100):
    # n: size of the tag n-grams.
    # top_k: how many of the most frequent tag n-grams to keep.
    # NOTE(review): feature names say 'top_200' but only 100 were ever kept;
    # top_k defaults to 100 to preserve behavior, key strings unchanged.
    FeatureGenerator.__init__(self, 'PosNTagGenerator')
    self.n = n
    self.top_k = top_k
    # Unigram POS tagger trained on the Brown corpus (built once, up front).
    self.tagger = nltk.UnigramTagger(brown.tagged_sents())

  def generate(self, text):
    """Returns a dict mapping tag n-gram feature names to discretized scores."""
    tokens = nltk.word_tokenize(text)
    tagged_tokens = self.tagger.tag(tokens)

    # Consider only the tags.
    # TODO(Goutham): None returned for certain POS.
    # Currently treating None as a pos tag (str(None) -> 'None').
    tags = [str(tag) for word, tag in tagged_tokens]

    last_start = len(tags) - self.n
    ntags = [' '.join(tags[i:i + self.n]) for i in range(last_start + 1)]

    # Calculate frequency of ntags.
    fdist = nltk.FreqDist(ntags)

    ntag_features = {}
    if not fdist:
      return ntag_features

    # Median frequency; sort explicitly (the original indexed an unsorted
    # FreqDist.values() list despite the variable's name).
    frequencies = sorted(fdist.values())
    median = frequencies[len(frequencies) // 2]

    # Keep the top_k most frequent tag n-grams, ranked by explicit sort
    # rather than the deprecated frequency-ordered FreqDist.keys().
    ranked = sorted(fdist.items(), key=lambda item: item[1], reverse=True)
    for ntag, ntag_freq in ranked[:self.top_k]:
      # Discretize into [0, 10); epsilon guards against division by zero.
      ntag_features['top_200_ntags_%s' % ntag] = int(
          float(ntag_freq) * 10 / float(ntag_freq + median + 0.00000001))

    return ntag_features

class StopWordsFrequencyGenerator(FeatureGenerator):
  """Per-stopword discretized counts plus an overall stopword-percentage
  feature ('author_stopwords_percent').
  """

  def __init__(self):
    # Call the base initializer so this generator has a name like its
    # siblings (the original skipped it).
    FeatureGenerator.__init__(self, 'StopWordsFrequencyGenerator')
    # A set gives O(1) membership tests; the corpus call returns a list.
    self.stopwords_list = set(nltk.corpus.stopwords.words('english'))

  def generate(self, text):
    """Returns a dict of stopword features, all discretized against the
    median value (the percentage feature is included in that pass, matching
    the original behavior).
    """
    smap = {}
    tokens = nltk.word_tokenize(text)
    total_words = 1.0  # Start at 1 so the percentage divisor is never 0.
    num_stopwords = 0.0
    for token in tokens:
      total_words += 1
      if token in self.stopwords_list:
        num_stopwords += 1
        smap[token] = smap.get(token, 0) + 1
    # Overall stopword density on a 0-10 scale. (The original assigned this
    # twice with identical values; once is enough.)
    smap['author_stopwords_percent'] = int(num_stopwords * 10 / total_words)

    # Median of all feature values (the percentage entry above guarantees
    # the map is non-empty at this point).
    values = sorted(smap.values())
    median = values[len(values) // 2]

    # Discretize every feature relative to the median; epsilon guards
    # against division by zero.
    for key in list(smap.keys()):
      v = smap[key]
      smap[key] = int(v * 10 / float(v + median + 0.00000001))
    return smap

class TfIafGenerator(FeatureGenerator):
  """Looks up precomputed tf-iaf (term frequency / inverse author
  frequency) scores for each (author, stemmed trigram) pair found in the
  text. Every known pair is emitted; pairs absent from the text score 0.
  """

  def __init__(self):
    # Call the base initializer so this generator has a name like its
    # siblings (the original skipped it).
    FeatureGenerator.__init__(self, 'TfIafGenerator')
    self.n = 3  # trigrams always.
    self.stemmer = nltk.PorterStemmer()
    self.author_list = [
      'Andrew.Lang.author.dir',
      'Charles.Kingsley.author.dir',
      'Charlotte.Mary.Yonge.author.dir',
      'G.K.Chesterton.author.dir',
      'H.G.Wells.author.dir',
      'Jacob.Abbott.author.dir',
      'John.Morley.author.dir',
      'John.Ruskin.author.dir',
      'R.M.Ballantyne.author.dir',
      'Robert.Louis.Stevenson.author.dir',
      'Samuel.Vaknin.author.dir',
      'Thomas.Carlyle.author.dir',
      'Thomas.Henry.Huxley.author.dir',
      'William.Dean.Howells.author.dir',
      'William.Henry.Giles.Kingston.author.dir'
    ]

    # Pickle files are binary: open in 'rb' and close the handle promptly
    # (the original leaked a text-mode file object).
    with open('tfiafs.p', 'rb') as pickle_file:
      self.tf_iaf_map = pickle.load(pickle_file)

  def generate(self, text):
    """Returns a dict keyed like tf_iaf_map ('<author> <ngram>') with the
    precomputed score for pairs whose ngram occurs in the text, else 0.
    """
    # Segment the text into words.
    tokens = nltk.word_tokenize(text)

    # Stem and lowercase the words.
    stemmed_tokens = [self.stemmer.stem(token).lower() for token in tokens]

    # Create ngrams.
    last_start = len(stemmed_tokens) - self.n
    stemmed_ngrams = [' '.join(stemmed_tokens[i:i + self.n])
                      for i in range(last_start + 1)]

    # Every known (author, ngram) feature defaults to 0.
    features = dict.fromkeys(self.tf_iaf_map, 0)

    for ngram in stemmed_ngrams:
      for author in self.author_list:
        author_ngram = author + ' ' + ngram
        if author_ngram in features:
          features[author_ngram] = self.tf_iaf_map[author_ngram]
    return features


# Reading indices
# Sax discretized scores for various indices.  
# GenericReadingIndexGenerator
class GenericReadingIndexGenerator(FeatureGenerator):
  """SAX-discretized scores for six standard readability indices.

  Each index value from ReadabilityTool is discretized through a Saxer
  loaded from a precomputed .sax.p file.
  """

  def __init__(self):
    FeatureGenerator.__init__(self, 'GenericReadingIndexGenerator')
    self.rtool = ReadabilityTool()
    # Feature key -> SAX discretizer for that readability index.
    self.saxer = {
      'ari': Saxer('ARIIndex.sax.p'),
      'smog': Saxer('SmogIndex.sax.p'),
      'fre': Saxer('FleschReadingEaseIndex.sax.p'),
      'fkgl': Saxer('FleschKincaidGradeLevelIndex.sax.p'),
      'gfi': Saxer('GunningFogIndex.sax.p'),
      'cli': Saxer('ColemanLiauIndex.sax.p'),
    }

  def generate(self, text):
    """Returns a dict of discretized readability scores, one per index."""
    self.rtool.set_text(text)
    raw_scores = {
      'ari': self.rtool.ARI(text),
      'smog': self.rtool.SMOGIndex(text),
      'fre': self.rtool.FleschReadingEase(text),
      'fkgl': self.rtool.FleschKincaidGradeLevel(text),
      'gfi': self.rtool.GunningFogIndex(text),
      'cli': self.rtool.ColemanLiauIndex(text),
    }
    r_index = {}
    for key, score in raw_scores.items():
      r_index[key] = self.saxer[key].saxify_value(score)
    return r_index
