import nltk
import glob
import re
import fnmatch
import os
import matplotlib

import sys
sys.path.append("..")
import tokenizer
from lemmatizer import Lemmatizer

from nltk.corpus import stopwords
from BeautifulSoup import BeautifulStoneSoup


# Select the GTKCairo backend; this must run before any pyplot import,
# otherwise matplotlib locks in its default backend.
matplotlib.use('GTKCairo')

# Shared WordNet lemmatizer instance used by lemmatize_sentence() below.
wnl = nltk.WordNetLemmatizer()

def get_raw_text(filename):
  """Read an HTML cable file and return its lower-cased plain text.

  Everything before the "<table class='cable'>" marker is discarded.
  Returns "" when the marker is absent.
  """
  # Context manager guarantees the handle is closed even if read() raises
  # (the original opened/closed by hand and leaked on error).
  with open(filename) as f:
    original = f.read()

  ind = original.find("<table class='cable'>")
  if ind == -1:
    return ""

  sin_header = original[ind:]

  # Strip markup, then normalise XML entities via BeautifulStoneSoup.
  raw = nltk.clean_html(sin_header)
  raw = BeautifulStoneSoup(raw, convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
  raw = raw.lower()
  raw = raw.replace('&#x000a;','\n')
  return raw

def tokenize(raw_text):
  """Split raw_text into word tokens.

  Keeps only tokens that start with a word character and are longer than
  one character (drops punctuation and single letters).
  """
  # Raw string r'\w+' avoids the invalid-escape DeprecationWarning that
  # the bare '\w+' literal triggers on modern Python.
  return [token for token in nltk.wordpunct_tokenize(raw_text)
          if re.match(r'\w+', token) and len(token) > 1]

def number_of_vowels(word):
    """Return how many lowercase vowels (a, e, i, o, u) appear in *word*."""
    return sum(1 for ch in word if ch in "aeiou")
"""
def get_interesting_tokens(tokens):
  ret = [token for token in tokens if number_of_vowels(token)>=3]
  return ret

def get_tokens_from_files(filenames):
  raw_texts = map(lambda fn: get_raw_text(fn), filenames)
  tokens = map(lambda text: tokenize(text), raw_texts)
  return reduce(lambda t1, t2: t1+t2, tokens)
"""
from nltk.corpus.reader.wordnet import POS_LIST
def pos2wn(pos):
    """Map a Penn Treebank POS tag to a WordNet POS letter.

    Adjective tags map to 'a'; tags whose first letter is a valid WordNet
    POS map to that letter; anything else defaults to noun ('n').
    """
    # Bug fix: the original compared pos == 'JJ' exactly, so comparative
    # and superlative adjectives (JJR, JJS) fell through to the default.
    if pos.startswith('JJ'):
        return 'a'
    first = pos[0].lower()
    if first in set(POS_LIST):
        return first
    return 'n'

def lemmatize_sentence(sentence):
  """Tokenize and POS-tag *sentence*, then return the WordNet lemma of each token."""
  tagged = nltk.pos_tag(nltk.wordpunct_tokenize(sentence))
  return [wnl.lemmatize(word, pos2wn(tag)) for word, tag in tagged]

def process_sentence(sentence, fdist, lemmatize):
  """Tokenize one sentence, drop stopwords, count the survivors, return them.

  Params:
    sentence: raw sentence text.
    fdist: an nltk.FreqDist updated in place with lower-cased tokens.
    lemmatize: when true, WordNet-lemmatize the tokens first.

  Returns the list of kept tokens (stopwords and 1-char tokens removed).
  """
  if lemmatize:
    lemmas = lemmatize_sentence(sentence)
  else:
    lemmas = nltk.wordpunct_tokenize(sentence)

  # A set gives O(1) membership tests; the original tested against the
  # raw stopword list, O(n) per token.
  english_stopwords = set(stopwords.words('english'))
  # List comprehension instead of filter(): on Python 3, filter() returns
  # an iterator that the counting loop below would exhaust, so the caller
  # would receive an empty result.
  lemmas = [x for x in lemmas if x not in english_stopwords and len(x) > 1]

  for token in lemmas:
    fdist.inc(token.lower())  # NOTE(review): FreqDist.inc is the NLTK 2 API
  return lemmas

def parse_text(raw_text, fdist, lemmatize):
  """Split *raw_text* into words, tally each lower-cased word in *fdist*,
  and return the word list.

  Uses the project's Lemmatizer when lemmatize is true, Tokenizer otherwise.
  """
  if lemmatize:
    separator = Lemmatizer(raw_text)
  else:
    # Bug fix: `Tokenizer` was referenced unqualified but never imported;
    # the module is imported as `tokenizer` at the top of the file, so the
    # bare name raised NameError on the non-lemmatizing path.
    separator = tokenizer.Tokenizer(raw_text)

  words = separator.get_words()
  for token in words:
    fdist.inc(token.lower())  # NOTE(review): FreqDist.inc is the NLTK 2 API

  return words

def parse_files(filenames, lemmatize=True):
  """Parse every file in *filenames*, accumulating one shared frequency
  distribution and the concatenated word list across all files."""
  fdist = nltk.FreqDist()
  all_words = []
  for name in filenames:
    all_words.extend(parse_text(get_raw_text(name), fdist, lemmatize=lemmatize))
  return fdist, all_words

def get_fdist_and_stems():
  """Frequency distribution and lemmatized stems for every wiki page."""
  return parse_files(glob.glob('wikis/*.html'))

def get_fdist_and_tokens():
  """Frequency distribution and raw (un-lemmatized) tokens for every wiki page."""
  return parse_files(glob.glob('wikis/*.html'), lemmatize=False)

  
def get_files_recursively_into(directory, pattern):
  """Walk *directory* recursively and return the full paths of every file
  whose basename matches the shell glob *pattern* (e.g. '*.html')."""
  return [os.path.join(dirpath, fname)
          for dirpath, _subdirs, fnames in os.walk(directory)
          for fname in fnmatch.filter(fnames, pattern)]

def draw_dispersion_plots(filenames):
  """For each file, plot the dispersion of its first five non-stopword tokens."""
  # Set membership is O(1); the original tested against the raw list.
  english_stopwords = set(stopwords.words('english'))

  total_count = len(filenames)
  # Bug fix: `count` was initialised to 0 but never incremented, so the
  # progress line always printed "Parsing 0 of N files".
  for count, filename in enumerate(filenames):
    print("Parsing %d of %d files" % (count, total_count))
    raw_text = get_raw_text(filename)
    # NOTE(review): nltk.Text over a raw string iterates characters, not
    # tokens — looks like nltk.Text(tokenize(raw_text)) was intended;
    # preserved as-is pending confirmation.
    nltk_text = nltk.Text(raw_text)
    tokens = [word for word in tokenize(raw_text) if word not in english_stopwords]
    nltk_text.dispersion_plot(tokens[:5])
    

def main():
  """Parse every wiki page and plot the cumulative frequency of the top 50 words."""
  wiki_files = glob.glob('wikis/*.html')
  freq_dist, _stems = parse_files(wiki_files)
  #draw_dispersion_plots(wiki_files)
  freq_dist.plot(50, cumulative=True)

if __name__ == '__main__':
  main()