class Tokenizer:
  """Tokenize raw text with NLTK and lazily cache the token list and
  their frequency distribution.

  Attributes:
    text: the raw input text to tokenize.
    tokens: cached list of lowercased tokens; empty until first access.
    fdist: cached nltk.FreqDist; 0 acts as the "not computed yet" sentinel
           (kept as 0 rather than None for backward compatibility).
  """

  def __init__(self, text):
    self.text = text
    self.tokens = []
    self.fdist = 0

  def tokenize(self, raw_text=None):
    """Return lowercased word tokens of *raw_text* (defaults to self.text).

    Keeps only tokens that start with a word character and are longer
    than one character, dropping punctuation-only and single-char tokens.
    """
    # First we need to split into sentences
    if raw_text is None:
      raw_text = self.text
    # Bug fix: original called the nonexistent builtin `lower(token)`;
    # use the str method instead. Raw string avoids escape warnings.
    return [token.lower() for token in nltk.wordpunct_tokenize(raw_text)
            if re.match(r'\w+', token) and len(token) > 1]

  def get_tokens(self):
    """Return the cached token list, computing it on first access."""
    if not self.tokens:
      # Bug fix: original referenced undefined names `tokenize`/`raw_text`.
      self.tokens = self.tokenize(self.text)
    return self.tokens

  def get_fdist(self):
    """Return the cached frequency distribution, computing it on first access."""
    # Bug fix: original tested `!= 0`, which (with fdist initialized to 0)
    # meant the distribution was never computed and 0 was always returned.
    if self.fdist == 0:
      self.get_tokens()
      self.fdist = nltk.FreqDist(self.tokens)
    return self.fdist