from BeautifulSoup import BeautifulStoneSoup
import nltk
import re

def get_raw_text(filename):
  """Read an HTML file and return its visible text content.

  Strips markup with nltk.clean_html, converts special characters to XML
  entities via BeautifulStoneSoup (taking the single resulting text node),
  and restores newlines that survived as '&#x000a;' entities.

  NOTE(review): nltk.clean_html was removed in NLTK 3.x; this function
  assumes an old NLTK 2.x / BeautifulSoup 3 environment -- confirm.
  """
  # 'with' guarantees the handle is closed even if read() raises.
  with open(filename) as f:
    original = f.read()

  raw = nltk.clean_html(original)

  raw = BeautifulStoneSoup(
      raw,
      convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES
  ).contents[0]
  # Restore line breaks that were left as hex newline entities.
  raw = raw.replace('&#x000a;', '\n')
  return raw

def tokenizar(raw_text):
  """Tokenize text into word-like tokens, skipping punctuation-only tokens
  and (optionally) Spanish stopwords.

  Returns a flat list of tokens in document order.
  """
  spanish_stopwords = []  # stopwords.words('spanish') -- disabled for now
  sentences = nltk.sent_tokenize(raw_text)
  # BUG FIX: the inner loop previously tokenized raw_text instead of each
  # sentence, duplicating the entire token list once per sentence.
  tokens = [token
            for sentence in sentences
            for token in nltk.wordpunct_tokenize(sentence)
            if re.match(r'\w+', token) and token not in spanish_stopwords]

  return tokens


def tokenize(raw_text):
  """Tokenize text into word-like tokens (punctuation-only tokens dropped).

  Returns a flat list of tokens in document order.
  """
  # First split into sentences, then tokenize each sentence.
  sentences = nltk.sent_tokenize(raw_text)
  # BUG FIX: the inner loop previously tokenized raw_text instead of each
  # sentence, duplicating the entire token list once per sentence.
  tokens = [token
            for sentence in sentences
            for token in nltk.wordpunct_tokenize(sentence)
            if re.match(r'\w+', token)]

  return tokens

def main():
  raw_text = get_raw_text("pepe.txt")
  text = nltk.Text(raw_text)
  print text.generate(100)


  
if __name__ == '__main__':
  main()