import nltk
import glob
from BeautifulSoup import BeautifulStoneSoup
import matplotlib

# Select the GTKCairo backend before any plotting happens.
# NOTE(review): nothing in the active code plots (the FreqDist.plot call
# below is commented out), so matplotlib is currently unused — confirm
# whether the backend selection is still needed.
matplotlib.use('GTKCairo')

# WordNet-based lemmatizer, reused for every token in every file.
wnl = nltk.WordNetLemmatizer()

# All HTML cable files in the current working directory.
files = glob.glob('*.html')

# First characters of Penn Treebank tags that are also valid WordNet POS
# codes accepted by WordNetLemmatizer.lemmatize(): noun, verb, adjective,
# adverb.  Anything else gets the lemmatizer's default (noun) behaviour.
_WORDNET_POS = ('n', 'v', 'a', 'r')

for f in files:
    # Read the whole document; 'with' guarantees the handle is closed
    # (the original leaked one file descriptor per input file).
    with open(f, 'r') as fh:
        original = fh.read()

    # Only process documents that contain the cable table marker;
    # everything before the marker (page header/boilerplate) is dropped.
    ind = original.find("<table class='cable'>")
    if ind != -1:
        sin_header = original[ind:]

        # Strip the HTML tags, keeping only the visible text.
        sin_html = nltk.clean_html(sin_header)

        # Normalise character entities, then lowercase and restore the
        # newlines that survive as literal '&#x000a;' entities.
        sin_codigos_feos = BeautifulStoneSoup(sin_html,
                   convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
        sin_codigos_feos = sin_codigos_feos.lower().replace('&#x000a;', '\n')

        # Sentence-split, POS-tag and lemmatize every token.
        sentences = nltk.sent_tokenize(sin_codigos_feos)
        stem_words = []
        for s in sentences:
            tokens = nltk.wordpunct_tokenize(s)
            pos_tagged = nltk.pos_tag(tokens)
            for (w, p) in pos_tagged:
                tag = p[0].lower()
                if tag == 'j':
                    # Penn JJ* (adjectives) map to WordNet's 'a'.
                    tag = 'a'
                if tag in _WORDNET_POS:
                    stem_words.append(wnl.lemmatize(w, tag))
                else:
                    # Tag has no WordNet equivalent (e.g. WDT, CC, MD):
                    # lemmatize with the default POS.  The original
                    # blacklist missed 'w'/'e'/'u'/'f'/'l' tags, which
                    # made lemmatize() raise KeyError on those tokens.
                    stem_words.append(wnl.lemmatize(w))

        # Print once per document (the original printed the cumulative
        # list once per sentence, repeating earlier output quadratically).
        print(stem_words)

        #freq = nltk.FreqDist(tokens)
        #freq.plot(50)
