import nltk
from nltk_contrib.readability.readabilitytests import *
import pickle
from sax import *

# Compute sliding-window readability scores for every book listed in
# full_articles.txt (one file path per line).  Each book's token stream is
# split into non-overlapping WINDOW-token chunks and five readability
# metrics are recorded per chunk, appended to the module-level lists below
# (consumed later when the series are pickled).
WINDOW = 100  # tokens per readability sample

smog_indices = []
FleschReadingEase = []
FleschKincaidGradeLevel = []
GunningFogIndex = []
ColemanLiauIndex = []

read_tool = ReadabilityTool()

# `with` guarantees the listing file is closed (the original leaked the
# handle via open(...).readlines()).
with open('./full_articles.txt') as listing:
    all_books = listing.readlines()

for book in all_books:
    path = book.strip()
    if not path:
        continue  # a blank line in the listing would otherwise crash open('')
    print('book: %s' % book)
    with open(path, 'r') as handle:
        book_data = handle.read()
    tokens = nltk.word_tokenize(book_data)
    # Iterate window starts so that a full WINDOW of tokens is always
    # available; any trailing partial window is intentionally dropped,
    # matching the original behavior.
    for i in range(0, len(tokens) - WINDOW + 1, WINDOW):
        txt = ' '.join(tokens[i:i + WINDOW])
        smog_indices.append(read_tool.SMOGIndex(txt))
        FleschReadingEase.append(read_tool.FleschReadingEase(txt))
        FleschKincaidGradeLevel.append(read_tool.FleschKincaidGradeLevel(txt))
        GunningFogIndex.append(read_tool.GunningFogIndex(txt))
        ColemanLiauIndex.append(read_tool.ColemanLiauIndex(txt))

# Convert each readability series to SAX form and pickle it to disk.
# The (10, 5) arguments are passed through to SaxData unchanged —
# presumably word/alphabet sizes for the SAX discretization; confirm
# against the sax module.
#
# Fixes over the original copy-paste version:
#   * pickle files are opened in binary mode ('wb') — required for pickle
#     output (text mode corrupts pickles on Windows and breaks binary
#     protocols);
#   * `with` guarantees each file is flushed and closed (the original
#     never closed its handles, risking truncated output);
#   * one loop replaces five duplicated stanzas.
metric_outputs = [
    ('./smog.p', smog_indices),
    ('./FleschReadingEase.p', FleschReadingEase),
    ('./FleschKincaidGradeLevel.p', FleschKincaidGradeLevel),
    ('./GunningFogIndex.p', GunningFogIndex),
    ('./ColemanLiauIndex.p', ColemanLiauIndex),
]
for out_path, series in metric_outputs:
    sax_data = SaxData(series, 10, 5)
    with open(out_path, 'wb') as out_file:
        pickle.dump(sax_data, out_file)
 
