# -----makingClozeTest.py -----
# Description: make a cloze test
# written by Tomonori Nagano <tnagano@gc.cuny.edu> 
# Date: July 5, 2011

# Required: NLTK and stanford-postagger
# wget http://nlp.stanford.edu/software/stanford-postagger-full-2011-06-19.tgz
# mv stanford-postagger-full-2011-06-19.tgz ~/nltk_data/taggers/stanford-postagger-full-2011-06-19.tgz
# cd ~/nltk_data/taggers
# tar xvf stanford-postagger-full-2011-06-19.tgz

import nltk
from nltk.corpus import gutenberg, brown
from nltk.tag.simplify import simplify_wsj_tag
from nltk.corpus import PlaintextCorpusReader 
import os,sys,getopt,random,re
import tempfile, os.path, textwrap
import time, datetime
import string

from reportlab.platypus import *
from reportlab.platypus.doctemplate import NextPageTemplate 
from reportlab.platypus.flowables import PageBreak 
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch

from nltk.tag.stanford import StanfordTagger
from urllib import urlopen

# Shared ReportLab state: the default stylesheet, and the list of flowables
# accumulated by header()/p() and finally rendered into a PDF by main().
styles = getSampleStyleSheet()
Elements=[]

def main():
	'''
	This script makes a cloze test for a given text.

	Options:
	  -b/--blank-index N        distance between candidate blanks (default 15)
	  -p/--part-of-speech T,T   extra simplified POS tags eligible for blanking
	  -s/--start N              never blank the first N words of a sentence (default 2)
	  -e/--end N                never blank the last N words of a sentence (default 2)
	  -h/--help                 show this help and exit

	Remaining arguments are file paths or http URLs; with no arguments a
	small Brown corpus sample is used.
	'''
	try:
		# opts is a set of tuples of (flag, argument)
		opts, args = getopt.getopt(sys.argv[1:], "b:p:s:e:q:h", ["blank-index=","part-of-speech=","start=","end=","max-question=","help"])
	except getopt.GetoptError:
		# show usage if there is an argument error (like Unix)
		print ("for help use -h or --help\rpossible tags: 'N', 'DET', 'P', 'V', 'ADJ', 'PRO', 'CNJ', 'ADV', 'NP', 'VN', 'VD', 'VG', 'NUM', 'TO', 'WH', 'MOD', 'VBZ'")
		sys.exit(2)

	# defining the default values
	blankIndex = 15; startIndex = 2; endIndex = 2
	partOfSpeech = ['N', 'NP', 'VN', 'VD', 'VG', 'VBZ']
	for opt, arg in opts:
		if opt in ("-h", "--help"):
			# BUG FIX: the module has no docstring, so __doc__ printed None;
			# the usage text lives on main() itself.
			print (main.__doc__)
			sys.exit(0)
		elif opt in ("-b", "--blank-index"):
			# BUG FIX: getopt yields strings, but these three indices are used
			# in integer comparisons and modulo arithmetic in process_text(),
			# so convert them here.
			blankIndex = int(arg)
		elif opt in ("-p", "--part-of-speech"):
			partOfSpeech.extend(arg.split(","))
		elif opt in ("-s", "--start"):
			startIndex = int(arg)
		elif opt in ("-e", "--end"):
			endIndex = int(arg)
		# NOTE(review): -q/--max-question is accepted by getopt above but is
		# not implemented anywhere downstream — confirm intent.

	orig_text = []
	if not args:
		# no input given: fall back to a small Brown corpus sample
		orig_text = brown.words()[:30]
	else:
		for filename in args:
			if filename.startswith("http"):
				# fetch the page and strip markup before tokenizing
				html = urlopen(filename).read()
				raw = nltk.clean_html(html).strip()
				root = make_testcorpus(ext=".txt",a=raw)
				newcorpus = PlaintextCorpusReader(root,['a.txt'])
				# web pages carry boilerplate; keep only a middle slice
				orig_text = newcorpus.sents()[50:70]
			else:
				raw = open(filename).read()
				root = make_testcorpus(ext=".txt",a=raw)
				newcorpus = PlaintextCorpusReader(root,['a.txt'])
				orig_text = newcorpus.sents()
	text,textOriginal,answerKeys,wordNum = process_text(blankIndex, partOfSpeech, startIndex, endIndex, orig_text)

	print_header(blankIndex, partOfSpeech, startIndex, endIndex, wordNum)
	print_text(text,textOriginal,answerKeys)

	# timestamped output name so repeated runs do not overwrite each other
	pdfname = 'clozeText_' + time.strftime("%Y_%m_%d_%I_%M%p_%Z",time.gmtime()) + '.pdf'
	doc = SimpleDocTemplate(pdfname)
	doc.build(Elements)

def make_testcorpus(ext='', **fileids):
	'''
	Create a temporary directory holding one file per keyword argument.

	Each keyword name becomes a file name (with ext appended) and its value,
	run through textwrap.dedent, becomes the file's contents.

	Parameters:
	  ext      - extension appended to every file name (e.g. ".txt")
	  fileids  - name=contents pairs to write out

	Returns the path of the new directory; pair with del_testcorpus() to
	clean it up.
	'''
	root = tempfile.mkdtemp()
	for fileid, contents in fileids.items():
		# use a context manager so the handle is closed even if write() fails
		with open(os.path.join(root, fileid + ext), 'w') as f:
			f.write(textwrap.dedent(contents))
	return root

def del_testcorpus(root):
	'''Remove every file inside the temporary corpus directory root, then the directory itself.'''
	for entry in os.listdir(root):
		path = os.path.join(root, entry)
		os.remove(path)
	os.rmdir(root)

def header(txt, style=styles["Heading2"], klass=Paragraph, sep=0.3):
    """Append a vertical spacer of sep inches, then txt wrapped in the given flowable class/style, to the global Elements list."""
    Elements.append(Spacer(0.2 * inch, sep * inch))
    Elements.append(klass(txt, style))

def p(txt):
    """Shortcut for header(): add txt as a Normal-style paragraph with a small (0.1 inch) leading space."""
    return header(txt, sep=0.1, style=styles["Normal"])

def print_header(blankIndex, partOfSpeech, startIndex, endIndex, wordNum):
	'''Emit a "Document Information" section listing the parameters used to build the cloze test.'''
	header("Document Information",styles["Heading1"])
	info_lines = [
		"Blank index = " + str(blankIndex),
		"Part-of-speech:" + ", ".join(partOfSpeech),
		"Start index = " + str(startIndex),
		"End index = " + str(endIndex),
		"Number of words = " + str(wordNum),
	]
	for line in info_lines:
		p(line)

def print_text(text,textOriginal,answerKeys):
	'''
	Append four sections to the global Elements list, separated by frame
	breaks: the numbered answer key, the shuffled hints, the cloze text with
	blanks, and the original text.

	Parameters:
	  text          - tokens with numbered blanks substituted in
	  textOriginal  - the untouched tokens
	  answerKeys    - the blanked-out words, in blank order (not modified)
	'''
	header("Answer Key",styles["Heading1"])
	numbered = ["[" + str(index + 1) + "] " + answer
				for index, answer in enumerate(answerKeys)]
	p(", ".join(numbered))
	Elements.append(FrameBreak())
	header("Hints",styles["Heading1"])
	# BUG FIX: shuffle a copy so the caller's answerKeys list is not
	# reordered as a side effect of printing.
	hints = list(answerKeys)
	random.shuffle(hints)
	p(", ".join(hints))
	Elements.append(FrameBreak())
	header("Text",styles["Heading1"])
	p(" ".join(text))
	Elements.append(FrameBreak())
	header("Original Text",styles["Heading1"])
	p(" ".join(textOriginal))

def process_text(blankIndex, partOfSpeech, startIndex, endIndex, orig_text):
	'''
	POS-tag orig_text and replace every blankIndex-th eligible word with a
	numbered blank.

	Parameters:
	  blankIndex   - int, distance between candidate blank positions
	  partOfSpeech - list of simplified WSJ tags eligible for blanking
	  startIndex   - int, words at the start of each sentence never blanked
	  endIndex     - int, words at the end of each sentence never blanked
	  orig_text    - iterable of tokenized sentences (lists of word strings)

	Returns (text, textOriginal, answerKeys, wordNum):
	  text         - tokens with blanks like "[3]__________" substituted
	  textOriginal - the untouched tokens
	  answerKeys   - the blanked-out words, in blank order
	  wordNum      - total number of words processed
	'''
	k=1; text = []; textOriginal = []; answerKeys = []; skipFlag = False
	# NOTE(review): hard-coded local install path for the Stanford tagger —
	# consider making this configurable.
	tagger = StanfordTagger("/Users/tomonori/nltk_data/taggers/stanford-postagger-full-2011-06-19/models/bidirectional-distsim-wsj-0-18.tagger", path_to_jar="/Users/tomonori/nltk_data/taggers/stanford-postagger-full-2011-06-19/stanford-postagger.jar")
	tagged_text = []; wordNum=0
	for l,sent in enumerate(orig_text):
		tagged_text.append(tagger.tag(sent))
		# progress report; parenthesized form works in both Python 2 and 3
		print("sentence %d (%d%% done)" % (l, int(float(l)/len(orig_text)*100)))
	for sent in tagged_text:
		for i,(word,tag) in enumerate(sent):
			wordNum = wordNum + 1
			# BUG FIX: the protected tail must be measured against the
			# current sentence length (len(sent)), not the number of
			# sentences (len(tagged_text)).
			if i < startIndex or i > len(sent)-endIndex:
				textOriginal.append(word)
				text.append(word)
			else:
				textOriginal.append(word)
				# skipFlag carries an unused blank forward to the next word
				if i%blankIndex == 0 or skipFlag==True:
					# blank only words that do not start with a capital,
					# digit, or punctuation, and whose simplified tag is
					# eligible.  BUG FIX: the digit range read 1-9 and so
					# failed to exclude words starting with 0.
					if re.match('[^A-Z0-9!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]',word) and simplify_wsj_tag(tag) in partOfSpeech:
						text.append("["+str(k)+"]__________")
						answerKeys.append(word)
						k=k+1
						skipFlag = False
					else:
						text.append(word)
						skipFlag = True
				else:
					text.append(word)
	return(text,textOriginal,answerKeys,wordNum)
	
# run only when executed as a script, not when imported as a module
if __name__ == '__main__':
	main()

