#!/usr/bin/env python2.5

import re, string

def tokenize(s, type = 0):
	"""
		Tokenize a string.

		s:    the string to (un)tokenize.
		type: selects the mode:
			type == -1: Undoes standard text tokenization
			type == 0:  Standard text tokenization
			type == 1:  Tokenization for the tree grammar
			type == 2:  Tokenization for weighing the files for classification
			type == 3:  Tokenization for the CRF file
			type == 4:  Grammar (.cfg) file

		Returns the transformed string.  For types 3 and 4, lines that are
		empty or start with '%' are returned unchanged.
	"""
	## Any single character OUTSIDE this class is "other punctuation" and
	## gets padded (type 0/2) or unpadded (type -1) below.
	not_incl = r"([^\w\d\s\.\-/,':;&\(\)])"

	if (type == -1):
		## Reverse the type-0 substitutions, restoring the surface text.
		## (__main__ uses this as a round-trip sanity check.)
		s = re.sub(" &ellie ", "...", s)
		s = re.sub(" - ", "-", s)
		s = re.sub(" -- ", " - ", s)
		s = re.sub(" -LRB- ", "(", s)
		s = re.sub(" -RRB- ", ")", s)
		## Re-attach apostrophe clitics: "don 't" -> "don't".
		s = re.sub(r"(\w+) ('\w\w?)", r"\1\2", s)
		s = re.sub(" / ", "/", s)
		s = re.sub(" &period ", ".", s)
		## No trailing space in the pattern: the comma attaches to the left
		## token and the following space is kept.
		s = re.sub(" &comma", ",", s)
		s = re.sub(" &dollar ", " $", s)
		s = re.sub(" &colon ", ": ", s)
		s = re.sub(" &amp ", " & ", s)
		s = re.sub(" ; ", ";", s)
		## Remove the padding space added after "other punctuation".
		s = re.sub(re.compile(not_incl + " "), r'\1', s)

	if (type == -99):	## It's faster to ignore it this way.
		## Dead branch kept on purpose: superseded by the type 0/2 branch.
		s = re.sub(r"\.", " &period ", s)
		s = re.sub(r"\-+", "-", s)
		s = re.sub("-", " - ", s)
		s = re.sub(r"(\w+)'s", r"\1 's", s)
		s = re.sub("/", " / ", s)
		s = re.sub(",", " &comma ", s)
		s = re.sub(":", " &colon ", s)
		s = re.sub(r"\(", " -LRB- ", s)
		s = re.sub(r"\)", " -RRB- ", s)

	if (type == 1):
		## Escape punctuation that would collide with the bracketed tree
		## syntax: "X)" leaves (closing a token), "(X" tags (opening a node).
		s = re.sub(r"\.\)", "&period)", s)
		s = re.sub(r",\)", "&comma)", s)
		s = re.sub(r"\(\.", "(DOT", s)
		s = re.sub(r"\(,", "(COMMA", s)
		## BUGFIX: was r"\$," which matched "$," instead of the opening
		## "($" tag, breaking the symmetry with the "&dollar)" rule below.
		s = re.sub(r"\(\$", "(DOLLAR", s)
		s = re.sub(r"\$\)", "&dollar)", s)
		s = re.sub(r":\)", "&colon)", s)
		s = re.sub(r"\(:", "(COLON", s)

	if ((type == 2) or (type == 0)):
		s = re.sub(r"&", " &amp ", s)
		s = re.sub(r"\.{3}", " &ellie ", s)	## Ellipsis before single dots.
		s = re.sub(r"\.", " &period ", s)
		s = re.sub(r"\-+", "-", s)			## Collapse dash runs first...
		s = re.sub(" - ", " -- ", s)		## ...then protect free-standing dashes...
		s = re.sub(r"(\S)-(\S)", r"\1 - \2", s)	## ...and split hyphenated words.
		## Split apostrophe clitics: "don't" -> "don 't".
		s = re.sub(r"(\w+)('\w?\w?)", r"\1 \2", s)
		s = re.sub("/", " / ", s)
		s = re.sub(", ", " &comma ", s)
		s = re.sub(r",(\S)", r" &comma \1", s)
		s = re.sub(r"\$", " &dollar ", s)
		s = re.sub(":", " &colon ", s)
		s = re.sub(";", " ; ", s)
		s = re.sub(r"\(", " -LRB- ", s)
		s = re.sub(r"\)", " -RRB- ", s)

		## Pad other punctuation with a subsequent spaces:
		s = re.sub(re.compile(not_incl), r'\1 ', s)

	if (type == 3):
		## CRF lines look like "token tag ..."; comments ('%') pass through.
		if ((s == "") or (re.match("%", s))):	return s
		s = re.sub(r"\(", " -LRB- ", s)
		s = re.sub(r"\)", " -RRB- ", s)
		s = re.sub(r"^\.{3} .*", "&ellie CRAFT", s)
		tt = s.split()
		## For tokens only:
		tt[0] = re.sub(r"^\s&\s", " &amp ", tt[0])
		tt[0] = re.sub(r"^\.", "&period", tt[0])
		tt[0] = re.sub("^,", "&comma", tt[0])
		tt[0] = re.sub("^:", "&colon", tt[0])
		tt[0] = re.sub(r"^\$", "&dollar", tt[0])
		## For tags only (guarded: a malformed line may lack the tag field,
		## which used to raise IndexError here):
		if (len(tt) > 1):
			tt[1] = re.sub(r"PRP\$$", "PRPS", tt[1])
			tt[1] = re.sub(r"WP\$$", "WPS", tt[1])
			tt[1] = re.sub(r"\.$", "DOT", tt[1])
			tt[1] = re.sub(",$", "COMMA", tt[1])
			tt[1] = re.sub(":$", "COLON", tt[1])
			## BUGFIX: was r" \$$" -> " DOLLAR " which could never match,
			## since split() tokens contain no spaces; a bare "$" tag now
			## becomes DOLLAR, consistent with DOT/COMMA/COLON above.
			tt[1] = re.sub(r"\$$", "DOLLAR", tt[1])
		## str.join replaces string.join(), which no longer exists in Py3.
		s = " ".join(tt)

	if (type == 4):
		if ((s == "") or (re.match("%", s))):	return s
		s = re.sub(r"\(", "-LRB-", s)
		s = re.sub(r"\)", "-RRB-", s)
		s = re.sub(r"& ", "&amp ", s)
		s = re.sub(r"PRP\$", "PRPS", s)
		s = re.sub(r"WP\$", "WPS", s)
		## Quoted terminals first, so the bare-symbol rules below only hit
		## the unquoted (nonterminal) occurrences.
		s = re.sub(r"\"\$\"", "\"&dollar\"", s)
		s = re.sub(r"\"\.\"", "\"&period\"", s)
		s = re.sub("\",\"", "&comma", s)
		s = re.sub("\":\"", "&colon", s)
		s = re.sub(r"\.", "DOT", s)
		s = re.sub(r"\$", "DOLLAR", s)
		s = re.sub(",", "COMMA", s)
		s = re.sub(":", "COLON", s)

	return (s)

## Tokenize an input file.
if (__name__ == "__main__"):
	import sys
	default = 0
	file = open(sys.argv[1])
	if (len(sys.argv) > 2):
		type = int(sys.argv[2])
	else:
		type = default
	for line in file:
		if (line == "\n"):	
			print
			continue
		line = line[:-1]
		# if (type == 0):	print "BEFORE:  " + line
		done = tokenize(line, type)
		print done
		if (type == 0):	
			undone = tokenize(done, -1)
			# print "AFTER:  " + undone
			if (undone != line):	print "-" * 20 + "They differ." + "-" * 20
		
	file.close()
