'''
file_name: pos_tagger.py
initiator: Adit
description: POS tagger class for Bahasa Indonesia or Malay
'''

import nltk, os, pickle
from nltk.corpus.reader import TaggedCorpusReader, WordListCorpusReader
from nltk.tag import brill, DefaultTagger, RegexpTagger, AffixTagger, UnigramTagger, BigramTagger, TrigramTagger, TaggerI

# Default resource directory; override the empty string with a network
# share (example below) when the tagger resources live elsewhere.
resource_dir = ""
# resource_dir = "//192.168.10.250/share/postag/"

class PosTagger(TaggerI):
	'''
	POS tagger for Bahasa Indonesia or Malay.

	Builds a chain of sequential backoff taggers (default -> regexp ->
	affix -> dictionary -> gazetteer -> unigram/bigram/trigram) trained
	on a tagged corpus, then pickles itself so later runs can load the
	trained tagger instead of retraining.
	'''

	def __init__(self, dir = resource_dir, lang = "id", corpus_path = "corpus/", gazetteer_path = "gazetteer/", dictionary_path = "dictionary/", binary_path = "binary/"):
		'''
		constructor
		parameters:
		- dir -> resource directory (default: resource_dir)
		- lang -> language to be processed (default: id)
		- corpus_path -> path to corpora folder, relative to dir/lang/ (default: corpus/)
		- gazetteer_path -> path to gazetteers folder, relative to dir/lang/ (default: gazetteer/)
		- dictionary_path -> path to dictionaries folder, relative to dir/lang/ (default: dictionary/)
		- binary_path -> path to binaries folder, relative to dir/lang/ (default: binary/)
		'''
		# corpus reader over every .txt file in the corpus folder
		self.reader = TaggedCorpusReader(dir+lang+'/'+corpus_path, r".*\.txt")
		# tagged sentences used to train the sequential taggers
		self.train_sents = self.reader.tagged_sents()
		# gazetteer lookup: word -> "NNP"
		self.gazetteer = self.load_gazetteers(dir+lang+'/'+gazetteer_path)
		# lexicon lookup: word -> POS tag
		self.dictionary = self.load_dictionary(dir+lang+'/'+dictionary_path)
		# regexp patterns for numerics: cardinal (CDP) and ordinal (CDO)
		self.word_patterns = [
			(r"^-?[0-9]+(.[0-9]+)?$", "CDP"),
			(r"^Rp-?[0-9]+(.[0-9]+)?", "CDP"),
			(r"\.\d+", "CDP"),
			(r"ke-\d+", "CDO")
		]
		# fallback tag for words unknown to every tagger in the chain
		self.default_tag = "NN"
		# sequential backoff tagger chain
		self.sequential_backoff_tagger = self.sequential_backoff_chain()
		# Brill transformation rule templates -- currently disabled.
		# NOTE: brill_transformation_rules() reads self.brill_templates,
		# so this list must be re-enabled before calling that method.
		# self.brill_templates = [
			# brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,1)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (2,2)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,2)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,3)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,1)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (2,2)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,2)),
			# brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,3)),
			# brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1), (1,1)),
			# brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1), (1,1))
		# ]
		# sequential backoff tagger chain with Brill transformation rules
		# self.brill_tagger = self.brill_transformation_rules()
		# dump this object to a binary pickle for faster loads;
		# 'with' closes the handle even if pickling fails (the previous
		# code leaked the file object)
		with open(dir+lang+'/'+binary_path+"pos_tag_id.pickle", "wb") as binary_file:
			pickle.dump(self, binary_file, -1)

	def load_gazetteers(self, gazetteer_path):
		'''
		gazetteers loader
		parameters:
		- gazetteer_path -> path to gazetteers folder
		return:
		- dictionary mapping every gazetteer word to the "NNP" tag
		  (missing words default to "" via defaultdict(str))
		'''
		reader = WordListCorpusReader(gazetteer_path, r".*\.txt")
		gazetteer = nltk.defaultdict(str)
		for word in reader.words():
			gazetteer[word.strip()] = "NNP"
		return gazetteer

	def load_dictionary(self, dictionary_path):
		'''
		dictionaries loader
		parameters:
		- dictionary_path -> path to dictionaries folder
		return:
		- dictionary (lexicon) mapping each word to its POS tag
		'''
		reader = WordListCorpusReader(dictionary_path, r".*\.txt")
		dictionary = nltk.defaultdict(str)
		for entry in reader.words():
			token = entry.strip().split("\t")
			# accept only well-formed "word<TAB>tags" entries
			if len(token) == 2:
				word = token[0].strip()
				# keep only the first tag when several are listed
				pos_tag = token[1].split(' ')[0]
				dictionary[word] = pos_tag
		return dictionary

	def backoff_tagger(self, tagger_classes, backoff = None):
		'''
		utility method for creating a chain of backoff taggers
		parameters:
		- tagger_classes -> POS tagger classes, applied in order (the
		  last class becomes the outermost tagger)
		- backoff -> backoff tagger (default: None; the first class is
		  then trained without a backoff and consumed as the chain root)
		return:
		- backoff taggers chain
		'''
		# work on a copy: the previous implementation deleted the first
		# element of the caller's list in place
		classes = list(tagger_classes)
		if not backoff:
			backoff = classes.pop(0)(self.train_sents)
		for cls in classes:
			backoff = cls(self.train_sents, backoff = backoff)
		return backoff

	def sequential_backoff_chain(self):
		'''
		method for creating a complete chain of backoff taggers
		return:
		- complete backoff taggers chain: default -> regexp -> affix ->
		  dictionary -> gazetteer -> unigram -> bigram -> trigram
		'''
		default_tagger = DefaultTagger(self.default_tag)
		regexp_tagger = RegexpTagger(self.word_patterns, backoff = default_tagger)
		affix_tagger = AffixTagger(self.train_sents, backoff = regexp_tagger)
		dictionary_tagger = UnigramTagger(model = self.dictionary, backoff = affix_tagger)
		gazetteer_tagger = UnigramTagger(model = self.gazetteer, backoff = dictionary_tagger)

		return self.backoff_tagger([UnigramTagger, BigramTagger, TrigramTagger], backoff = gazetteer_tagger)

	def brill_transformation_rules(self):
		'''
		method for adding Brill transformation rules to the complete
		backoff taggers chain
		NOTE: requires self.brill_templates, which is currently
		commented out in __init__; re-enable it before calling this.
		return:
		- complete backoff taggers chain with Brill transformation rules
		'''
		trainer = brill.FastBrillTaggerTrainer(self.sequential_backoff_tagger, self.brill_templates)
		return trainer.train(self.train_sents, max_rules = 100, min_score = 3)

	def word_tokenize(self, sentence):
		'''
		method for tokenizing sentence (interface to NLTK)
		parameters:
		- sentence -> input sentence
		return:
		- list of tokens
		'''
		# pad '.' and '/' with spaces so NLTK splits them into tokens
		sentence = sentence.replace('.', " .")
		sentence = sentence.replace('/', " / ")
		return nltk.word_tokenize(sentence)

	def tag(self, words):
		'''
		POS tagging method
		parameters:
		- words -> list of tokens
		return:
		- list of (token, tag) tuples
		'''
		return self.sequential_backoff_tagger.tag(words)
		# return self.brill_tagger.tag(words)

	def accuracy(self, test_sents):
		'''
		method for computing POS tagger accuracy on training/testing
		data (interface to NLTK)
		parameters:
		- test_sents -> tagged sentences
		return:
		- accuracy
		'''
		return nltk.tag.accuracy(self.sequential_backoff_tagger, test_sents)

'''
CODES BEYOND THIS LINE IS FOR TESTING PURPOSES ONLY
'''
if __name__ == '__main__':
	def tag_sentence(sentence):
		tagger = PosTagger()
		words = tagger.word_tokenize(sentence)
		
		tagged = tagger.tag(words)
		
		print "SENTENCE TAGGING RESULT:"
		print tagged
	
	def for_qa(tagger, filename):
		lines = open(filename+".txt",'r').readlines()
		txt = open(filename+"_tag.txt",'w')
		# csv = open(filename+"_tag.csv",'w')
		
		for line in lines:
			tokens = tagger.word_tokenize(line.strip())
			tags = tagger.tag(tokens)
			for tag in tags:
				theword = tag[0]
				thetag = str(tag[1])
				txt.write(theword + '/' + thetag + ' ')
				if tag[0] == ',':
					theword = "<comma>"
				elif tag[0] == "'":
					theword = "<quote>"
				elif tag[0] == "\"":
					theword = "<double-quote>"
				elif ',' in tag[0]:
					theword = theword.replace(',', "<comma>")
				elif "'" in tag[0]:
					theword = theword.replace("'", "<quote>")
				if tag[1] == ',':
					thetag = "<comma>"
				elif tag[1] == "'":
					thetag = "<quote>"
				elif tag[1] == "\"":
					thetag = "<double-quote>"
				# csv.write(theword + ',' + thetag + "\n")
			txt.write("\n")
		txt.close()
		# csv.close()
	
	def for_qa_range(filename, range):
		tagger = PosTagger()
		i = 1
		while i <= range:
			for_qa(tagger, filename+str(i))
			i += 1
	
	def compare(manual, tagged, errors):
		# print "comparing "+tagged
		man_lines = open(manual+".txt").readlines()
		tag_lines = open(tagged+".txt").readlines()
		# errorlist = []
		i = 0
		match = 0
		count = 0
		while i < len(tag_lines):
			man_words = man_lines[i].strip().split(' ')
			tag_words = tag_lines[i].strip().split(' ')
			# print str(i) + " - " + str(len(man_words)) + ' ' + str(len(tag_words))
			j = 0
			while j < len(tag_words):
				# print man_words[j].split('/')
				man = man_words[j].split('/')[1].strip().upper()
				tag = tag_words[j].split('/')[1].strip().upper()
				if man == tag or (tag[:2] == "VB" and man[:2] == "VB"):
					match += 1
				# else:
					# print tag_words[j]+'\''
					# errorstring = man_words[j].split('/')[0].strip() + " : " + man_words[j].split('/')[1].strip() + " - " + tag_words[j].split('/')[1].strip() + "\n"
					# if errorstring not in errorlist:
						# errorlist.append(errorstring)
				count += 1
				j += 1
			i += 1
		# errors = open(errors+".txt", 'w')
		# errors.write("WORD : MAN - TAG\n")
		# for error in errorlist:
			# errors.write(error)
		# errors.close()
		return (float(match) / float(count)) * 100
	
	def compare_csv(manual, tagged):
		man_lines = open(manual+".csv").readlines()
		tag_lines = open(tagged+"_tag.csv").readlines()
		i = 0
		match = 0
		while i < len(tag_lines):
			man = man_lines[i].strip().split(',')[1].strip().upper()
			tag = tag_lines[i].strip().split(',')[1].strip().upper()
			
			if man == tag or man == '?' or (tag == "VB" and man[:2] == "VB"):
				match += 1
			i += 1
		return (float(match) / float(i)) * 100
	
	def compare_range(manual, tagged, score, start, range):
		log = open(score+".txt", 'w')
		i = start
		sum = 0
		while i <= range:
			accuracy = compare(manual+str(i), tagged+str(i))
			sum += accuracy
			log.write(str(accuracy)+"\n")
			i += 1
		log.write("AVG:\n")
		log.write(str(sum/float(range))+"\n")
		log.close()
	
	sentence = "Tutankhamun (firaun egalitarian) dinobatkan menjadi raja oleh SBY saat masih berumur sembilan tahun, namun pada usia 19 dia meninggal dunia di ruang 7602 di istananya."
	tag_sentence(sentence)
	
	sentence = "Hal itu semakin diperkuat dengan sasaran penembakan yang terjadi umumnya terhadap para pekerja pendatang kalangan etnis tertentu dari luar Aceh seperti warga asal pulau Jawa, sehingga mereka merasa ketakutan dan secara beramai-ramai nantinya akan meninggalkan Aceh."
	tag_sentence(sentence)
	
	# for_qa_range(resource_dir+"id/test/news/20111121/I/",12)
	# compare_range(resource_dir+"id/test/news/20120111/manual/", resource_dir+"id/test/news/20120111/", resource_dir+"id/test/score5", 1, 20)
	
	tagger = PosTagger()
	for_qa(tagger, resource_dir+"id/test/all/raw")
	print compare(resource_dir+"id/test/all/man", resource_dir+"id/test/all/raw_tag", resource_dir+"id/test/all/err")
	
	# FINAL AVERAGE ACCURACY: 95.08%
