######################################################################
#
#    Author :  Ao Zhang
#    E-mail :  nlp410.cn@gmail.com
#
#    Date   :  Mar. 24, 2012
#
#    Description :
#
######################################################################

import sys
from os import linesep
from os.path import join as path_join
from os.path import dirname

sys.path.append( path_join( dirname(__file__), '..' ) )
from niu_module import counter

class stemming:
	"""Container for stemming-related lookup tables.

	Declared here as a data holder; this module only instantiates it
	(as ``generate_lexical_table.stem``) and never populates it.
	"""

	def __init__( self ):
		# BUG FIX: these were class-level dicts, shared by every
		# instance, so mutations on one object leaked into all others.
		# They are now per-instance state.
		self.stemming_list         = {}   # word -> stem candidates
		self.stemmed               = {}   # word -> chosen stem
		self.word_translation_stem = {}   # stem -> {stem: count}
		self.total_dest_lang_stem  = {}   # stem -> total count

class generate_lexical_table:
	"""Build word-level lexical translation tables from a word-aligned
	parallel corpus.

	Given a source-language file, a destination-language file and an
	alignment file (one space-separated list of 'srcIdx-dstIdx' pairs
	per line), this counts co-occurrences of aligned words — pairing
	unaligned words with the token 'NULL' — and writes two sorted
	relative-frequency tables: source-to-dest and dest-to-source,
	one '<word> <word> <prob>' entry per line.
	"""

	def __init__( self ):
		# BUG FIX: these were class-level dicts, shared by every
		# instance; building two tables in one process would corrupt
		# both sets of counts.  They are now per-instance state.
		self.total_source_lang = {}   # src word -> total aligned count
		self.total_dest_lang   = {}   # dst word -> total aligned count
		self.word_translation  = {}   # src word -> {dst word: pair count}
		self.stem = stemming()

	def generate_lexical_table( self, 
			src_lang_file,
			dst_lang_file,
			alignment_file,
			lex_s2d_file,
			lex_d2s_file,
			file_encoding='utf8'):
		"""Read the parallel corpus and write the two lexical tables.

		src_lang_file / dst_lang_file -- tokenized sentences, one per line
		alignment_file -- 'srcIdx-dstIdx' pairs, space separated, per line
		lex_s2d_file / lex_d2s_file -- output paths for the two tables
		file_encoding -- text encoding used for all five files

		Returns False when the three input files differ in line count;
		exits the process with status 1 on I/O errors.
		"""
		try:
			# Slurp all three files; 'with' guarantees the handles are
			# closed even if readlines() raises.
			with open( src_lang_file, encoding = file_encoding ) as ifs:
				src_lang_lines = ifs.readlines()
			with open( dst_lang_file, encoding = file_encoding ) as ifs:
				dst_lang_lines = ifs.readlines()
			with open( alignment_file, encoding = file_encoding ) as ifs:
				alignment_lines = ifs.readlines()

			if not( len(src_lang_lines) == len(dst_lang_lines) == len(alignment_lines) ):
				print( 'the number of lines of these three are not equal' )
				return False

			count = counter.counter( 10000 )
			for index in range( len(src_lang_lines) ):
				'''process one of these sentences'''
				count.adder()

				src_line = src_lang_lines[index].strip()
				dst_line = dst_lang_lines[index].strip()
				aln_line = alignment_lines[index].strip()

				# BUG FIX: ''.split(' ') returns [''] (length 1), so the
				# old "len(...) == 0" test could never fire and a blank
				# alignment line crashed later on int('').  Test the raw
				# stripped text instead.
				if not src_line or not dst_line or not aln_line:
					self.blank_line_warning( count.get_number() )
					continue

				src_lang  = src_line.split( ' ' )
				dst_lang  = dst_line.split( ' ' )
				aln_table = aln_line.split( ' ' )

				# number of alignment links touching each token
				src_lang_aln_count = [0] * len( src_lang )
				dst_lang_aln_count = [0] * len( dst_lang )

				for num_pair in aln_table:
					str_num_pair = num_pair.split( '-' )
					src_word_id = int(str_num_pair[0])
					dst_word_id = int(str_num_pair[1])

					# skip the rest of a malformed alignment line
					if src_word_id >= len(src_lang) or dst_word_id >= len(dst_lang):
						self.out_bound_warning( count.get_number(),\
								len(src_lang), len(dst_lang), index )
						break

					src_lang_aln_count[ src_word_id ] += 1
					dst_lang_aln_count[ dst_word_id ] += 1
					self.words_count( src_lang[ src_word_id ], dst_lang[ dst_word_id ] )

				# unaligned tokens are counted against the NULL token
				for src_lang_index, slac in enumerate( src_lang_aln_count ):
					if slac == 0:
						self.words_count( src_lang[ src_lang_index ], 'NULL' )

				for dst_lang_index, dlac in enumerate( dst_lang_aln_count ):
					if dlac == 0:
						self.words_count( 'NULL', dst_lang[ dst_lang_index ] )

				count.print_info()

			#end: for index in range( ... )
			count.done()

			lex_s2d = []
			lex_d2s = []
			# BUG FIX: use '\n' rather than os.linesep -- the output
			# files are opened in text mode, which already translates
			# '\n' to the platform line ending; writing os.linesep would
			# produce '\r\r\n' on Windows.
			format_str = '%s %s %f\n'
			for s_word in self.word_translation:
				for d_word in self.word_translation[s_word]:
					# relative frequencies: count(s,d)/count(s) and
					# count(s,d)/count(d)
					lex_s2d.append( format_str % (d_word, s_word, \
							self.word_translation[s_word][d_word] / self.total_source_lang[ s_word ]) )
					lex_d2s.append( format_str % (s_word, d_word, \
							self.word_translation[s_word][d_word] / self.total_dest_lang[ d_word ]) )

			lex_s2d.sort()
			lex_d2s.sort()

			with open( lex_s2d_file, 'w', encoding = file_encoding ) as ofs:
				ofs.writelines( lex_s2d )
			with open( lex_d2s_file, 'w', encoding = file_encoding ) as ofs:
				ofs.writelines( lex_d2s )
		except IOError as ioe:
			print( ioe )
			sys.exit( 1 )
	#end: generate_lexical_table

	def words_count( self, src_word, dst_word ):
		"""Record one aligned (src_word, dst_word) occurrence in the
		two marginal count tables and the joint count table."""
		self.total_source_lang[ src_word ] = self.total_source_lang.get( src_word, 0 ) + 1
		self.total_dest_lang[ dst_word ]   = self.total_dest_lang.get( dst_word, 0 ) + 1
		pair_counts = self.word_translation.setdefault( src_word, {} )
		pair_counts[ dst_word ] = pair_counts.get( dst_word, 0 ) + 1

	def blank_line_warning( self, number ):
		"""Warn that input line 'number' is blank and was skipped."""
		print( '\nWARNING :')
		print( '        there is a blank line at %d.'%number )

	def out_bound_warning( self, line_num, src_len, dst_len, index ):
		"""Warn that an alignment pair points past the end of a sentence.

		NOTE(review): callers pass the sentence index for 'index', yet it
		is printed as "current index" for both sides — possibly the word
		ids were intended; behavior kept as-is.
		"""
		print( '\nWARNING :' )
		print( '        Out of bound in %d line.'%line_num )
		print( '        source lang words: %d. current index: %d.'%(src_len, index) )
		print( '        dest lang words: %d. current index: %d.'%(dst_len, index) )

#end: class generate_lexical_table

if __name__ == '__main__':
	# argv: src-file dest-file align-file s2d.out d2s.out [encoding]
	# GENERALIZATION: the usage text advertises an optional encoding
	# argument, but the old check required exactly five arguments and
	# hard-coded 'utf8'; accept the sixth argument when given.
	if len( sys.argv ) in (6, 7):
		encoding = sys.argv[6] if len( sys.argv ) == 7 else 'utf8'
		glt = generate_lexical_table()
		glt.generate_lexical_table( sys.argv[1],
				sys.argv[2],
				sys.argv[3],
				sys.argv[4],
				sys.argv[5],
				encoding )
	else:
		print( 'Usage :' )
		print( '%s src-file dest-file align-file s2d.out d2s.out encoding(default utf8)'%sys.argv[0] )
