# C:/Python27/TextResuming/main.py
# This is main file
# Author : @ismailsunni

from  __future__ import division
from core.pos_tagger import PosTagger
import core.text as text
import core.is_parser as is_parser
import core.util as util
import core.node_tree as nt
import core.depth_algorithm as da
import testing as coref
import yaml

# Path to the YAML file holding the run parameters (read by load_parameters()).
parameters_path = "parameters.yaml"
# Path to the YAML synonym dictionary (used when the `synonyms` flag is on).
path_synonym_file = "synonyms.yaml"
# Application version string.
version = '0.1'

def load_parameters():
	'''Load and return the parameters stored in parameters_path.

		@return : whatever object the YAML file parses to (the rest of the
		program indexes it like a list).
		@raise : propagates any error from opening or parsing the file.
		'''

	# BUGFIX: the old body re-raised inside an except block and then had an
	# unreachable `return None`; it also leaked the file handle when parsing
	# failed. `with` closes the file on every path.
	# NOTE(review): yaml.load without an explicit Loader is unsafe on
	# untrusted input -- parameters.yaml is a local config file here, but
	# consider yaml.safe_load.
	with open(parameters_path) as f:
		return yaml.load(f)

def preprocess_coref(format_start_input, format_end_input, start_idx = 1, end_idx = 10):
	'''preprocess using coreference.
		
		@parameters
		@format_start_input : prefix of file
		@format_end_input : suffix of file
		@start_idx : starting index
		@end_idx : last index.
		'''

	nomer = [i+start_idx for i in xrange(end_idx - start_idx + 1)]
	for no in nomer:
		coref.testSolve(format_start_input + str(no) + format_end_input)
	print "coreference succesfull"

def main_depth_algorithm(max_segment = 8, sim_type = 1, depth_threshold = 4, synonyms = 0, stopwords = 0, start_idx = 1, end_idx = 10, format_start_input = "", format_end_input = "", additional_format_ouput = "_out", lang = 'ENG'):
	'''main algorithm for segmentation.
		
		@parameters
		@max_segment : maximum segment in a cluster.
		@sim_type : similarity function used. 1 = intersect NP, 2 = intersect NP + Tf Idf
		@depth_threshold : limit for maximum depth of a node tree
		@synonyms : 1 = replace word with synonym, 0 = No
		@stopwords : 1 = remove stopwords, 0 = No
		@start_idx : starting index
		@end_idx : last index
		@format_start_input : prefix of file, path is included
		@format_end_input : suffix of file without extension
		@additional_format_ouput : additional word to file name in result
		@lang : ENG = English, INA = Indonesian
		'''

	nomer = [i+start_idx for i in xrange(end_idx - start_idx + 1)]
	
	fileInputs = []
	for no in nomer:
		fileInputs.append(format_start_input + str(no) + format_end_input)

	for fileInput in fileInputs:
		try:
			print 'preprocessing ' + fileInput
			# Read
			list_of_string = is_parser.load_index_news(fileInput+'.txt')

			# Create segmented_news
			segmented_news1 = text.create_segmented_news(list_of_string)	
			
			if lang == 'ENG':
				# Create Dict term TF IDF
				dict_Tf_Idf = segmented_news1.create_dict_term_TF_IDF()

				# Get list_of_list_term
				list_of_list_term = segmented_news1.get_base_list_term()
			
			elif lang == 'INA':
				list_tag = ['NP', 'NNP', 'NN', 'VB', 'FW']
				# Get list_of_list_term
				list_of_list_term = segmented_news1.get_list_of_list_word_INA_tagged(list_tag)
				
				# Create Dict term TF IDF
				dict_Tf_Idf = segmented_news1.get_dict_TF_IDF_word_INA_tagged(list_tag)
			
			else:
				print 'bad dum tss'
			
			# synonyms
			if synonyms != 0:
				list_of_list_term = da.convert_synonim_term(path_synonym_file, list_of_list_term)

			# stop word
			if stopwords != 0:
				list_of_list_term = da.remove_stop_word(list_of_list_term)

			# Get base list of node
			list_of_node = []
			for i in xrange(0,segmented_news1.get_num_segment()):
				new_node_tree = nt.node_tree(None, None, 0, [i])
				list_of_node.append(new_node_tree)
			
			# pre-merge node tree (0 and 1), (n-3, n-2 and n-1)
			if len(list_of_node) < 2:
				pass
			elif len(list_of_node) == 2:
				list_of_node = [list_of_node[0].merge_node(list_of_node[1])]
			elif len(list_of_node) == 3:
				list_of_node = [list_of_node[0].merge_node(list_of_node[1].merge_node(list_of_node[2]))]
			elif len(list_of_node) == 4:
				list_of_node = [list_of_node[0].merge_node(list_of_node[1].merge_node(list_of_node[2].merge_node(list_of_node[3])))]
			elif len(list_of_node) > 5:
				node_tree_first = list_of_node[0].merge_node(list_of_node[1])
				node_tree_last = list_of_node[len(list_of_node) - 3].merge_node(list_of_node[len(list_of_node) - 2].merge_node(list_of_node[len(list_of_node) - 1]))
				
				pre_merge_list_of_node = []
				pre_merge_list_of_node.append(node_tree_first)	
				
				for i in xrange(2, len(list_of_node) - 3):
					pre_merge_list_of_node.append(list_of_node[i])

				pre_merge_list_of_node.append(node_tree_last)
				list_of_node = pre_merge_list_of_node

			retval = da.get_tree_segmentation(list_of_node, list_of_list_term, dict_Tf_Idf)

			while (len(retval['list_of_node']) > 1) and (retval['max_sim_value'] > 0):
				retval = da.get_tree_segmentation(retval['list_of_node'], list_of_list_term, dict_Tf_Idf)
				if retval['max_sim_value'] > 0:
					list_of_node = retval['list_of_node']

			list_segm = []
			for _node_tree in list_of_node:
				new_segm = da.get_segment_candidate(_node_tree, max_segment)
				list_segm.append(new_segm)

			not_alone_list_segm = []
			for lst_sgm in list_segm:
				not_alone_list_segm.append(da.merge_alone_segment(lst_sgm, list_of_list_term, dict_Tf_Idf))	

			# merge
			retval = []
			for i in xrange(0, len(not_alone_list_segm)):
				retval.extend(da.merge_segment_candidate(not_alone_list_segm[i], list_of_node[i].depth, max_segment))
			
			final_segment = da.get_final_segmentation(retval)

			# Save			
			print 'saved output: ', segmented_news1.save_segmented(fileInput + additional_format_ouput + '.txt', final_segment)

		except Exception, e:
			util.debug('main12_depth_algorithm ' + str(e))

def compare_ms(start_idx = 1, end_idx = 10, auto_format_start = "", auto_format_end = "", manual_format_start = "", manual_format_end = ""):
	'''Compare the main sentences of the automatic result with the manual one.
	
		@parameters
		@start_idx : first file number (inclusive)
		@end_idx : last file number (inclusive)
		@auto_format_start : prefix for the automatic result files
		@auto_format_end : suffix for the automatic result files
		@manual_format_start : prefix for the manual result files
		@manual_format_end : suffix for the manual result files
		@return : list of (auto sentences, manual sentences, #shared, accuracy)
		'''

	util.debug('compare main sentence', 0)
	
	# One tuple per file pair; accuracy is shared / len(manual).
	results = []
	
	for idx in xrange(start_idx, end_idx + 1):
		auto_file = auto_format_start + str(idx) + auto_format_end + '.txt'
		manual_file = manual_format_start + str(idx) + manual_format_end + '.txt'
		
		auto_ms = is_parser.get_list_ms(auto_file)
		manual_ms = is_parser.get_list_ms(manual_file)
		shared = len(set(auto_ms) & set(manual_ms))
		
		# Guard against an empty manual list to avoid division by zero.
		accuracy = shared / len(manual_ms) if manual_ms else 0
		results.append((auto_ms, manual_ms, shared, accuracy))
	
	return results

def compare_first_idx(start_idx = 1, end_idx = 10, auto_format_start = "", auto_format_end = "", manual_format_start = "", manual_format_end = ""):
	'''Compare the first segment indices of the automatic and manual results.
	
		@parameters
		@start_idx : first file number (inclusive)
		@end_idx : last file number (inclusive)
		@auto_format_start : prefix for the automatic result files
		@auto_format_end : suffix for the automatic result files
		@manual_format_start : prefix for the manual result files
		@manual_format_end : suffix for the manual result files
		@return : list of (auto indices, manual indices, #shared, accuracy)
		'''

	util.debug('compare_result', 0)
	
	comparison = []
	
	number = start_idx
	while number <= end_idx:
		path_auto = auto_format_start + str(number) + auto_format_end + '.txt'
		path_manual = manual_format_start + str(number) + manual_format_end + '.txt'
		
		idx_auto = is_parser.get_first_segment_idx(path_auto)
		idx_manual = is_parser.get_first_segment_idx(path_manual)
		overlap = len(set(idx_auto) & set(idx_manual))
		
		# Empty manual list -> accuracy 0 (avoids division by zero).
		if idx_manual:
			score = overlap / len(idx_manual)
		else:
			score = 0
		
		comparison.append((idx_auto, idx_manual, overlap, score))
		number += 1
	
	return comparison
	
def get_list_link():
	'''a little script for getting retrieve link or title. Please edit in "edit here" part'''
	
	links = []
	for idx in xrange(1, 121):
		news = is_parser.load_index_news('test/Indonesia/1-120 Manual (ID)/' + str(idx) + '_ID_Seg_Manual.txt') #edit here
		links.append(news[1]) #0 for retrieving title, 1 for retrieving link, edit here
	
	# Show the collected entries and dump them to disk.
	util.print_index_list_dict(links)
	util.write_list('link_ina.txt', links, False)

def main_compare(compare_type = 1, start_idx = 1, end_idx = 10, auto_format_start = "", auto_format_end = "", manual_format_start = "", manual_format_end = ""):
	'''Compare first index manual result and auto result.
	
		@parameters
		@compare_type : 1 = compara main sentence, 2= compare 1st idx
		@start_idx : starting index
		@end_idx : last index
		@auto_format_start : prefix for auto result
		@auto_format_end : suffix for auto result
		@manual_format_start : prefix for manual result
		@manual_format_end : suffix for manual result
		'''
		
	if compare_type == 1:
		result = compare_ms(start_idx, end_idx, auto_format_start, auto_format_end, manual_format_start, manual_format_end)
		f = open('result compare main sentence.txt', 'w')
	elif compare_type == 2:
		f = open('result compare segment 1st idx.txt', 'w')
		result = compare_first_idx(start_idx, end_idx, auto_format_start, auto_format_end, manual_format_start, manual_format_end)
	else:
		return None
	
	# save result
	f.write('Hasil\n')
	f.write('No \t Auto \t Manual \t Akurasi\n')
	i = 0
	sum = 0
	
	for res in result:
		i += 1
		f.write(str(i) + '\t' + str(res[0]) + '\t' + str(res[1]) + '\t' + str(res[2]) + '/' + str(len(res[1])) + '=' + str(res[3]) + '\n')
		sum += res[3]
	
	f.write('\nRata-rata = ' + str(sum/i))
		
	f.close()
	
	print 'Rata-rata = ' + str(sum/i)
	
def main():
	'''Main function'''
	
	print 'Text Resuming'
	print 'Pilih 1 untuk preprocess dengan coref'
	print 'Pilih 2 untuk melakukan segmentasi dan mendapatkan akurasi'
	print 'Pilih 3 untuk melakukan perbandingan'
	input_variable = str(input ("Masukan pilihan: "))

	parameters = load_parameters()
	try:
		params = []
		for item in parameters[:7]:
			params.append(int(item))
		for item in parameters[7:14]:
			params.append(item)
		for item in parameters[14:16]:
			params.append(int(item))
		for item in parameters[16:18]:
			params.append(item)
		for item in parameters[18:20]:
			params.append(int(item))
		for item in parameters[20:]:
			params.append(item)
	except Exception, e:
		raise e
		util.debug(str(e))

	if input_variable == '1':
		print 'preprocessing...'
		preprocess_coref(params[16], params[17], params[18], params[19])
		print 'finished'
	elif input_variable == '2':
		print 'segmentating...'
		main_depth_algorithm(params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[20])
		print 'finished'
	elif input_variable == '3':
		print 'comparing...'
		main_compare(1, params[14], params[15], params[10], params[11], params[12], params[13])
		main_compare(2, params[14], params[15], params[10], params[11], params[12], params[13])
		print 'finished'
	else:
		print 'input salah, exit program'


# Script entry point: run the interactive menu when executed directly.
if __name__ == '__main__':
	main()
	#get_list_link()