# C:/Python27/TextResuming/depth_alogrithm.py
# This is depth_alogrithm file
# Author : @ismailsunni

import word_weighting as ww
import util as util
import text as text
import node_tree as nt
import preprocess as pp

def get_similarity_node_tree(node_tree_1, node_tree_2, list_term, dict_TF_IDF = None, sim_type = 1):
	'''Return similarity of two node_tree.

	Arguments:
		node_tree_1, node_tree_2 -- nodes exposing get_term(list_term)
		list_term -- term lists used to resolve each node's terms
		dict_TF_IDF -- TF-IDF weights; defaults to an empty dict (was a
			shared mutable default {}, which is a Python pitfall)
		sim_type -- 1: plain set similarity (default); 2: set similarity
			weighted by TF-IDF similarity; any other value falls back to 1.
	'''
	if dict_TF_IDF is None:
		dict_TF_IDF = {}

	term_1 = set(node_tree_1.get_term(list_term))
	term_2 = set(node_tree_2.get_term(list_term))

	set_sim = ww.get_similarity_by_set(term_1, term_2)

	if sim_type == 2:
		# Only compute the TF-IDF similarity when it is actually used.
		return set_sim * ww.get_similarity_by_TF_IDF(term_1, term_2, dict_TF_IDF)

	# sim_type == 1 and any unrecognized value: plain set similarity.
	return set_sim
		
def get_tree_segmentation(list_of_node, list_of_list_term, dict_TF_IDF, sim_type = 1):
	'''Get tree represent a segmentation.

	Merges the pair of adjacent segments with the highest similarity into
	one node.  Returns a dict with keys:
		'max_sim_value'     -- highest adjacent-pair similarity (0 if < 2 segments)
		'list_of_node'      -- node list after merging the best pair
		'list_term'         -- set of terms of each segment
		'list_of_sim_value' -- similarity of every adjacent segment pair
	'''
	list_index = nt.get_list_content(list_of_node)		# index lists, one per segment
	num_segment = len(list_index)

	list_term = []			# store term in index in each segment
	list_of_sim_value = []
	retval = {}

	# Nothing to merge with fewer than two segments (the original only
	# guarded == 1, so an empty input crashed on max() below).
	if num_segment <= 1:
		retval['max_sim_value'] = 0
		retval['list_of_node'] = list_of_node
		retval['list_term'] = list_term
		retval['list_of_sim_value'] = list_of_sim_value
		return retval

	# Union of the terms of every index inside each segment.
	for segm_index in list_index:
		segm_term = set()
		for index in segm_index:
			segm_term |= set(list_of_list_term[index])
		list_term.append(segm_term)

	# Similarity of each adjacent pair of segments.
	for i in range(num_segment - 1):
		set_sim = ww.get_similarity_by_set(list_term[i], list_term[i + 1])
		if sim_type == 2:
			# Weight the set overlap by TF-IDF similarity.
			sim_value = set_sim * ww.get_similarity_by_TF_IDF(list_term[i], list_term[i + 1], dict_TF_IDF)
		else:
			# sim_type == 1 (and any other value): plain set similarity.
			sim_value = set_sim
		list_of_sim_value.append(sim_value)

	max_sim_value = max(list_of_sim_value)
	index_max_sim = list_of_sim_value.index(max_sim_value)

	# Rebuild the node list: merge the best pair, skip its right half.
	retval_list_of_node = []
	for i in range(num_segment):
		if i == index_max_sim:
			retval_list_of_node.append(list_of_node[i].merge_node(list_of_node[i + 1]))
		elif i == index_max_sim + 1:
			continue	# already consumed by the merge above
		else:
			retval_list_of_node.append(list_of_node[i])

	retval['max_sim_value'] = max_sim_value
	retval['list_of_node'] = retval_list_of_node
	retval['list_term'] = list_term
	retval['list_of_sim_value'] = list_of_sim_value

	return retval

def get_segment_candidate(_node_tree, max_segment = 8, depth_threshold = 4):
	'''Get segment candidate from node_tree.

	A node is accepted as a candidate when its depth <= depth_threshold
	and its size <= max_segment; otherwise both children are searched.

	Bug fix: the recursive calls previously dropped depth_threshold, so a
	non-default threshold silently reverted to 4 below the first level.
	'''
	retval = []
	if _node_tree.depth <= depth_threshold and _node_tree.get_size() <= max_segment:
		retval.append(_node_tree)
	else:
		# Forward BOTH limits down the recursion.
		retval.extend(get_segment_candidate(_node_tree.left, max_segment, depth_threshold))
		retval.extend(get_segment_candidate(_node_tree.right, max_segment, depth_threshold))

	return retval

def merge_segment_candidate(list_of_node, max_depth, max_segment = 8):
	'''Merge segment candidate according to size and max_segment.

	Walks the candidates left to right, greedily packing consecutive
	nodes into one group as long as the size drop from the previous node
	stays within max_depth / 5 and the group total stays within
	max_segment.  Returns a list of groups (lists of nodes).

	Robustness fix: an empty input now returns [] instead of raising
	IndexError on list_of_node[0].
	'''
	if not list_of_node:
		return []

	groups = [[list_of_node[0]]]
	group_size = list_of_node[0].get_size()

	# Pair every node with its successor.
	for prev, node in zip(list_of_node, list_of_node[1:]):
		size_gap_ok = prev.get_size() - node.get_size() <= max_depth / 5
		fits = group_size + node.get_size() <= max_segment
		if size_gap_ok and fits:
			groups[-1].append(node)
			group_size += node.get_size()
		else:
			groups.append([node])
			group_size = node.get_size()

	return groups

def merge_alone_segment(list_of_node, list_term, dict_TF_IDF = None):
	'''Merge alone segment to highest similarity neighbour.

	A segment whose node size is 1 ("alone") is merged into whichever
	neighbour (previous or next) it is most similar to, using
	get_similarity_node_tree.  Returns a new list of nodes; the input
	list itself is not modified.

	Bug fix: dict_TF_IDF defaulted to a shared mutable {}; it now
	defaults to None and is normalized to a fresh dict here.
	'''
	if dict_TF_IDF is None:
		dict_TF_IDF = {}

	if len(list_of_node) <= 1:
		# Zero or one segment: nothing to merge.
		return list_of_node

	elif len(list_of_node) == 2 :
		# With exactly two segments there is only one possible merge.
		if list_of_node[0].get_size() == 1 or list_of_node[1].get_size() == 1:
			return [list_of_node[0].merge_node(list_of_node[1])]
		else:
			return list_of_node
	else:
		retval = []
		# Sliding window of three consecutive nodes: prev / curr / next.
		prev_node = list_of_node[0]
		curr_node = list_of_node[1]
		next_node = list_of_node[2]
		next_idx = 2
		loop = True
		while loop:
			if curr_node.get_size() == 1:
				# Current segment is alone: compare similarity with both
				# neighbours and merge toward the closer one.
				prev_sim = get_similarity_node_tree(prev_node, curr_node, list_term, dict_TF_IDF)
				next_sim = get_similarity_node_tree(curr_node, next_node, list_term, dict_TF_IDF)
				if prev_sim >= next_sim:
					if next_idx == len(list_of_node) - 1:
						# Last window: flush the merge result and stop.
						new_node = prev_node.merge_node(curr_node)
						retval.append(new_node)
						retval.append(next_node)
						loop = False

					else:
						new_node = prev_node.merge_node(curr_node)
						prev_node = new_node
						curr_node = next_node
						next_idx += 1
						next_node = list_of_node[next_idx]
				else:
					if next_idx == len(list_of_node) - 1:
						retval.append(prev_node)
						new_node = curr_node.merge_node(next_node)
						retval.append(new_node)
						loop = False
					else:
						new_node = curr_node.merge_node(next_node)
						curr_node = new_node
						next_idx += 1
						next_node = list_of_node[next_idx]
			else:
				# Current segment is large enough: emit prev and slide on.
				if next_idx == len(list_of_node) - 1:
					retval.append(prev_node)
					retval.append(curr_node)
					retval.append(next_node)
					loop = False
				else:
					retval.append(prev_node)
					prev_node = curr_node
					curr_node = next_node
					next_idx += 1
					next_node = list_of_node[next_idx]

		return retval

def get_final_segmentation(list_of_list_node):
	'''Return list of segment index each cluster.

	Flattens the content of every node within a cluster into a single
	index list, one list per cluster.
	'''
	return [
		[index for node in cluster for index in node.content]
		for cluster in list_of_list_node
	]

def convert_synonim_term(path_synonym_file, list_of_list_term):
	'''Convert word to its synonym.

	Replaces, in place, every term that appears as a key in the synonym
	YAML file with its mapped value, and returns list_of_list_term.

	Improvement: one dict lookup per term instead of the original scan
	over every synonym key for every term, which was
	O(|synonyms| * |terms|) and could chain substitutions in an order
	that depended on dict iteration order.
	'''
	synonym_data = util.load_yaml_file(path_synonym_file)

	for list_term in list_of_list_term:
		for idx_term, term in enumerate(list_term):
			if term in synonym_data:
				list_term[idx_term] = synonym_data[term]

	return list_of_list_term

def remove_stop_word(list_of_list_term):
	'''Remove stop word.

	Applies pp.removeStopWord to every term list and returns the list of
	filtered term lists.
	'''
	return [pp.removeStopWord(list_term) for list_term in list_of_list_term]

def main():
	'''Debug entry point: log the path of the executing script.'''
	import __main__
	# __main__.__file__ is the path of the script actually being run.
	util.debug(__main__.__file__)
	util.debug('main')

# Run main() only when executed directly, not when imported as a module.
if __name__ == '__main__':
	main()
