'''
# Responsible for reading in timeseries files from each fabric; plots them and also exports them as pgfplots data.
'''
import os, sys
import matplotlib.pyplot as plt
import numpy as np

sys.path.append("../")

def normalized_cdf(array_of_values, nbins):
	"""Return (bin_edges, cdf) for the empirical CDF of ``array_of_values``.

	With ``density=True`` each histogram entry is a probability *density*, so
	a plain ``cumsum(hist)`` is off by a factor of the bin width and does not
	end at 1.  Multiplying by the per-bin width (``np.diff(bin_edges)``)
	before accumulating yields a true CDF whose final value is 1.0.

	Args:
		array_of_values: sequence of numeric samples (must be non-empty).
		nbins: number of equal-width histogram bins.

	Returns:
		Tuple of (bin_edges array of length nbins+1, cdf array of length nbins).
	"""
	hist, bin_edges = np.histogram(array_of_values, bins=nbins, density=True)
	# density * width = probability mass per bin; cumulative sum gives the CDF.
	cdf = np.cumsum(hist * np.diff(bin_edges))
	return (bin_edges, cdf)

def transform_file_to_normal(filename, xbins, output_filename="/Users/minyee/static_topology_engineer/plot_data/ju1-dls08_6month_fsn_resampled.txt"):
	"""Resample a space-separated timeseries file down to selected rows.

	Reads ``filename`` (first line is a header naming each column, every line
	is terminated by a standalone '\\n' token), keeps only the rows whose
	first-column integer value appears in ``xbins``, and writes the header
	plus those rows to ``output_filename``.

	Columns whose name contains "stretch" are rescaled as ((v - 5) / 3) + 1
	before being stored (maps 5 -> 1 and 8 -> 2).

	Args:
		filename: path of the input file.
		xbins: collection of first-column values to keep.
		output_filename: destination path; defaults to the previously
			hard-coded location for backward compatibility.
	"""
	# NOTE: the original used Python 2 `print` statements; converted to calls.
	print(xbins)
	with open(filename, "r") as f:
		first_line = True
		row_name_to_index = {}
		index_to_row_name = {}
		read_data = {}
		total_lines = 0
		for line in f:
			parsed_line = line.split(' ')
			if first_line:
				# Header row: record the column name <-> index mappings.
				first_line = False
				for i in range(len(parsed_line)):
					if parsed_line[i] == '\n':
						break
					row_name_to_index[parsed_line[i]] = i
					index_to_row_name[i] = parsed_line[i]
					read_data[parsed_line[i]] = []
			else:
				total_lines += 1
				for i in range(len(parsed_line)):
					if parsed_line[i] == '\n':
						break
					if i == 0:
						# First column is an integer key (e.g. timestamp/percentile).
						read_data[index_to_row_name[i]].append(int(parsed_line[i]))
					elif "stretch" in index_to_row_name[i]:
						# Rescale stretch columns: ((v - 5) / 3) + 1.
						stretch = ((float(parsed_line[i]) - 5.) / 3.) + 1.
						read_data[index_to_row_name[i]].append(stretch)
					else:
						read_data[index_to_row_name[i]].append(float(parsed_line[i]))
		print(read_data.keys())
		# Collect the row indices whose first-column value is requested.
		sample_entries = []
		print("relevant vector : {}".format(read_data[index_to_row_name[0]]))
		for i in range(total_lines):
			if read_data[index_to_row_name[0]][i] in xbins:
				print(i)
				sample_entries.append(i)
		# Rebuild the file text: header line, then one line per sampled row.
		str_builder = ""
		for i in range(len(row_name_to_index)):
			str_builder += "{} ".format(index_to_row_name[i])
		str_builder += "\n"
		for entry in sample_entries:
			for j in range(len(row_name_to_index)):
				str_builder += "{} ".format(read_data[index_to_row_name[j]][entry])
			str_builder += "\n"
		with open(output_filename, "w+") as wf:
			wf.write(str_builder)

					



# returns a list of tuples from a read file
def read_timeseries_file(filename):
	"""Parse a space-separated timeseries file into a list of tuples.

	Skips the header line.  Every other line contributes one tuple
	(mlu, lu90, lu50, ave_hop_count, timestamp), where the two load-
	utilization columns are zeroed when they read "nan" and then reordered
	so that the larger value is reported as lu90 and the smaller as lu50.
	"""
	entries = []
	with open(filename, "r") as handle:
		header_skipped = False
		for raw_line in handle:
			if not header_skipped:
				header_skipped = True
				continue
			fields = raw_line.split(' ')
			timestamp = int(fields[0])
			mlu = float(fields[1])
			first_lu = 0. if fields[2] == "nan" else float(fields[2])
			second_lu = 0. if fields[3] == "nan" else float(fields[3])
			lu90 = max(first_lu, second_lu)
			lu50 = min(first_lu, second_lu)
			ave_hop_count = float(fields[4])
			entries.append((mlu, lu90, lu50, ave_hop_count, timestamp))
	return entries

# implicitly assumes that performance timeseries tuple are arranged 
# based on increasing timestamps
# implicitly assumes that performance timeseries tuple are arranged
# based on increasing timestamps
def find_common_interval(perf_timeseries):
	"""Return the (lower, upper) timestamp interval covered by ALL series.

	Args:
		perf_timeseries: dict mapping a series key to a list of performance
			tuples whose 5th element (index 4) is the timestamp.

	Returns:
		Tuple (time_interval_lb, time_interval_ub): the largest minimum
		timestamp and the smallest maximum timestamp across all series.
	"""
	time_interval_lb = 0
	# BUG FIX: sys.maxint was removed in Python 3; sys.maxsize exists in both.
	time_interval_ub = sys.maxsize
	for series in perf_timeseries.values():
		timestamps = [entry[4] for entry in series]
		time_interval_lb = max(time_interval_lb, min(timestamps))
		time_interval_ub = min(time_interval_ub, max(timestamps))
	print("{} {}".format(time_interval_lb, time_interval_ub))
	return (time_interval_lb, time_interval_ub)

# nsamples - the number of entries, includes the first and last element
# nsamples - the number of entries, includes the first and last element
def get_inverse_cdf(array_of_values, nsamples):
	"""Sample the inverse CDF (percentile -> value) of ``array_of_values``.

	Args:
		array_of_values: non-empty sequence of numeric samples.
		nsamples: number of percentile steps; capped at len(array_of_values).
			The previous hard limit of 100 samples has been lifted -- the
			percentile math below works for any positive count.

	Returns:
		(xbin, ybin): xbin is [0, ..., 100] integer percentiles (nsamples + 1
		entries), ybin the sorted-array value at each percentile's index.
	"""
	nsamples = min(nsamples, len(array_of_values))
	array_num_entries = len(array_of_values)
	assert(array_num_entries > 0)
	sorted_array = sorted(array_of_values)
	# Percentile grid: 0 plus nsamples evenly spaced steps ending at 100.
	xbin = [0]
	for i in range(1, nsamples + 1):
		xbin.append(int(100. * (i / float(nsamples))))
	# Map each percentile onto an index into the sorted sample array.
	ybin = [sorted_array[int(float(p) / 100 * (array_num_entries - 1))] for p in xbin]
	assert(len(xbin) == len(ybin))
	return xbin, ybin

def export_inverse_cdf_to_pgffile(filename, xbins, ybins_mlu, ybins_lu90, ybins_lu50, ybins_ahc):
	"""Write the four inverse-CDF series to ``filename`` in pgfplots format.

	The output is a space-separated table with header
	"percentile mlu lu90 lu50 ave_hop_count" followed by one row per
	percentile.  All y-series must be the same length as ``xbins``.
	"""
	for series in (ybins_mlu, ybins_lu90, ybins_lu50, ybins_ahc):
		assert(len(xbins) == len(series))
	lines = ["percentile mlu lu90 lu50 ave_hop_count \n"]
	for percentile, mlu, lu90, lu50, ahc in zip(xbins, ybins_mlu, ybins_lu90, ybins_lu50, ybins_ahc):
		lines.append("{} {} {} {} {} \n".format(percentile, mlu, lu90, lu50, ahc))
	with open(filename, "w+") as f:
		f.write("".join(lines))
	return


def process_box_and_whiskers(data_vector, lower_whisker_index=0., lower_box_index=0.05, upper_box_index=0.95, upper_whisker_index=1.):
	"""Extract box-and-whisker summary values from a pre-sorted vector.

	Each fractional index (0..1) is scaled by the vector length and used to
	pick an element; the top index is clamped so a fraction of 1.0 selects
	the last element.

	Returns:
		(lower_whisker, lower_box, median, upper_box, upper_whisker)
	"""
	# Fractions must be strictly increasing and inside [0, 1].
	assert(0 <= lower_whisker_index < lower_box_index < upper_box_index < upper_whisker_index <= 1)
	vector_length = len(data_vector)

	def value_at(fraction):
		# Clamp so fraction == 1.0 maps onto the final element.
		return data_vector[min(int(fraction * float(vector_length)), vector_length - 1)]

	return (value_at(lower_whisker_index),
		value_at(lower_box_index),
		value_at(0.5),
		value_at(upper_box_index),
		value_at(upper_whisker_index))
	
def write_box_and_whisker(filename, keys_array, boxwhiskerprocesseddata):
	"""Write per-key box-and-whisker rows to ``filename`` and echo to stdout.

	``boxwhiskerprocesseddata`` maps each key in ``keys_array`` to a
	(lower_whisker, lower_box, median, upper_box, upper_whisker) tuple, as
	produced by process_box_and_whiskers.
	"""
	rows = ["index median box_top box_bottom whisker_top whisker_bottom\n"]
	print("\n\n")
	for index, key in enumerate(keys_array):
		whisker_bottom, box_bottom, median, box_top, whisker_top = boxwhiskerprocesseddata[key]
		rows.append("{} {} {} {} {} {}\n".format(index, median, box_top, box_bottom, whisker_top, whisker_bottom))
		print(key)
		print("lower whisker={:0.4f}, lower quartile={:0.4f}, median={:0.4f}, upper quartile={:0.4f}, upper whisker={:0.4f}".format(whisker_bottom, box_bottom, median, box_top, whisker_top))
	print("\n\n")
	with open(filename, "w+") as f:
		f.write("".join(rows))
	return

def main(dcn_name, all_filenames, output_dir, nsamples, write_to_file=False):
	"""Analyze every timeseries file for one fabric/DCN.

	Changes working directory to ./<dcn_name>/, reads each file in
	``all_filenames``, optionally exports inverse-CDF pgfplots files, plots
	the sorted MLU/LU90/LU50/hop-count curves (plt.show() blocks until the
	windows are closed), and writes box-and-whisker summary files into
	``output_dir``.

	Args:
		dcn_name: fabric name; also the subdirectory holding the input files.
		all_filenames: timeseries file names (relative to ./<dcn_name>/).
		output_dir: directory for exported pgfplots / box-whisker files.
		nsamples: number of inverse-CDF samples per series.
		write_to_file: when True, export per-file inverse-CDF tables.
	"""
	os.chdir("./{}/".format(dcn_name))
	perf_timeseries_collection = {}
	for filename in all_filenames:
		perf_timeseries_collection[filename] = read_timeseries_file(filename)
	# Called for its informational print; the interval itself is unused here.
	(interval_lb, interval_ub) = find_common_interval(perf_timeseries_collection)

	## Export inverse-CDF files to the target output directory.
	if write_to_file:
		for filename in all_filenames:
			perf_timeseries = perf_timeseries_collection[filename]
			mlu_timeseries = [x[0] for x in perf_timeseries]
			lu90_timeseries = [x[1] for x in perf_timeseries]
			lu50_timeseries = [x[2] for x in perf_timeseries]
			ahc_timeseries = [x[3] for x in perf_timeseries]
			xbins, mlus = get_inverse_cdf(mlu_timeseries, nsamples)
			_, lu90s = get_inverse_cdf(lu90_timeseries, nsamples)
			_, lu50s = get_inverse_cdf(lu50_timeseries, nsamples)
			_, ahcs = get_inverse_cdf(ahc_timeseries, nsamples)
			newfilename = filename.replace('timeseries', 'invcdf_{}'.format(dcn_name))
			export_inverse_cdf_to_pgffile(output_dir + newfilename, xbins, mlus, lu90s, lu50s, ahcs)

	## Plot MLU
	legends = []
	plt.figure()
	plt.title("MLU", fontsize=13)
	mlu_boxplots = {}
	percentage_below_one = []
	for filename in all_filenames:
		perf_timeseries = sorted([x[0] for x in perf_timeseries_collection[filename]])
		length = len(perf_timeseries)
		# BUG FIX: the original scanned from the top for the first value <= 1
		# and left the count at 0 when EVERY value exceeded 1 (reporting 100%
		# below one instead of 0%).  Counting directly is equivalent in all
		# other cases and correct in that one.
		fraction_below_one = float(sum(1 for v in perf_timeseries if v <= 1)) / length
		percentage_below_one.append(fraction_below_one)
		print("percentage below 1 filename: {} is {}".format(filename, fraction_below_one))
		print("ave MLU filename: {} is {}".format(filename, sum(perf_timeseries) / float(length)))
		plt.plot(perf_timeseries)
		legends.append(filename)
		mlu_boxplots[filename] = process_box_and_whiskers(perf_timeseries)
	plt.legend(legends)

	## Plot LU90
	legends = []
	plt.figure()
	plt.title("LU90", fontsize=13)
	lu90_boxplots = {}
	for filename in all_filenames:
		perf_timeseries = sorted([x[1] for x in perf_timeseries_collection[filename]])
		plt.plot(perf_timeseries)
		legends.append(filename)
		lu90_boxplots[filename] = process_box_and_whiskers(perf_timeseries)
	plt.legend(legends)

	## Plot LU50
	legends = []
	plt.figure()
	plt.title("LU50", fontsize=13)
	lu50_boxplots = {}
	for filename in all_filenames:
		perf_timeseries = sorted([x[2] for x in perf_timeseries_collection[filename]])
		plt.plot(perf_timeseries)
		legends.append(filename)
		lu50_boxplots[filename] = process_box_and_whiskers(perf_timeseries)
		# BUG FIX: originally divided by the stale `length` left over from the
		# MLU loop; use this series' own length.
		print("ave LU50 filename: {} is {}".format(filename, sum(perf_timeseries) / float(len(perf_timeseries))))
	plt.legend(legends)

	## Plot AHC
	legends = []
	plt.figure()
	plt.title("Ave Hop Count", fontsize=13)
	ahc_boxplots = {}
	for filename in all_filenames:
		perf_timeseries = sorted([x[3] for x in perf_timeseries_collection[filename]])
		plt.plot(perf_timeseries)
		legends.append(filename)
		ahc_boxplots[filename] = process_box_and_whiskers(perf_timeseries)
		# BUG FIX: same stale-`length` issue as the LU50 loop above.
		print("ave ahc filename: {} is {}".format(filename, sum(perf_timeseries) / float(len(perf_timeseries))))
	plt.legend(legends)
	plt.show()

	write_box_and_whisker(output_dir + "mlu_bp.txt", all_filenames, mlu_boxplots)
	# CONSISTENCY FIX: lu90_boxplots was computed but never exported in the
	# original, unlike the other three summaries.
	write_box_and_whisker(output_dir + "lu90_bp.txt", all_filenames, lu90_boxplots)
	write_box_and_whisker(output_dir + "lu50_bp.txt", all_filenames, lu50_boxplots)
	write_box_and_whisker(output_dir + "ahc_bp.txt", all_filenames, ahc_boxplots)
	return


if __name__ == "__main__":

	# Fabric/DCN to analyze; its timeseries files live under ./<dcn_name>/.
	dcn_name = "ju1_dls08"
	write_to_file = True
	#dcn_name = "ju2_dls03"
	print("Running Analysis Tool for DCN : {}".format(dcn_name))
	output_dir = "/Users/minyee/static_topology_engineer/plot_data/"
	# NOTE: `filenames` is assigned several times below; only the LAST
	# assignment takes effect.  The earlier lists are kept as experiment
	# presets -- re-enable one by commenting out the later assignments.
	filenames = [
		#"perfect_perfect",
		#"robusttoe_r4032t2016c4_perfect",
		#"robusttoe_r4032t2016c4_robustweightedte_r12t12c4",
		#"robusttoe_r4032t2016c4_robustte_r12t12c4",
		#"robusttoe_r4032t2016c4_wcmp",
		#"robusttoe_r4032t2016c4_oblrouting",
		#"static_perfect",
		#"static_robustweightedte_r12t12c4",
		#"static_robustte_r12t12c4",
		#"static_wcmp",
		"avetoe_r4032t2016_perfect",
		"maxtoe_r4032t2016_perfect",
		"robusttoe_r4032t2016c1_perfect",
		"maxtoe_r2016t2016_perfect",
		"avetoe_r2016t2016_perfect",
		"robusttoe_r2016t2016c4_perfect",
		"maxtoe_r288t2016_perfect",
		"avetoe_r288t2016_perfect",
		"robusttoe_r288t2016c4_perfect",
		"maxtoe_r12t288_perfect",
		"avetoe_r12t288_perfect",
		"robusttoe_r12t288c4_perfect",
		#"static_perfect",
		#"static_oblrouting",
		#"fattree_ecmp_taper0p100",
		#"fattree_ecmp_taper0p200",
		#"fattree_ecmp_taper0p500",
		#"fattree_ecmp_taper0p600",
		#"fattree_ecmp_taper0p800",
		#"fattree_ecmp_taper1p000",
	]

	# Preset 2 (overrides preset 1 above).
	filenames = [
		#"perfect_perfect",
		#"robusttoe_r4032t2016c4_perfect",
		"robusttoe_r4032t2016c4_robustweightedte_r12t12c4",
		"robusttoe_r4032t2016c4_robustweightedte_r288t288c4",
		"robusttoe_r4032t2016c4_robustweightedte_r576t576c4",
		"robusttoe_r4032t2016c4_robustweightedte_r864t864c4",
		"robusttoe_r4032t2016c4_wcmp",
		"static_robustweightedte_r12t12c4",
		"static_robustweightedte_r288t288c4",
		"static_robustweightedte_r576t576c4",
		"static_robustweightedte_r864t864c4",
		"static_wcmp",
		#"robusttoe_r4032t2016c4_robustte_r12t12c4",
		#"robusttoe_r4032t2016c4_wcmp",
		#"robusttoe_r4032t2016c4_oblrouting",
		#"static_perfect",
		#"static_robustweightedte_r288t288c4",
		#"static_robustte_r12t12c4",
		#"static_wcmp",
	]
	# Preset 3 (overrides preset 2 above).
	filenames = [
		#"static_avete_r12t12c4",
		#"static_avete_r12t12",
		#"static_avete_r288t288",
		#"static_avete_r576t576",
		#"static_avete_r864t864",
		#"avetoe_r4032t2016_avete_r12t12",
		#"avetoe_r4032t2016_avete_r288t288",
		#"avetoe_r4032t2016_avete_r576t576",
		#"avetoe_r4032t2016_avete_r864t864",
		#"static_maxte_r12t12",
		#"static_maxte_r288t288",
		#"static_maxte_r576t576",
		#"static_maxte_r864t864",
		#"maxtoe_r4032t2016_maxte_r12t12",
		#"maxtoe_r4032t2016_maxte_r288t288",
		#"maxtoe_r4032t2016_maxte_r576t576",
		#"maxtoe_r4032t2016_maxte_r864t864",
		"robusttoe_r4032t2016c4_avete_r12t12",
		"robusttoe_r4032t2016c4_avete_r288t288",
		"robusttoe_r4032t2016c4_avete_r576t576",
		"robusttoe_r4032t2016c4_avete_r864t864",
		"robusttoe_r4032t2016c4_maxte_r12t12",
		"robusttoe_r4032t2016c4_maxte_r288t288",
		"robusttoe_r4032t2016c4_maxte_r576t576",
		"robusttoe_r4032t2016c4_maxte_r864t864",
	]
	# Preset 4 -- the ACTIVE list actually passed to main().
	filenames = [
		#"robusttoe_r4032t2016c4_perfect",
		#"robusttoe_r2016t2016c4_robustweightedte_r288t288c4",
		#"robusttoe_r2016t2016c6_robustweightedte_r288t288c6",
		#"robusttoe_r2016t2016c4_robustweightedte_r864t864c4",
		#"robusttoe_r2016t2016c6_robustweightedte_r864t864c6",
		#"static_robustweightedte_r288t288c4",
		#"static_robustweightedte_r288t288c6",
		#"static_robustweightedte_r864t864c4",
		#"static_robustweightedte_r864t864c6",
		#"robusttoe_r4032t2016c4_robustweightedte_r576t576c4",
		#"robusttoe_r4032t2016c4_robustweightedte_r288t288c4",
		#"robusttoe_r4032t2016c4_robustweightedte_r12t12c4",
		#"robusttoe_r4032t2016c4_robustweightedte_r1t12c4",
		#"robusttoe_r4032t2016c4_robustweightedte_r1t12c4",
		#"static_robustweightedte_r576t576c4",
		#"static_robustweightedte_r288t288c4",
		#"static_robustweightedte_r12t12c4",
		#"static_robustweightedte_r1t12c4",
		"robusttoe_r4032t2016c4_robustweightedte_r1t12c12",
		"static_robustweightedte_r1t12c12",
		#"robusttoe_r4032t2016c4_robustweightedte_r288t288c1",
		#"robusttoe_r4032t2016c4_robustweightedte_r576t576c1",
		#"robusttoe_r4032t2016c4_robustweightedte_r12t12c1",
		#"robusttoe_r4032t2016c4_robustweightedte_r1t12c1",
		#"static_robustweightedte_r288t288c1",
		#"static_robustweightedte_r576t576c1",
		#"static_robustweightedte_r12t12c1",
		#"static_robustweightedte_r1t12c1",
		#"robusttoe_r2016t2016c4_robustweightedte_r12t12c6",
		#"robusttoe_r2016t2016c6_robustweightedte_r12t6c6",
		#"robusttoe_r2016t2016c4_robustweightedte_r288t288c6",
		#"robusttoe_r2016t2016c4_robustweightedheuristicte_r288t288c6",
		#"robusttoe_r2016t2016c4_robustweightedte_r288t288c6",
		#"robusttoe_r4032t2016c4_robustweightedte_r1t12c1",
		#"robusttoe_r4032t2016c4_wcmp",
		#"static_robustte_r1t12c1",
		#"avetoe_r4032t2016_avete_r1t12",
		#"static_avete_r1t12",
		#"avetoe_r4032t2016_avete_r1t12",
		#"static_perfect",
		#"static_wcmp",
		#"static_robustweightedte_r288t288c4",
		#"static_robustweightedte_r1t12c4",
		#"static_robustweightedte_r1t12c1",
		#"robusttoe_r4032t2016c4_perfect",
		#"static_perfect",
		#"perfect_perfect",
	]
	nsamples = 100

	# Files on disk are named with a "timeseries_" prefix.
	for i in range(len(filenames)):
		filenames[i] = "timeseries_" + filenames[i]
	main(dcn_name, filenames, output_dir, nsamples, write_to_file=write_to_file)
	print("Completed Analysis Tool for DCN : {}".format(dcn_name))