#!/usr/bin/env python

import argparse
import sys
import os
import copy
import pylab

try:
	import matplotlib.pyplot as plt
except ImportError:
	print "matplotlib library missing"
	sys.exit(2)
try:
	import pandas as pd
except ImportError:
	print "pandas library missing"
	sys.exit(2)

def is_int(s):
    """Return True if ``s`` can be converted to an int, else False.

    Accepts anything int() accepts (e.g. '12', '-3', 7).  Also catches
    TypeError so non-string/non-number inputs such as None return False
    instead of raising (the original only caught ValueError).
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False



def main():
	parser = argparse.ArgumentParser(usage = './grap_results.py  -flag <argument>\n\nColumn Numbers are as follows: \n1:file_name, \n2:algorithm, \n3:n_points, \n4:n_features, \n5:n_clusters, \n6:count, \n7:cpu_time, \n8:gpu_time, \n9:speed_up, \n10:cpu_iter, \n11:gpu_iter, \n12:p1, \n13:p2, \n14:consensus,\n15:static_count,\n16:opti_level\n17:percent_p1\n18:percent_p2\n19:streak_count\n20:error' )

	#user condition flags
	parser.add_argument('-i'   , type=str, action = 'store',  dest = 'file_path'   ,default = None, help = 'Input Results file that requires parsing for creating graphs')
	parser.add_argument('-k'   , type=int, action = 'store',  dest = 'n_clusters'  ,default = None, help = 'filter for \'n\' clusters')
	parser.add_argument('-file', type=str, action = 'store',  dest = 'file_name'   ,default = None, help = 'filter for  file_name : -file_name example.txt')
	parser.add_argument('-p1'  , type=int, action = 'store',  dest = 'p1'          ,default = None, help = 'filter for Phase 1 iterations')
	parser.add_argument('-p2'  , type=int, action = 'store',  dest = 'p2'          ,default = None, help = 'filter for Phase 2 iterations')
	parser.add_argument('-con' , type=int, action = 'store',  dest = 'consensus'   ,default = None, help = 'filter for thread consensus')
	parser.add_argument('-sta' , type=int, action = 'store',  dest = 'static_count',default = None, help = 'filter for static thread count')
	parser.add_argument('-opt' , type=int, action = 'store',  dest = 'opti_level'  ,default = None, help = 'filter for optimization level')
	parser.add_argument('-x'   , type=str, action = 'store',  dest = 'x_axis'      ,default = None, help = 'Input x_axis columns numbers')
	parser.add_argument('-y'   , type=str, action = 'store',  dest = 'y_axis'      ,default = None, help = 'Input y_axis columns numbers')
	parser.add_argument('-o'   , type=str, action = 'store',  dest = 'output_file' ,default = None, help = 'Specify an output file name (optional)')
	parser.add_argument('-pp1',  type=int, action = 'store',  dest = 'percent_p1'  ,default = None, help = 'filter by Phase 1 percentage')
	parser.add_argument('-pp2',  type=int, action = 'store',  dest = 'percent_p2'  ,default = None, help = 'Filter by Phase 2 percentage')
	parser.add_argument('-streak', type=int, action = 'store',  dest = 'streak_count' ,default = None, help = 'filter data by streak count')
	parser.add_argument('-sort', type=int, action = 'store',  dest = 'sorting'     ,default = None, help = 'Specify a column number to sort entire Data Frame')


	#flags for future implementations
	
	"""parser.add_argument('-points'  ,  action = 'store_true',default = False,  dest = 'n_points'   )
	parser.add_argument('-features',  action = 'store_true',default = False,  dest = 'n_features' )
	parser.add_argument('-count'   ,  action = 'store_true',default = False,  dest = 'count' 	  )
	parser.add_argument('-cput'    ,  action = 'store_true',default = False,  dest = 'cpu_time'   )
	parser.add_argument('-gput'    ,  action = 'store_true',default = False,  dest = 'gpu_time'   )
	parser.add_argument('-cpu_itr' ,  action = 'store_true',default = False,  dest = 'cpu_iter'   )
	parser.add_argument('-gpu_itr' ,  action = 'store_true',default = False,  dest = 'gpu_iter'   )
	parser.add_argument('-spd'     ,  action = 'store_true',default = False,  dest = 'speed_up'   )"""

	#parse arguments	
	args = parser.parse_args()

	head_list = {1:'file_path', 2:'algorithm', 3:'n_points', 4:'n_features', 5:'n_clusters', 6:'count', 7:'cpu_time', 8:'gpu_time', 9:'speed_up', 10:'cpu_iter', 11:'gpu_iter', 12:'p1', 13:'p2', 14:'consensus',15:'static_count',16:'opti_level',17:'percent_p1',18:'percent_p2',19:'streak_count', 20:'error'}

	#create a list of all the flags the user wants plotted
	flag_list = [False]* 22
	flag_list[0]  = not (args.file_name    == None   )
	flag_list[1]  = not (args.file_path    == None   )
	#flag_list[2]  = not (args.algorithm    == None  )
	#flag_list[3]  = not (args.n_points     == False )
	#flag_list[4]  = not (args.n_features   == False )
	flag_list[5]  = not (args.n_clusters   == None   )
	#flag_list[6]  = not (args.count        == False )
	#flag_list[7]  = not (args.cpu_time     == False )
	#flag_list[8]  = not (args.gpu_time     == False )
	#flag_list[9]  = not (args.speed_up     == False )
	#flag_list[10] = not (args.cpu_iter     == False )
	#flag_list[11] = not (args.gpu_iter     == False )
	flag_list[12] = not (args.p1           == None  )
	flag_list[13] = not (args.p2           == None  )
	flag_list[14] = not (args.consensus    == None  )
	flag_list[15] = not (args.static_count == None  )
	flag_list[16] = not (args.opti_level   == None  )
	flag_list[17] = not (args.percent_p1   == None  )
	flag_list[18] = not (args.percent_p2   == None  )
	flag_list[19] = not (args.sorting      == None  )
	#flag_list[20] = not (args.error        == None  )
	

	#print flag_list  #DEBUG

	if(not args.file_path):
		print "Input file not provided ./grap_results.py -i <file_path>"
		sys.exit(2)  #unix system error code '2' for syntax errors

	#check if the file exists
	if not os.path.isfile(args.file_path):
		print "File non-existent!"
		sys.exit(2)
	
	#print "reading file now ..."  #DEBUG

	#create the data_frame (df) using pandas.read_csv
	# seprator/delimiter is whitespace represented by '\s+' for ignoring consecuitive whitespaces
	# by default the first row is assigned as header information for the data frame, hence header set to None
	df = pd.read_csv(str(args.file_path),sep = '\s+',header=None)
	# csv_read cannot assign column names on initialization of the df
	df.columns = ['file_name', 'c1', 
				  'algorithm', 'c2',
				  'n_points', 'n_features', 'n_clusters', 'count', 'c3', 
	              'cpu_time', 'gpu_time', 'speed_up', 'c4',
	              'cpu_iter', 'gpu_iter', 'c5', 
	              'p1', 'p2', 'consensus','static_count','opti_level','c6',
	              'percent_p1', 'percent_p2','streak_count']
	# dropping all columns with the colon seprator
	df = df.drop(['c1','c2','c3','c4','c5','c6'], axis = 1)	

	#print df	 DEBUG

	#generating error column
	df['error'] = (df['count'] / df['n_points'])*100
	#print df['error']   #DEBUG
	#print args.sorting
	#print flag_list[21]

	#print head_list[int(args.sorting)]

	if(args.sorting):
		if((args.sorting > 19)):
			print "Invalid Sorting. Column does not exist!"
			sys.exit(2)
		else:
			df = df.sort( head_list[int(args.sorting)] )
			df = df.reset_index(drop=True)

	#print df

	flag_filter = "df["
	flag_count = 0
	#check if any of the flags are provided
	if ((args.n_clusters==None) and
	    (args.file_name == None) and
	    (args.p1 == None) and
	    (args.p2 == None)  and
	    (args.consensus == None) and 
	    (args.static_count == None) and 
	    (args.opti_level == None) and 
	    (args.sorting == None) and 
	    (args.percent_p1 == None) and
	    (args.percent_p2 == None) and
	    (args.streak_count == None)
       ):
		print "No flags provided. Please view see help for the command"
		sys.exit(2)

	exclud_sort = (args.n_clusters==None) and (args.file_name == None) and (args.p1 == None) and  (args.p2 == None)  and  (args.consensus == None)  and (args.static_count == None) and (args.opti_level == None) and (args.percent_p1 == None) and (args.percent_p2 == None) and (args.streak_count == None)
	#print exclud_sort
	#print exclud_sort
	

	#routiines to construct filter strings for user provided flags
	if(args.file_name):
		file_name = df.iloc[0][0]	
		file_name = file_name.split('/')
		file_name = file_name[len(file_name)-1]
		path = (df.iloc[0][0]).strip(file_name)
		new_file_path = path + args.file_name
		file_check  = "(df[head_list[1]] == " + new_file_path +')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + file_check
		flag_count = flag_count + 1

	if (args.n_clusters):
		cluster_check =  "(df[head_list[5]] == " + str(args.n_clusters) +')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + cluster_check
		flag_count = flag_count + 1

	if (args.p1):
		p1_check =  "( df[head_list[12]] == " + str(args.p1) + ')'

		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + p1_check
		flag_count = flag_count + 1

		#print p1_check #DEBUG 	 	
		
	if (args.p2):
		p2_check =  "( df[head_list[13]] == " + str(args.p2) + ')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + p2_check
		flag_count = flag_count + 1
		#print p2_check #DEBUG	 	

	if (args.consensus):
		consensus_check = "( df[head_list[14]] == " + str(args.consensus) + ')'

		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + consensus_check
		flag_count = flag_count + 1

		#print consensus_check 	#DEBUG

	if (args.static_count):
		static_check =  "( df[head_list[15]] == " + str(args.static_count) + ')'
		
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + static_check
		flag_count = flag_count + 1

		#print static_check  #DEBUG
	
	if (args.opti_level):
		opti_check =  "( df[head_list[16]] == " + str(args.opti_level) + ')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + opti_check
		flag_count = flag_count + 1
		#print opti_check	#DEBUG

	if (args.percent_p1):
		perc_p1_check =  "( df[head_list[17]] == " + str(args.percent_p1) + ')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + perc_p1_check
		flag_count = flag_count + 1
		#print perc_p1_check	#DEBUG


	if (args.percent_p2):
		perc_p2_check =  "( df[head_list[18]] == " + str(args.percent_p2) + ')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + perc_p2_check
		flag_count = flag_count + 1
		#print perc_p2_check	#DEBUG


	if (args.streak_count):
		streak_check =  "( df[head_list[19]] == " + str(args.streak_count) + ')'
		if flag_count > 0:
			flag_filter = flag_filter + " & "
		flag_filter = flag_filter + streak_check
		flag_count = flag_count + 1
		#print streak_check	#DEBUG
	
	#closing up the filter operation
	flag_filter = flag_filter + "]"

	#print flag_filter

	if( exclud_sort == False):
		results =  eval(flag_filter)
	else:
		results = df

	#print results #DEBUG

	new_y = list()	
	if results.empty:
		print "No results match for provided parameter combination"
		sys.exit(1)
	#else:
		#print results #DEBUG

	#------------DEBUG start --------------#
	#display all the user selected parametes
	#print "You have selected :"
	#for i in range(2,16):
	#	if(flag_list[i]):
	#		print str(i) + ":\t" + head_list[i]
	#-----------DEBUG end------------------#

	graph_count = 0

	#configuring the x axis
	x = 0

	#print args.x_axis == 'r'   #DEBUG

	if(args.x_axis):
		if(not (args.x_axis == 'r')):
			if( (int(args.x_axis) < 0 and int(args.x_axis) > 17 )):
				print "Invalid x_axis column"
				sys.exit(1)
		x = int(args.x_axis)
		#print x
	else:
		print "Missing x_axis parameter"
		sys.exit(2)


	label = [None, 						#0
			 'data file name',  		#1
			 'algorithm:',				#2
			 'n(points)',				#3
			 'n(features)',				#4
			 'clusters(k)',				#5
			 'error count',				#6
			 'cpu time(sec)', 			#7
			 'gpu_time(sec)', 			#8
			 'speed up(cpu_t/gpu_t)', 	#9
			 'cpu iterations', 			#10
			 'gpu_iterations', 			#11
			 'phase 1 iteration period', #12
			 'phase 2 iteration period', #13
			 'block level consensus', 	#14
			 'static thread count', 	#15
			 'optimization level', 		#16
			 '% phase 1',				#17
			 '% phase 2',				#18
			 'streak count',			#19
			 '% error']					#20

	fig =plt.figure()
	#what do you want on the y axis
	if(args.y_axis):
		y = args.y_axis
		y =  y.split(',')
		#print y  #DEBUG
	
	#print len(results.columns)

	scale_flag = False

	for i  in range(0,len(y)):
		#print is_int(y[i])  #DEBUG
		if(is_int(y[i])):
			new_y.append( int(y[i]) )
			tmp = int(y[i])
			if tmp == 20:
				scale_flag = True
			if (tmp > 0 and tmp < len(results.columns)+1 ):
				#plotting the parameters
				
				#DEBUG
				#print head_list[int(x)] + "," + head_list[int(y[i])]
				#print len(results[head_list[int(y[i])]])
				#print len(range(1,len(results)+1))


				#pandas df plot function to plot columns of data-frame

				if(x == 'r'):
					results.plot(y = head_list[int(y[i])], style='-o', label = label[ int(y[i])  ] )
				else:
					results.plot(x = head_list[int(x)], y = head_list[tmp], style='-o', label = label[ tmp  ] )
			else:
				print "Input " + y[i] + " is invalid. Excluding!"
		else: 
			print "missing y_axis parameters"
			sys.exit(2)

	#if scale_flag == True:
	#	pylab.ylim([0,100])
	#print new_y
	#print x

	#print the label for x_axis on the graph
	if(x == 'r'):
		plt.xlabel('number of experiments')
	else:
		plt.xlabel(label[int(x)])

	#incrementing Graph count
	graph_count = graph_count + 1

	#declaring strings for y_axis and super_title
	y_title = str()
	sup_title = str()

	#plot the legend
	plt.legend()

	#generating string for the title
	for i in range(0,len(new_y)): 
		y_title = y_title + str(results.columns.values[new_y[i]-1]) + " "
		if(i < len(new_y)-1):
			y_title = y_title + "& "
		#DEBUG
		#print results.columns.values[new_y[i]]
	#print y_title

	#printing the title to the graph
	if(x == 'r'):
		plt.title(  "num. of experiments"+ " vs." + y_title )		
	else:
		plt.title(  results.columns.values[int(x)-1]+ " vs." + y_title )

	#for i in range(0,len(flag_list)):
	#	if flag_list[i] == True:
	#		sup_title = suptitle + results.columns.values[i] + " " +  sys.argv[1:]

	#printing the super_title for the graph
	fig.suptitle(str(results.iloc[0][1]) +" "+str(results.iloc[0][2])+ " "+ str(sys.argv[1:]))

	#print new_y #DEBUG

	#list to store file_names in ./Graph/ directory
	file_list = list()


	#create a directory for storing Graphs if it does not exist
	if not os.path.exists("Graphs"):
		os.makedirs("Graphs")

	#save file with user input name if flag+ name provided
	if(args.output_file):
		graph_name = './Graphs/' + args.output_file + '.png'
	#save the Graph as .png file in GraphX.png where X is a incrementing Number
	else:
		#check dir for file with the name GraphX.png
		onlyfiles = [ f for f in os.listdir('./Graphs/') if os.path.isfile(os.path.join('./Graphs/',f)) if f.endswith('.png') if f.startswith('Graph')]

		print onlyfiles #DEBUG

		if(len(onlyfiles) > 0):
			for i in range(0,len(onlyfiles)):
				#remove Graph and .png from string to extract the X number
				tmp = onlyfiles[i].strip("Graph")
				tmp = tmp.strip('.png')
				#Check if the remainder X is a number only
				if(is_int(tmp)):
					file_list.append(int(tmp))
			#print sorted(onlyfiles, reverse = True)	#DEBUG
			
			#print file_list  #DEBUG
			
			file_list = sorted(file_list, reverse = True)
			graph_count = int(file_list[0])+1 
			
			#print graph_count  #DEBUG

		graph_name = './Graphs/Graph' + str(graph_count) + '.png'
		# print "Saving figure ..."  #DEBUG
	
	plt.savefig(graph_name, bbox_inches = 'tight')
	#print "Done!" #DEBUG

# run the CLI only when executed as a script, not when imported
if __name__ == "__main__":
	main()