#!/usr/bin/python

import os
import time
import abc
import csv

# Location of the benchmark jar, relative to this script's working directory.
jarPath = "../bin/"
jarFile = jarPath + "jcachemark.jar"
# Scratch files the java process and perf write into; each run's content is
# harvested into per-benchmark CSVs and the buffers are then truncated.
dataBuffer = "buffer.data"
perfBuffer = "buffer.perf"

# Base java invocation; benchmark-specific arguments are appended per run.
cmdJava = ['java', '-jar', jarFile]
# L1 data-cache hardware events sampled via `perf stat`.
perfEvents = ['L1-dcache-loads', 'L1-dcache-load-misses', 'L1-dcache-stores', 'L1-dcache-store-misses', 'L1-dcache-prefetches', 'L1-dcache-prefetch-misses']
cmdPerfE = ','.join(perfEvents)
# `-x,` selects CSV output; results are appended to perfBuffer via -o/--append.
cmdPerf = ['perf', 'stat', '-x,', '-o', perfBuffer, '--append', '-e', cmdPerfE]


class benchmark:
	"""Driver for one jcachemark benchmark configuration.

	Builds a java command line from positional arguments, optionally wraps
	it in `perf stat`, runs it via os.system, then harvests the scratch
	buffer files (dataBuffer / perfBuffer) into per-benchmark CSV files.
	"""

	def __init__(self, name = "benchmark", withPerf = False):
		# Positional arguments passed to the jar, in order.
		self.__args = []
		# Prefix for the per-benchmark output CSV file names.
		self.__name = name
		# When True, run() wraps the java command in `perf stat`.
		self.__withPerf = withPerf
		# Start fresh output files: the perf CSV gets a header row,
		# the result CSV is just truncated.
		with open(self.__getPerfFileName__(), 'w') as pf:
			csv.writer(pf).writerow(['CMD'] + perfEvents)
		open(self.__getResultFileName__(), 'w').close()

	def addArg(self, arg):
		"""Append one positional java argument (stored as str)."""
		self.__args.append(str(arg))

	def setArg(self, index, value):
		"""Overwrite the positional java argument at *index* (stored as str)."""
		self.__args[index] = str(value)

	def setArgNew(self, **args):
		"""Experimental keyword-argument front end; currently only prints.

		BUG FIX: the original called dict.viewvalues()/viewkeys(), which
		exist only on Python 2 and raise AttributeError on Python 3.
		"""
		print('values', list(args.values()))
		print('keys', list(args.keys()))
		for a in args:
			print(a, args[a])

	def __getResultFileName__(self):
		"""Name of the CSV collecting the benchmark's stdout records."""
		return self.__name + "_result.csv"

	def __getPerfFileName__(self):
		"""Name of the CSV collecting one perf-counter row per run."""
		return self.__name + "_perf.csv"

	def __getCMD__(self, withPerf):
		"""Build the shell command string, optionally prefixed by perf stat."""
		cmdParts = list()
		if withPerf:
			cmdParts += list(cmdPerf)
		cmdParts += list(cmdJava)
		cmdParts += self.__args
		return " ".join(cmdParts)

	def __storeResult__(self):
		"""Append whitespace-separated records from dataBuffer to the result CSV."""
		results = list()
		with open(dataBuffer) as data:
			for line in data:
				results.append(line.split())

		# Truncate the scratch buffer so the next run starts clean.
		open(dataBuffer, 'w').close()

		with open(self.__getResultFileName__(), 'a') as output:
			writer = csv.writer(output)
			for row in results:
				writer.writerow(row)

	def __storePerf__(self):
		"""Parse perfBuffer (output of `perf stat -x,`) and append one CSV row."""
		if self.__withPerf == False:
			return
		perf = list()
		with open(perfBuffer) as data:
			for line in data:
				# Skip perf's comment/header lines.
				if line[0] == '#':
					continue
				record = line.split(',')
				# NOTE(review): assumes `perf stat -x,` emits exactly
				# "value,event" pairs; newer perf versions emit extra
				# fields, which this silently drops — verify locally.
				if len(record) != 2:
					continue
				perf.append(record[0])

		if len(perf) != len(perfEvents):
			print("Something wrong with perf results!\nlen(perf):%d, len(perfEvents):%d" % (len(perf), len(perfEvents)))
		open(perfBuffer, 'w').close()

		# Prepend the (perf-less) command line so each row is self-describing.
		perf = [self.__getCMD__(False)] + perf

		with open(self.__getPerfFileName__(), 'a') as output:
			csv.writer(output).writerow(perf)

	def __cleanBuffer__(self):
		"""Truncate both scratch buffer files before a run."""
		open(dataBuffer, 'w').close()
		open(perfBuffer, 'w').close()

	def __store__(self):
		"""Harvest both scratch buffers.

		BUG FIX: the original assigned the (always-None) return values of
		the two store helpers to unused locals.
		"""
		self.__storeResult__()
		self.__storePerf__()

	def run(self):
		"""Execute one benchmark run and harvest its output files.

		NOTE(review): uses os.system on a joined string; arguments are
		script-controlled here, but subprocess.run(list) would be safer.
		"""
		print(self.__getCMD__(False))
		self.__cleanBuffer__()
		os.system(self.__getCMD__(self.__withPerf))

		self.__storeResult__()
		self.__storePerf__()

	def test(self):
		"""Debug helper: exercise __storePerf__ directly."""
		print(self.__storePerf__())

def falseSharing(objType):
	"""Run the false-sharing ("fs") benchmark for one object layout.

	objType: "hard" (hard coded object) or "soft" (extends object).
	Sweeps object size 16-104 bytes (step 8) and 1-8 threads.
	"""
	bm = benchmark()
	# sub-command, object layout, object-size sweep, thread sweep, iterations
	for argument in ("fs", objType, "16,104,+8", "1,8,+1", str(50 * 1000 * 1000)):
		bm.addArg(argument)
	bm.run()

def falseSharingSep(objType):
	"""Like falseSharing, but one separate JVM invocation per
	(objectSize, threadNumber) combination, pausing between runs.

	objType: "hard" (hard coded object) or "soft" (extends object).

	BUG FIX: the original called bm.getOutputFileName() and assigned
	bm.nameFile — neither exists on benchmark, so it crashed with
	AttributeError before the first run. The intent (separate ".sep"
	output files) is achieved by constructing the benchmark with a
	distinct name instead.
	"""
	bm = benchmark(name = "benchmark.sep")
	bm.addArg("fs")
	bm.addArg(objType)
	# Placeholders at indices 2 and 3; overwritten on every iteration.
	bm.addArg("16,104,+8")
	bm.addArg("1,8,+1")
	bm.addArg(str(50 * 1000 * 1000))
	# Full sweep would be range(16, 105, 8); currently pinned to 64 bytes.
	for es in range(64, 65, 8):
		for tn in range(1, 9, 1):
			bm.setArg(2, es)
			bm.setArg(3, tn)
			bm.run()
			time.sleep(2)

def stride():
	"""Run the stride benchmark.

	Stride sweep 1-30 ints (4-120 bytes), thread sweep 1-8.
	"""
	bm = benchmark()
	iterations = 100 * 1000 * 1000
	for argument in ("stride", "1,30,+1", "1,8,+1", str(iterations)):
		bm.addArg(argument)
	bm.run()

def srw():
	"""Run the shared read/write ("srw") benchmark, read mode then write mode.

	Array-size sweep in elements: 512 -> 512k, step 512
	(32-bit ints: 2k -> 2M bytes, step 2k; 64-bit: 4k -> 4M, step 4k).
	"""
	bm = benchmark()
	bm.addArg("srw")
	lo, hi, step = 512, 512 * 1024, 512
	bm.addArg("%d,%d,+%d" % (lo, hi, step))  # array size sweep
	bm.addArg("1,8,+1")                      # thread count sweep
	bm.addArg("r")                           # access mode: read first
	bm.addArg(str(1000 * 10))                # iterations
	bm.run()
	# Same configuration again in write mode (arg index 3 is the mode).
	bm.setArg(3, "w")
	bm.run()
	

def test():
	"""Smoke test: run a benchmark with no java arguments."""
	print("test")
	benchmark().run()

def repeatTime():
	"""RepeatRun benchmark: sweep the repeat count 2^0 .. 2^30 (arg index 1)."""
	bm = benchmark()
	for argument in ("RepeatRun", "1", "1024"):
		bm.addArg(argument)
	for exponent in range(31):
		bm.setArg(1, 2 ** exponent)
		bm.run()

def repeatLen():
	"""RepeatRun benchmark: sweep the length 2^0 .. 2^20 (arg index 2)."""
	bm = benchmark()
	for argument in ("RepeatRun", "1", "1024"):
		bm.addArg(argument)
	for exponent in range(21):
		bm.setArg(2, 2 ** exponent)
		bm.run()

def arrayReadStride():
	"""ArrayRead stride sweep over a fixed working set: L1 sizes, then L2.

	BUG FIX: the second sweep called bm.setArg() with a single argument,
	but setArg requires (index, value), so the original raised TypeError
	before the L2 run ever started. The stride range is argument index 2.
	"""
	bm = benchmark()
	bm.addArg("ArrayRead")
	# Working set: 100M elements (the original comment claimed 1 GB —
	# true only if one element is ~10 bytes; TODO confirm element size).
	bm.addArg(str(100 * 1024 * 1024))
	# Stride sweep sized for the L1 cache.
	start = 1024
	end = 128 * 1024
	step = 1024
	bm.addArg("%d,%d,+%d" % (start, end, step))
	bm.addArg("1")
	bm.run()
	# Stride sweep sized for the L2 cache.
	start = 32 * 1024
	end = 10 * 1024 * 1024
	step = 32 * 1024
	bm.setArg(2, "%d,%d,+%d" % (start, end, step))
	bm.run()

def arrayRead():
	"""ArrayRead benchmark at four cache-level sizes, read patterns 0-3."""
	kb = 1024
	sizes = (16 * kb, 2048 * kb, 6144 * kb, 1024 * 1024 * kb)
	bm = benchmark()
	bm.addArg("ArrayRead")
	for size in sizes:
		bm.addArg(str(size))
	bm.addArg("0,3,+1")
	bm.run()

def fsObject():
	"""FSObject false-sharing benchmark with fixed parameters."""
	bm = benchmark()
	for argument in ("FSObject", "1", "1"):
		bm.addArg(argument)
	bm.run()

def fsArray():
	"""FSArray false-sharing benchmark: fixed thread count, sweep 1-10."""
	bm = benchmark()
	for argument in ("FSArray", "2", "1,10,+1"):
		bm.addArg(argument)
	bm.run()

def fsByteBuffer(perf = False):
	"""FSByteBuffer false-sharing benchmark, one run per stride 1-10.

	perf: when True, collect perf hardware counters for each run.

	BUG FIX: the original wrapped bm.run() in print(), which only printed
	a meaningless "None" (run() returns nothing).
	"""
	bm = benchmark(name = 'fsByteBuffer', withPerf = perf)
	bm.addArg("FSByteBuffer")
	bm.addArg("2")
	# Placeholder at index 2; overwritten by each stride below.
	bm.addArg("1,10,+1")
	for stride in range(1, 11):
		bm.setArg(2, stride)
		bm.run()

def main():
	"""Entry point: currently exercises setArgNew on a perf-enabled benchmark.

	Other available benchmark drivers (currently disabled): repeatLen,
	repeatTime, arrayRead, fsObject, fsArray, fsByteBuffer(True).
	"""
	bm = benchmark(name = 'test', withPerf = True)
	bm.setArgNew(zz = '1st', bb = '2nd', cc = '3rd')

# Script entry point.
if __name__ == '__main__':
	main()
