#Written by: Arun Balasubramanian
#This code simulates the logic for local and internode preemptive scheduling for CAN peer to peer technique in remote desktop grids
#This is done by invoking a separate thread for functionality within a node. The communication between nodes is implemented through interthread communication
import time
import thread
import sys
import random
import string
# Scheduling-priority weights used by node.calculate_priority():
#   p_job = alpha * wait_time - beta * est_time
# With alpha == 0, priority depends only on remaining estimated time.
beta=50
alpha=0
class job:
	"""One grid job plus the scheduling state the simulator tracks for it.

	Constructed with no arguments it becomes a sentinel job
	(jobid == -1, p_job == -999999999) that loses every priority
	comparison against a real job.  Numeric fields arrive as strings
	from the trace file and are converted here.
	"""
	def __init__(self, jobid=None, est_time=None, cpu_speed=None, memory=None, disk_size=None, wait_time=None, submit_time=None, p_job=None, cores=None, job_finishtime=None):
		if jobid is None:	# idiom fix: was `jobid==None`
			# Sentinel/default job.
			self.jobid = -1
			self.est_time = 0
			self.cpu_speed = 0
			self.memory = 0
			self.disk_size = 0
			self.wait_time = 0
			self.submit_time = 0
			self.p_job = -999999999
			self.cores = 0
			self.job_finishtime = 0
		else:
			self.jobid = jobid
			self.est_time = float(est_time)
			self.cpu_speed = cpu_speed
			self.memory = memory
			self.disk_size = disk_size
			self.wait_time = float(wait_time)
			self.submit_time = float(submit_time)
			self.p_job = float(p_job)
			self.cores = int(cores)
			self.job_finishtime = float(job_finishtime)
		# Scheduling bookkeeping, identical in both branches (the original
		# duplicated these six assignments in each branch).
		self.inqueue = 0	# 1 while waiting in a node's queue
		self.running = 0	# 1 while occupying cores on a node
		self.counter = 0	# last global_time runtime accounting was applied
		self.chosen = 0		# marks a current preemption candidate
		self.completed = 0	# 1 once est_time is fully consumed
		self.total_runtime = 0

class neighbor:
	"""A node's cached view of one peer: the peer node object plus copies
	of that peer's highest-priority queued job (top_job) and its
	lowest-priority running job (min_job), refreshed by
	node.update_neighbors().
	"""
	def __init__(self, node=None, top_job=None, min_job=None):
		if node is not None:
			self.node = node
			self.top_job = top_job
			self.min_job = min_job
		else:
			# BUG FIX: the original executed ``self.node = node()`` here,
			# but in this branch the parameter ``node`` is None (it shadows
			# the node class), so that call raised TypeError.  Fall back to
			# an empty placeholder view instead.
			self.node = None
			self.top_job = job()
			self.min_job = job()
		 	
class node:
	"""One peer machine in the desktop grid simulation.

	A node owns a job list, a fixed pool of cores, and its own clock
	(global_time, advanced by the driver loop).  scheduler() performs
	local preemptive priority scheduling plus internode job migration
	(push/pull) using the cached neighbor views in self.nbors.
	NOTE(review): the neighbor methods read the module globals
	``nodeList`` and ``file`` -- this class is not self-contained.
	"""
	def __init__(self, nodeID=None, cpu_speed=None, memory=None, disk_size=None, join_time=None, cores=None, global_time=None):
		# A call with no nodeID builds a -1 sentinel node (used as a
		# placeholder in neighbor search); otherwise fields come from the
		# trace file (cores/global_time arrive as strings).
		if nodeID!=None:
			self.nodeID = nodeID
			self.jobs = []
			self.cpu_speed = cpu_speed
			self.memory = memory
			self.disk_size = disk_size
			self.join_time = join_time
			self.cores = int(cores)
			self.global_time = float(global_time)
			self.curr_jobs = 0	# jobs currently running on this node
			self.res_cores = int(cores)	# cores still free
			self.nbors = []		# list of neighbor objects
		else:
			self.nodeID = -1
			self.jobs = []
			self.cpu_speed = 0
			self.memory = 0
			self.disk_size = 0
			self.join_time = 0
			self.cores = 0
			self.global_time = 0
			self.curr_jobs = 0
			self.res_cores = 0
			self.nbors = []

	def fetch_next_job(self):
		"""Return the waiting job (inqueue and not completed) with the
		highest p_job, or a sentinel job (jobid == -1) if none waits."""
		maxp_job=-99999999999
		top = job()
		for j in self.jobs:
			if j.inqueue==1 and j.completed==0:
				if maxp_job < j.p_job:
					maxp_job = j.p_job
					top = j
		return top
		
#	def append_job(self, jobid):
#		self.jobid.append(jobid)	
	def calculate_priority(self, job):
		"""Recompute job.p_job = alpha*wait_time - beta*est_time.

		With the module defaults (alpha=0, beta=50) this favours jobs
		with less remaining estimated time.  NOTE: the parameter name
		``job`` shadows the job class inside this method.
		"""
#		for jid in jobid:
		#	for j in self.jobs:
		#		if j.inqueue == 1 or j.running ==1:		
		#			if self.global_time >= j.submit_time:
	#	if job.est_time >0:
	#	print "Priority of job "+str(job.jobid)+" is being calculated"
		job.p_job = ((alpha * (job.wait_time))-(beta* job.est_time))
	#	else:
	#		job.p_job = -1
		return 

	def preempt(self, job1):
		"""Preempt a running job: charge the runtime it consumed since its
		last accounting point (job1.counter), return its cores to the
		pool, and requeue it -- unless the charge exhausts its estimated
		time, in which case it is marked completed instead."""
		job1.running=0
		job1.inqueue=1
		self.res_cores += job1.cores
		self.curr_jobs -= 1
		job1.chosen=0
		job1.est_time -= (self.global_time - job1.counter)
		job1.total_runtime += (self.global_time - job1.counter)
		job1.counter = self.global_time
		if job1.est_time <=0:
		#	j.wait_time = ((n.global_time - j.submit_time) - j.total_runtime)
			job1.completed=1
			job1.inqueue=0 	
		return

	def min_currjobs(self):
		"""Return the lowest-priority running job not yet considered for
		preemption (chosen == 0) and flag it as chosen; returns a
		sentinel job (jobid == -1) when no such job exists."""
		min_currjob = job()
		minp=9999999999
		for j in self.jobs:
			if j.running ==1:
	#			print "RUNNING JOB"
				if j.chosen==0:
					if minp > j.p_job:
						min_currjob = j
						minp = j.p_job
						
		# Mark the winner so the next call skips it.
		for j in self.jobs:
			if min_currjob.jobid == j.jobid:
				j.chosen=1
#		print "minjob details"+ str(min_currjob.jobid)
		return min_currjob						

	def push(self,nbor_node,push_job):
		"""Migrate push_job to the neighbor: append a fresh copy to the
		neighbor's job list, start it running there (clocked against the
		neighbor's global_time), and mark the local copy completed so it
		is never scheduled here again."""
#		print "INSIDE PUSH"
		nbor_node.node.jobs.append(job(push_job.jobid, push_job.est_time, push_job.cpu_speed, push_job.memory, push_job.disk_size, push_job.wait_time, push_job.submit_time, push_job.p_job, push_job.cores, push_job.job_finishtime));
		nbor_node.node.curr_jobs+=1
		nbor_node.node.res_cores-=push_job.cores
		for j in nbor_node.node.jobs:
			if j.jobid== push_job.jobid:
				j.inqueue=0
				j.running=1
				j.counter = nbor_node.node.global_time
#				print "JOB: "+str(push_job.jobid)+"PUSHED from "+str(self.nodeID)+" to"+str(nbor_node.node.nodeID)
		# Retire the local copy (flagged completed rather than removed).
		for j in self.jobs:
			if j.jobid==push_job.jobid:
				j.inqueue=0
				j.completed=1
				j.running=0
				
	def pull(self, nbor_node, pull_job):
		"""Mirror of push(): copy pull_job from the neighbor, start it
		running locally, and mark the neighbor's copy completed."""
#		print "INSIDE PULL"
		self.jobs.append(job(pull_job.jobid, pull_job.est_time, pull_job.cpu_speed, pull_job.memory, pull_job.disk_size, pull_job.wait_time, pull_job.submit_time, pull_job.p_job, pull_job.cores, pull_job.job_finishtime));
		self.curr_jobs+=1
		self.res_cores-=pull_job.cores
		for j in self.jobs:
			if j.jobid==pull_job.jobid:
				j.inqueue=0
				j.running=1
				j.counter = self.global_time

		for j in nbor_node.node.jobs:
			if j.jobid==pull_job.jobid:
				j.inqueue=0
				j.completed=1
				j.running=0
#				print "JOB: "+str(pull_job.jobid)+"PULLED from "+str(nbor_node.node.nodeID)+" to"+str(self.nodeID)
				
	def scheduler(self):
		"""One scheduling pass, invoked once per simulation tick.

		Phases:
		  1. refresh priorities and job states (finish running jobs whose
		     estimated time is used up, admit newly-arrived jobs);
		  2. greedily start the highest-priority waiting job, preempting
		     lower-priority running jobs when cores are short;
		  3. refresh neighbor views, then try to push our top waiting job
		     to a weaker neighbor or pull a stronger neighbor job here.
		NOTE(review): calls self.backfilling(), which is not defined
		anywhere in this file -- confirm it exists elsewhere, otherwise
		this raises AttributeError at runtime.
		"""
		#the scheduler performs the following scheduling operation everytime it is invoked
		jobs_considered = []	# provisional preemption victims
		i=0
	#	j = job()
		# Phase 1a: reset 'chosen' marks and refresh every live job's priority.
		for jid in self.jobs:
			if (jid.inqueue==1 or jid.running==1) and jid.completed==0:
				jid.chosen=0
				self.calculate_priority(jid)
		# Phase 1b: account runtime / waiting time and admit arrivals.
		for jid in self.jobs:
			jid.chosen = 0
			if jid.running == 1:		#if the job is currently running in queue, check its status and perform necessary actions
				jid.total_runtime += (self.global_time - jid.counter)
				jid.est_time -= (self.global_time - jid.counter)
				jid.counter = self.global_time
		#			print "Est TIME of job:"+j.jobid+"is: "+str(j.est_time)
				if jid.est_time <= 0:	#if the job is completed, mark as complete and invoke the scheduler
					jid.running=0
					jid.inqueue=0
					jid.completed=1
				#	print "ottal runtime: "+str(jid.total_runtime)
					self.curr_jobs -= 1
					self.res_cores += jid.cores	#free up the cores
				else:
					self.calculate_priority(jid)
			elif jid.inqueue == 1:		# if the job is waiting in queue, update its waiting time
				jid.wait_time = ((self.global_time - jid.submit_time) - jid.total_runtime)
				self.calculate_priority(jid)
		#			print "Est TIME of job:"+j.jobid+"is: "+str(j.est_time)
			elif ((jid.submit_time < self.global_time) and (jid.inqueue==0 and jid.completed==0)): 	# if the job has arrived, push it in queue and update the wait time
				jid.inqueue = 1
				jid.wait_time = (self.global_time - jid.submit_time)
				self.calculate_priority(jid)
				 
		# Phase 2: placement loop.  j is the best waiting job, minjob the
		# weakest running job; temp_cores counts the cores that would be
		# free if every job in jobs_considered were preempted.
		j=self.fetch_next_job()
		minjob = self.min_currjobs()
		#	print "MINJOB.JOBID:"+str(minjob.jobid)
		temp_cores = self.res_cores
		
		while ((j.p_job > minjob.p_job and minjob.jobid!=-1 and j.jobid!=-1) or (j.cores<=temp_cores and j.jobid!=-1)):
	#		print "cores available: "+str(temp_cores)+"Cores required: "+str(j.cores)
			if temp_cores >= j.cores:
				# Enough cores once the accumulated victims are preempted.
				for j1 in jobs_considered:
					self.preempt(j1)	
				j.running = 1
				j.inqueue = 0
				j.chosen = 1
				j.wait_time = ((self.global_time - j.submit_time) - j.total_runtime)
				self.res_cores -= j.cores
				self.curr_jobs+=1
				j.counter = self.global_time
				j=self.fetch_next_job()
				temp_cores = self.res_cores
				minjob = self.min_currjobs()
				jobs_considered = []
			else:
				# Not enough yet: provisionally add minjob to the victim set.
				jobs_considered.append(minjob)
				temp_cores+=minjob.cores
				if minjob.cores>= j.cores:
					# This single victim frees enough cores on its own.
					self.preempt(minjob)
					j.running = 1
					j.inqueue = 0
					j.chosen = 1
					# Release the other provisional victims.
					for j1 in jobs_considered:
						j1.chosen=0
					j.wait_time = ((self.global_time - j.submit_time) - j.total_runtime)
					self.res_cores -= j.cores
					j.counter = self.global_time
					self.curr_jobs+=1
					j=self.fetch_next_job()
					temp_cores = self.res_cores
					jobs_considered = []
					minjob = self.min_currjobs()
				elif temp_cores >= j.cores:
					# The victim set as a whole now frees enough cores.
					for j1 in jobs_considered:
						self.preempt(j1)
					jobs_considered=[]	
					j.running = 1
					j.inqueue = 0
					j.chosen = 1
					j.wait_time = ((self.global_time - j.submit_time) - j.total_runtime)
					j.counter = self.global_time
					self.res_cores -= j.cores
					self.curr_jobs+=1
					j=self.fetch_next_job()
					minjob = self.min_currjobs()
					temp_cores = self.res_cores
				else:					
					# Still short: consider the next-weakest running job.
					minjob = self.min_currjobs()
		self.backfilling()
	#	print "OUTSIDE WHILE"
		self.update_neighbors()
		#Find the node to push the job
		# Phase 3a: push -- pick the neighbor whose top job is weakest and
		# that has enough free cores, and migrate our top waiting job j.
		temp_pjob = 0
		flag=0
		flag_for_push=0
		dest_node=neighbor(node(),job(),job())
		if j.jobid!=-1:
			for nbor in self.nbors:
				if nbor.top_job.p_job < j.p_job:
					# NOTE(review): the ==0 test looks meant to detect "no
					# candidate picked yet", but the sentinel priority is
					# -999999999, not 0 -- confirm this is intended.
					if dest_node.top_job.p_job > nbor.top_job.p_job or dest_node.top_job.p_job==0:
							if nbor.node.res_cores >= j.cores:
								dest_node=nbor
								flag=1
		if flag==1:
			self.push(dest_node,j)
		# Phase 3b: pull -- take the strongest neighbor top job that beats
		# ours, provided we have the cores for it.
		flag=0
		dest_node=neighbor(node(),job(),job())
		for nbor in self.nbors:
			if nbor.top_job.jobid != -1: 
				if nbor.top_job.p_job > j.p_job:
					if dest_node.top_job.p_job < nbor.top_job.p_job: 	
						if self.res_cores >= nbor.top_job.cores:
							dest_node=nbor
							flag=1
		if flag==1:
			self.pull(dest_node,dest_node.top_job)
		
		
		return
		
	def add_neighbors(self):
		"""Read one whitespace-separated line of node IDs from the module
		global ``file`` (the neighbors.dat handle) and register every
		listed node except self as a neighbor, caching its current
		top/min jobs via the module global nodeList."""
		nbor_list = file.readline()		#generate a random neighbor
		new_nbors = string.split(nbor_list)
		for nbor_nodeID in new_nbors:  
			if nbor_nodeID != str(self.nodeID):
				for new_node in nodeList:
			#	print "Comparing "+str(new_node.nodeID)+"and "+str(nbor_nodeID)
					if str(new_node.nodeID) == str(nbor_nodeID):
						temp_topjob = new_node.fetch_next_job()
						temp_minjob = new_node.min_currjobs()
						print "Neighbor "+str(new_node.nodeID)+"added"
						self.nbors.append(neighbor(new_node, temp_topjob, temp_minjob))
		return				 
		
	def update_neighbors(self):
		"""Refresh each cached neighbor view from the live node objects in
		the module global nodeList (top_job/min_job go stale between
		scheduler passes)."""
		for nbor in self.nbors:
			for node1 in nodeList:
				if str(node1.nodeID) == str(nbor.node.nodeID):
					nbor.node = node1
					nbor.top_job = node1.fetch_next_job()
					#print "priority of updated neighbor job: "+str(nbor.top_job.p_job)
					nbor.min_job = node1.min_currjobs()
		return


	def jobs_completed(self):
		"""Return 1 if every job on this node is completed, else 0."""
		flag=0
		for j in self.jobs:
			if j.completed==0:
				flag=1
		if flag==1:
			return 0
		else:
			return 1

# ---- Trace parsing: build the node list and seed each node's job queue ----
fp = open("1-IAT4S-CC.in", "r")
line = fp.readline()
buff = line.split(' ')
nodeList = []
jobList = []
mark = 0
# Stop on the "exit" record OR at end-of-file.  (BUG FIX: the original
# looped forever on a truncated trace, because readline() returns "" at
# EOF and "".split(' ') == [''] never equals "exit".)
while line != "" and buff[0] != "exit":
	mark = 0
	if buff[0] == "join":
		# Field layout assumed from usage: buff[1]=nodeID, buff[5..7]=
		# cpu/mem/disk, buff[9]=join_time, buff[8]=cores -- TODO confirm
		# against the trace format.
		nodeList.append(node(buff[1], buff[5], buff[6], buff[7], buff[9], buff[8], 0))
	elif buff[0] == "submitJob":
		# Prefer the node the job was submitted to, if it has enough
		# cores and fewer than 8 jobs already...
		for n in nodeList:
			if n.nodeID == buff[1]:
				if n.cores >= int(buff[9]):
					if len(n.jobs) < 8:
						n.jobs.append(job(buff[2], buff[3], buff[6], buff[7], buff[8], 0, buff[10], 0, buff[9], 0))
						mark = 1
						break
		# ...otherwise fall back to any other node that can host it.
		if mark == 0:
			for n1 in nodeList:
				if int(n1.nodeID) != int(buff[1]):
					if n1.cores >= int(buff[9]):
						if len(n1.jobs) < 8:
							n1.jobs.append(job(buff[2], buff[3], buff[6], buff[7], buff[8], 0, buff[10], 0, buff[9], 0))
							break
	line = fp.readline()
	buff = line.split(' ')
fp.close()	# the original leaked this handle
#	print buff[0]
count=0
file=open("neighbors.dat")
for n in nodeList:
	print "NODEID: "+ str(n.nodeID)
	count+=1
	n.add_neighbors()
print "Count:"+ str(count)
file.close()
# ---- Simulation loop: tick every unfinished node forward and reschedule ----
# CLEANUP: the original also tallied per-node completed/total job counts
# (count, tot_jobs) on every tick, but the results were never read before
# being reset below, so that dead work is removed.
jobs_over = 0
while jobs_over == 0:
	jobs_over = 1
	for n in nodeList:
		if n.jobs_completed() == 0:
			# Advance this node's clock by one fixed tick, then let its
			# scheduler start/preempt/migrate jobs.
			n.global_time += 150000
			n.scheduler()
	# Re-check after scheduling: keep looping while any node has work left.
	for n1 in nodeList:
		if n1.jobs_completed() == 0:
			jobs_over = 0

count=0
wait_time=0
sorted_waittimes=list()
for n in nodeList:
	for j in n.jobs:
		if j.est_time<=0:
			wait_time += j.wait_time
			count+=1
			sorted_waittimes.append(j.wait_time)
print "Average wait time:"+ str(wait_time/count)
sorted_waittimes.sort()
median = sorted_waittimes.pop(2500) + sorted_waittimes.pop(2501)
median = median/2
print "\n Median waittime: "+str(median)
print "\n count: "+str(count)

#except:
#	print "Error:Unable to start"

 

