import random, math, time
import sys, os
import game_interface

# The engine may launch us from the tournament root, in which case our support
# modules live under ./player1/ or ./player2/; hop into whichever directory
# holds them before importing, then hop back out.
_playerDirEntered = False
if os.path.isfile("./player1/NeuralNetwork.py"):
	os.chdir("./player1/")
	_playerDirEntered = True
elif os.path.isfile("./player2/NeuralNetwork.py"):
	os.chdir("./player2/")
	_playerDirEntered = True
import NeuralNetwork, Decision, Classification
# Only undo the chdir if we actually performed one: the previous version
# chdir'd("../") unconditionally, which walked out of the working directory
# whenever the modules were importable from the current directory itself.
if _playerDirEntered: os.chdir("../")

class MoveGenerator():
	"""Learning agent for the plant grid game.

	A boosted classifier (loaded from disk, or trained from saved
	"memories") scores plant observations, and a neural-network "brain"
	(NeuralNetwork.Brain) makes the turn-by-turn decisions: whether to
	observe a plant again, whether to eat it, and which way to move.
	The brain is reinforced online -- praise() after being nourished,
	scold() after being poisoned or after a failed move -- and is
	persisted to disk after every update.

	NOTE(review): Python 2 source (print statements).  The memories /
	classifier / brain files are searched for in ".", "./player1/" and
	"./player2/" because the engine may run us from either location.
	"""
	def __init__(self):
		#places we've been
		self.steps = []
		#observations gathered per square: spaces[x][y] is a set of images
		#(keys are the string forms of the coordinates)
		self.spaces = {}
		
		#the game parameters
		self.parameters = {}
		#normalized dictionary representation of the game parameters for use in the neural network
		self.physics = {}
		#basis for comparison of game parameters
		self.parametersDefault = {"plantNutrition":20.0, "plantPoison":10.0, "costObservation":1.0, "costMovement":1.0, "lifeStarting":100.0}
		#since we have to discover the game parameters for ourselves, we keep track of what's known
		self.setParameters(self.parametersDefault)
		
		#short-term memory is what we've seen of the last plant we visited
		#self.imagesMemoryShort = None
		
		#see the file for how I'm saving memories---very simple format
		self.pathMemories = "./memories" if os.path.isfile("./memories") else \
			("./player1/memories" if os.path.isfile("./player1/memories") else \
			"./player2/memories")
		self.dataSource = Classification.DataSource(self.pathMemories)
			
		self.pathStaticClassifier = "./superclassifier" if os.path.isfile("./superclassifier") else \
		("./player1/superclassifier" if os.path.isfile("./player1/superclassifier") else \
		("./player2/superclassifier" if os.path.isfile("./player2/superclassifier") else None))
		
		if self.pathStaticClassifier == None:
			print "NO CLASSIFIER FOUND... BUILDING FROM MEMORIES"
			
			#hold out three disjoint 100-example sets: two for boosting, one for the final test
			testingSetA = set(random.sample(self.dataSource.examples, 100))
			testingSetB = set(random.sample(self.dataSource.examples - testingSetA, 100))
			testingSetC = set(random.sample(self.dataSource.examples - (testingSetA | testingSetB), 100))
			trainingSet = self.dataSource.examples - (testingSetA | testingSetB | testingSetC)
			
			#heterogeneous committee: an HMM plus three boosted subcommittees
			self.classifier = Classification.BoostedCommittee(set([ \
			Classification.HMMClassifier(2, 2), \
			Classification.BoostedCommittee(set([Classification.NaiveBayesClassifier() for i in range(6)])), \
			Classification.BoostedCommittee(set([Classification.NearestCentroidClassifier() for i in range(6)])), \
			Classification.BoostedCommittee(set([Classification.ID3Classifier(12) for i in range(6)])), \
			]))
			
			#recursive --- will use the first testing set to boost itself and the second testing set to boost the subcommittees
			self.classifier.boost(trainingSet, [testingSetA, testingSetB])
			
			print "CLASSIFIER PERFORMANCE:", self.classifier.test(testingSetC)
			
			Classification.writeClassifierToFile(self.classifier, "./superclassifier")
		else: self.classifier = Classification.getClassifierFromFile(self.pathStaticClassifier)
		
		self.brain = NeuralNetwork.Brain()
		self.pathBrain = "./brain" if os.path.isfile("./brain") else \
		("./player1/brain" if os.path.isfile("./player1/brain") else \
		"./player2/brain")
		self.brain.path = self.pathBrain
		#resume a previously trained brain if one was saved
		if os.path.isfile(self.brain.path): self.brain.load()
		
		#life total at the end of our previous turn; None until the first turn completes
		self.lifeEndLastTurn = None
		
		#the decisions we've made since we last ate a plant---
		#when we know more about the plant, we praise() or scold(),
		#based on this set of recent decisions
		self.decisionsRecent = set()
	
	def setParameters(self, parametersNew):
		"""Record new game parameters after validating them.

		For each known parameter, stores the raw value in self.parameters
		and a squashed value x / (x + default) in self.physics, so the
		neural network always sees inputs in (0, 1).  Asserts (rather
		than raises) on unknown keys, non-float or non-positive values.
		"""
		#safe setter for parameters to make sure that we don't save nonsense
		#this guarantee is important, since we don't decide
		#the parameters they send us
		for parameter in parametersNew:
			assert parameter in self.parametersDefault
			#print the offending value before the asserts below fire, for debugging
			if type(parametersNew[parameter]) != type(float()) or not parametersNew[parameter] > 0.0: print parameter, parametersNew[parameter]
			assert type(parametersNew[parameter]) == type(float())
			assert parametersNew[parameter] > 0.0
			self.physics[parameter] = parametersNew[parameter] / (parametersNew[parameter] + self.parametersDefault[parameter])
			self.parameters[parameter] = parametersNew[parameter]
	
	def getMoveFromView(self, view):
		"""Choose this turn's action from the game's view object.

		Returns a (move, eatsPlant) pair: move is an index 0-3 into the
		(n, w, s, e) option ordering built below -- presumably matching
		the engine's direction encoding, TODO confirm -- and eatsPlant
		is a bool.  Side effects: trains and saves the brain via
		praise()/scold(), records observations in self.spaces, spends
		life on observations, and appends our position to self.steps.
		"""
		#need to keep track of time so that we don't delay the game
		turnTimeStart = time.time()
		
		#if our coordinates equal the last recorded step, the last move failed: punish it
		if len(self.steps) > 0 and self.steps[-1] == (view.GetXPos(), view.GetYPos()): self.scold()
		
		x, y = str(view.GetXPos()), str(view.GetYPos())
		
		if x not in self.spaces: self.spaces[x] = {}
		if y not in self.spaces[x]: self.spaces[x][y] = set()
		
		#after action last turn
		#we use these variables as inputs to the NN
		wasPoisoned, wasNourished = 0.0, 0.0
		
		if self.lifeEndLastTurn != None:
			if view.GetLife() < self.lifeEndLastTurn - self.parameters["costMovement"]: #wasPoisoned!!
				#sanity check: the life drop is exactly movement cost plus poison
				assert view.GetLife() + self.parameters["costMovement"] + self.parameters["plantPoison"] == self.lifeEndLastTurn
				wasPoisoned = 1.0
				#we set self.imagesMemoryShort to a set() when we see a plant,
				#so make sure that self.imagesMemoryShort != None!!!
				#assert self.imagesMemoryShort != None
				#self.imagesMemoryLong["P"].extend(self.imagesMemoryShort)
				#self.imagesMemoryShort = None
				
				self.scold()
				self.decisionsRecent = set()

			elif view.GetLife() > self.lifeEndLastTurn - self.parameters["costMovement"]: #wasNourished!!
				#sanity check: the life gain is exactly nutrition minus movement cost
				assert view.GetLife() + self.parameters["costMovement"] - self.parameters["plantNutrition"] == self.lifeEndLastTurn
				wasNourished = 1.0
				#we set self.imagesMemoryShort to a set() when we see a plant,
				#so make sure that self.imagesMemoryShort != None!!!
				#assert self.imagesMemoryShort != None
				#self.imagesMemoryLong["N"].extend(self.imagesMemoryShort)
				#self.imagesMemoryShort = None
				
				self.praise()
				self.decisionsRecent = set()
		
		self.lifeEndLastTurn = view.GetLife()
		
		#prepare dictionary of world-context for decisions through neural network
		context = self.physics.copy()
		
		def evaluateLife():
			#squash current life into [0, 1) relative to starting life
			if self.lifeEndLastTurn < 0.0: return 0.0
			return float(self.lifeEndLastTurn)/float((self.parameters["lifeStarting"] + self.lifeEndLastTurn))
		
		context["lifeCurrent"] = evaluateLife()
		
		#default: don't eat the plant
		eatsPlant = False
		
		#apparently, this is how we find out if there's a plant here
		if view.GetPlantInfo() == game_interface.STATUS_UNKNOWN_PLANT:
			pN, qN = 0.0, 0.0
			#reset short-term memory
			#self.imagesMemoryShort = set()
			
			#observation loop: budget 0.9s of wall clock per turn, and never
			#spend the last of our life on looking
			while time.time() - turnTimeStart < 0.9 \
			and self.lifeEndLastTurn > self.parameters["costObservation"]:
				#use our classifier to evaluate the plant
				classifications = [self.classifier.classify(Classification.Example([f for f in observation])) for observation in self.spaces[x][y]]
				
				#qN: fraction of this square's observations labeled nutritious
				#(falls back to the prior when we have no observations yet)
				qN = (sum([1.0 if label == "N" else 0.0 for label, probability in classifications]) / float(len(self.spaces[x][y]))) \
				if len(self.spaces[x][y]) > 0 else self.dataSource.priorsByLabel["N"]
				
				#pN: mean estimated probability of "nutritious" (same fallback)
				pN = (sum([probability if label == "N" else ((1.0 - probability) if label == "P" else 0.0) for label, probability in classifications]) / float(len(self.spaces[x][y]))) \
				if len(self.spaces[x][y]) > 0 else self.dataSource.priorsByLabel["N"]
				
				#ask the brain whether another observation is worth its cost
				whetherGetsImage = Decision.Validation({"lifeCurrent":context["lifeCurrent"], "evaluation":pN, "stability":qN, "inverseObservations":math.exp(-float(len(self.spaces[x][y]))), "costObservation":context["costObservation"], "time":0.5/(0.5+time.time() - turnTimeStart)}, "getsImage")
				whetherGetsImage.through(self.brain)
				self.decisionsRecent.add(whetherGetsImage)
				
				if not whetherGetsImage.selection(): break
				
				#remember what we observe
				newObservation = view.GetImage()
				self.spaces[x][y].add(newObservation)
				#self.imagesMemoryShort.add(newObservation)
				
				#keep track of how much life we spend on observation
				self.lifeEndLastTurn -= self.parameters["costObservation"]
				context["lifeCurrent"] = evaluateLife()
			
			#decide whether to eat the plant, given what we think about it
			whetherEatsPlant = Decision.Validation({"lifeCurrent":context["lifeCurrent"], "evaluation":pN, "stability":qN, "wasPoisoned":wasPoisoned, "wasNourished":wasNourished, "plantPoison":context["plantPoison"], "plantNutrition":context["plantNutrition"]}, "eatsPlant")
			whetherEatsPlant.through(self.brain)
			self.decisionsRecent.add(whetherEatsPlant)
			
			eatsPlant = (True) if (whetherEatsPlant.selection()) else (False)
				
		#remember where we are now for future reference
		directions = [game_interface.UP, game_interface.LEFT, game_interface.DOWN, game_interface.RIGHT]
		#NOTE(review): 'directions' is never referenced below; the returned move
		#is the selected option's index, relying on this same (n, w, s, e) order
		compass = ["n","w","s","e"]
		exploration = {}
		for c in compass: exploration[c] = 0.0
		
		#tells NN whether we're going back exactly whence we came
		backtrack = {}
		
		#tells NN whether we're continuing in the same direction
		momentum = {}
		
		#tells NN whether we're turning clockwise, just in case periodicity of movement is helpful
		clockwise = {}
		
		#tells NN whether we've been to exactly that spot before
		history = {}
		
		#coordinates of the four adjacent squares, keyed by compass point
		neighbors = {}
		neighbors["n"], neighbors["w"], neighbors["s"], neighbors["e"] = \
		(view.GetXPos(), view.GetYPos() + 1), (view.GetXPos() - 1, view.GetYPos()), \
		(view.GetXPos(), view.GetYPos() - 1), (view.GetXPos() + 1, view.GetYPos())
		
		for c in compass:
			backtrack[c] = 0.0
			momentum[c] = 0.0
			clockwise[c] = 0.0
			history[c] = 1.0 if neighbors[c] in self.steps else 0.0
		
		if len(self.steps) > 0:
			#we should always be exactly one square from where we last stood
			assert abs(self.steps[-1][0] - view.GetXPos())\
			+ abs(self.steps[-1][1] - view.GetYPos()) == 1
			
			if self.steps[-1][0] > view.GetXPos():
				backtrack["e"] = 1.0
				momentum["w"] = 1.0
				clockwise["n"] = 1.0
			elif self.steps[-1][0] < view.GetXPos():
				backtrack["w"] = 1.0
				momentum["e"] = 1.0
				clockwise["s"] = 1.0
			
			if self.steps[-1][1] > view.GetYPos():
				backtrack["n"] = 1.0
				momentum["s"] = 1.0
				clockwise["w"] = 1.0
			elif self.steps[-1][1] < view.GetYPos():
				backtrack["s"] = 1.0
				momentum["n"] = 1.0
				clockwise["e"] = 1.0
		
			#exploration[c]: fraction of our history lying in direction c from here
			for step in self.steps:
				if step[0] > view.GetXPos(): exploration["e"] += 1.0
				elif step[0] < view.GetXPos(): exploration["w"] += 1.0
				
				if step[1] > view.GetYPos(): exploration["n"] += 1.0
				elif step[1] < view.GetYPos(): exploration["s"] += 1.0
			
			for c in compass: exploration[c] /= float(len(self.steps))
		
		#tells NN stuff about here, in case it might be useful
		here = {
		"wasPoisoned":wasPoisoned,\
		"wasNourished":wasNourished,\
		"foundNewPlant":1.0 if view.GetPlantInfo() == game_interface.STATUS_UNKNOWN_PLANT else 0.0,\
		"foundOldPlant":1.0 if view.GetPlantInfo() in [game_interface.STATUS_POISONOUS_PLANT, game_interface.STATUS_NUTRITIOUS_PLANT] else 0.0,\
		"willEatPlant":1.0 if eatsPlant else 0.0
		}
		
		#let the brain pick one of the four directions given local context
		whitherMoves = Decision.Enumeration(here, "goes")
		for c in compass:
			there = {"clockwise":clockwise[c], "backtrack":backtrack[c], "momentum":momentum[c], "exploration":exploration[c], "knowledge":history[c]}
			whitherMoves.option(there, "direction")
		whitherMoves.through(self.brain)
		self.decisionsRecent.add(whitherMoves)
		move = whitherMoves.options.index(whitherMoves.selection())
		
		assert move in range(4)
		assert type(eatsPlant) == type(True)
		
		self.steps.append((view.GetXPos(), view.GetYPos()))
		
		return (move, eatsPlant)
	
	#UPDATE AND SAVE THE AGENT
	def scold(self):
		"""Negative reinforcement: push the brain's outputs away from
		each recent decision's selection, then apply a small weight
		update and persist the brain to disk."""
		#teach the NN not to repeat those decisions!
		for decision in self.decisionsRecent:
			if isinstance(decision, Decision.Enumeration):
				for option, representation in zip(decision.options, decision.toDicts()):
					#never repeat that mistake!! (gets closer to inverting the margin)
					target = 0.1 if option == decision.selection() else (0.9 * option.activation[0]) + 0.1
					output = option.activation[0]
					delta = [target - output]
					self.brain.feedBackward(delta, decision.outputsType, NeuralNetwork.Stimulus.fromDict(representation))
			elif isinstance(decision, Decision.Validation):
				assert len(decision.activations) == 1
				#never repeat that mistake!! (inverts the margin)
				target = (0.1) if (decision.selection()) else (0.9)
				output = decision.activations[0]
				delta = [target - output]
				self.brain.feedBackward(delta, decision.outputsType, NeuralNetwork.Stimulus.fromDict(decision.context))
			else:
				#fallback: a raw multi-output movement decision ("goes")
				assert decision.outputsType == "goes"
				selection = decision.activations.index(max(decision.activations))
				#suppress only the selected direction; leave the others at their outputs
				target = [decision.activations[a] if a != selection else 0.1 for a in range(len(decision.activations))]
				output = decision.activations
				deltas = [t - o for t, o in zip(target, output)]
				self.brain.feedBackward(deltas, decision.outputsType, NeuralNetwork.Stimulus.fromDict(decision.context))
		
		self.brain.update(0.01)
		#commit to disk
		self.brain.save()
	
	def praise(self):
		"""Positive reinforcement: push the brain's outputs toward each
		recent decision's selection, then apply a small weight update
		and persist the brain to disk."""
		#teach the NN to repeat those decisions!
		for decision in self.decisionsRecent:
			if isinstance(decision, Decision.Enumeration):
				for option, representation in zip(decision.options, decision.toDicts()):
					#boosts decision-margin
					target = 0.9 if (option == decision.selection()) else 0.1
					output = option.activation[0]
					delta = [target - output]
					self.brain.feedBackward(delta, decision.outputsType, NeuralNetwork.Stimulus.fromDict(representation))
			elif isinstance(decision, Decision.Validation):
				assert len(decision.activations) == 1
				#boosts decision-margin
				target = 0.9 if (decision.selection()) else 0.1
				output = decision.activations[0]
				delta = [target - output]
				self.brain.feedBackward(delta, decision.outputsType, NeuralNetwork.Stimulus.fromDict(decision.context))
			else:
				#fallback: a raw multi-output movement decision ("goes")
				assert decision.outputsType == "goes"
				selection = decision.activations.index(max(decision.activations))
				target = [0.9 if a == selection else 0.1 for a in range(len(decision.activations))]
				output = decision.activations
				deltas = [t - o for t, o in zip(target, output)]
				self.brain.feedBackward(deltas, decision.outputsType, NeuralNetwork.Stimulus.fromDict(decision.context))
		
		#see above, and contrast the learning rates
		#NOTE(review): praise and scold currently use the same rate (0.01), so
		#there is no contrast despite the comment above -- confirm intended
		self.brain.update(0.01)
		#commit to disk
		self.brain.save()
	
	def __del__(self): pass
		#won't necessarily get called... but helpful if it does
		#open(self.pathMemories, 'w').write(str(self.dataSource).strip())

		
#module-level singleton, constructed at import time (this loads -- or, on a
#first run, trains -- the classifier and brain from disk)
move_generator = MoveGenerator()
def get_move(view):
	'''Entry point called by the engine each turn.

	Delegates to the module-level MoveGenerator and returns its
	(move, bool) pair: which direction to step, and whether to try to
	eat the plant on the current square.  view follows the interface
	defined in python_game.h; in particular it can be asked for
	observations of the image at the current location, each of which
	carries an observation cost.
	'''
	decision = move_generator.getMoveFromView(view)
	return decision

def init_point_settings(plantNutrition, plantPoison, costObservation, lifeStarting, costMovement):
	'''Called before any moves are made.  Allows you to make customizations
	based on the specific scoring parameters in the game.'''
	#nutrition and poison may arrive signed; the agent wants magnitudes
	settings = {
		"plantNutrition": float(abs(plantNutrition)),
		"plantPoison": float(abs(plantPoison)),
		"costObservation": float(costObservation),
		"lifeStarting": float(lifeStarting),
		"costMovement": float(costMovement),
	}
	move_generator.setParameters(settings)