import game_state
import game_player
import oware
import os
import game

# --- Evaluation-tuning constants ------------------------------------------
# SCALE_CAPTURE / SCALE_DANGER amplify the evaluation when captures are
# threatened; the *_LESSEN variants damp it instead.  Intended ranges are in
# the trailing comments.  FORCED_STOP_BONUS* are not referenced in this file
# — presumably consumed elsewhere; TODO confirm.
SCALE_CAPTURE = 0.5 # > 0
SCALE_DANGER = 0.5	# > 0
FORCED_STOP_BONUS = 0.3	# > 0
SCALE_CAPTURE_LESSEN = 0.5  # (0, 1)
SCALE_DANGER_LESSEN = 0.5	# (0, 1)
# NOTE(review): -1 falls outside the (0, 1) range documented for the other
# *_LESSEN constants — confirm this value is intentional.
FORCED_STOP_BONUS_LESSEN = -1

# --- Module-level mutable state (shared by all OwarePlayer instances) -----
_step = 0  # debug counter: number of moves chosen so far
_evaluation_id = 0  # running count of evaluate() calls; tags debug output
_debug = False  # enables verbose evaluation / search printouts
_hacked = False  # not referenced in this file — TODO confirm still needed
_tournament_init = False  # True once the tournament module has been loaded
_traning_mode = False  # sic; forwarded to the tournament module as _training_mode
mT = None  # lazily loaded tournament module (see OwarePlayer.tournament_move)

class OwarePlayer(game_player.GamePlayer):
	"""Oware player: heuristic minimax / alpha-beta search, with the actual
	tournament strategy delegated to a lazily loaded external module.

	Player IDs are 1 and 2, so an ID's opponent is ``id % 2 + 1``.
	"""

	# Make a note of our name (will be the module name)
	# and player ID (will be a valid player ID for an OwareState).
	def __init__(self, name, game_id):
		game_player.GamePlayer.__init__(self, name, game_id)
		self.myself = game_id  # our player ID (1 or 2)
		self.opponent = game_id % 2 + 1  # the other player's ID

	# EXAMPLE: Loads a file from the same directory this module is stored in
	#  and returns its contents.  Pattern any file operations you do in your
	#  player on this model.
	def load_file(self, fname):
		"""Return the contents of *fname*, read from players/oware.

		Fix: the original left the process chdir'd into players/oware (and
		the file handle open) if read() raised; try/finally plus a ``with``
		block now guarantee the working directory is restored and the
		handle closed on every path.
		"""
		wd = os.getcwd()
		os.chdir("players/oware")
		try:
			with open(fname) as fin:
				return fin.read()
		finally:
			# Restore the caller's working directory no matter what;
			# the rest of the program may break otherwise.
			os.chdir(wd)

	def get_opponent_dangerous_pits(self, state):
		"""Return how many of the opponent's pits the side to move could
		newly empty (capture) with its best single move from *state*.

		Returns 0 when the side to move has no legal moves; never
		negative.
		"""
		opponent = state.player % 2 + 1

		# Opponent pits that are already empty before any move is made.
		zero_pits_before = sum(
			1 for pit in range(6)
			if state.get_pit_count(opponent, pit) == 0)

		moves = OwarePlayer.successor_moves(state)
		if not moves:
			# No legal moves, so it is certain no pits will be taken.
			return 0

		# Largest number of empty opponent pits over all candidate moves.
		most_zero_pits = 0
		for move in moves:
			tmp_state = state.make_copy()
			tmp_state.move(move)
			zero_pits = sum(
				1 for pit in range(6)
				if tmp_state.get_pit_count(opponent, pit) == 0)
			most_zero_pits = max(most_zero_pits, zero_pits)

		# Sowing can also refill previously empty pits, so clamp at zero.
		return max(0, most_zero_pits - zero_pits_before)

	# Simple legal-move generator for the player to move in *state*.
	@staticmethod
	def successor_moves(state):
		moves = []
		for i in range(6):
			move = oware.OwareMove(state.player, i)
			# First condition saves us looping over all other moves
			#  (in is_valid_move()) to find out if there are non-killing
			#  moves when not necessary.
			if (state.get_pit_count(state.player, i) > 0
						and not state.kills_opponent(move)) \
					or state.is_valid_move(move):
				moves.append(move)
		return moves

	def evaluate(self, state):
		"""Heuristic value of *state* from this player's point of view.

		The base value is the keep-count (captured-seed) difference,
		scaled up or down by how many opponent pits the side to move
		threatens to empty.  Returns ``(value, evaluation_id)``, where the
		id is a global counter used to label nodes in debug output.
		"""
		global _evaluation_id
		score_diff = state.get_keep_count(self.myself) \
				- state.get_keep_count(self.opponent)

		promising_degree = 0
		dangerous_degree = 0

		dangerous_pits = self.get_opponent_dangerous_pits(state)

		value = abs(score_diff)
		if state.player == self.myself:  # we are about to take opponent's pits
			promising_degree = dangerous_pits
			if score_diff > 0:
				value *= 1 + SCALE_CAPTURE * promising_degree / 6.0
			else:
				value *= 1 - SCALE_CAPTURE_LESSEN * promising_degree / 6.0
				value = -value
		else:  # opponent is about to take our pits
			dangerous_degree = dangerous_pits
			if score_diff > 0:
				value *= 1 - SCALE_DANGER_LESSEN * dangerous_pits / 6.0
			else:
				value *= 1 + SCALE_DANGER * dangerous_pits / 6.0
				value = -value
		_evaluation_id += 1

		if _debug:
			print('[{5}{6:>6}]s_d:{0}, taken:{1} d: {3} p {4} value {2}' \
				  .format(score_diff, dangerous_pits, value, dangerous_degree, promising_degree,\
						'I' if state.player == self.myself else 'O'\
						, _evaluation_id))

		return (value, _evaluation_id)

	def minimax_move(self, state):
		"""Choose a move; deliberately delegates to tournament_move().

		Fix: the original kept a depth-5 get_max() search after an
		unconditional return — that code was unreachable and has been
		removed (get_max/get_min below remain available).
		"""
		return self.tournament_move(state)

	def get_min(self, state, h):
		"""Minimizing half of plain minimax with depth budget *h*.

		Returns ``(value, evaluation_id)``; leaves (no successors or depth
		exhausted) are scored by evaluate().
		"""
		successors = state.successors()
		if successors is None or len(successors) == 0 or h == 0:
			return self.evaluate(state)

		h -= 1
		worst_id = None
		worst_successor = None
		worst_value = None

		for successor in successors:
			tmp_state = successor[1].make_copy()
			r = self.get_max(tmp_state, h)
			if worst_successor is None or r[0] < worst_value:
				worst_value = r[0]
				worst_id = r[1]
				worst_successor = successor

		return (worst_value, worst_id)

	def get_max(self, state, h, returnNode = False):
		"""Maximizing half of plain minimax with depth budget *h*.

		Returns ``(value, evaluation_id)``, or
		``(value, evaluation_id, successor)`` when *returnNode* is true so
		the caller can extract the chosen successor.
		"""
		successors = state.successors()
		if successors is None or len(successors) == 0 or h == 0:
			return self.evaluate(state)

		h -= 1
		best_id = None
		best_successor = None
		best_value = None

		for successor in successors:
			tmp_state = successor[1].make_copy()
			r = self.get_min(tmp_state, h)
			if best_successor is None or best_value < r[0]:
				best_value = r[0]
				best_id = r[1]
				best_successor = successor

		if returnNode:
			return (best_value, best_id, best_successor)
		return (best_value, best_id)

	def alpha_beta_move(self, state):
		"""Choose a move via a depth-5 alpha-beta search."""
		global _step

		r = self.ab_max_move(state, None, 5, True)
		best_successor = r[2]

		if _debug:
			_step += 1
			print('[Step:{1}] Choose Node:[{0:6}]'.format(r[1], _step))

		return best_successor[2]

	def ab_min_move(self, state, parent, h):
		"""Minimizing node of the alpha-beta search.

		*parent* is the calling max node's current best value (its alpha;
		None at the root).  Once our value falls strictly below it the max
		parent can never prefer this branch, so expansion stops.  Returns
		``(value, evaluation_id)``.
		"""
		successors = state.successors()
		if successors is None or len(successors) == 0 or h == 0:
			return self.evaluate(state)

		h -= 1

		current_value = None
		current_id = None

		for successor in successors:
			tmp_state = successor[1].make_copy()
			r = self.ab_max_move(tmp_state, current_value, h)
			value = r[0]
			# Update and continue.
			if current_value is None or current_value > value:
				current_value = value
				current_id = r[1]

			# Prune: already strictly worse than the max parent's alpha.
			if parent is not None and current_value < parent:
				break

		return (current_value, current_id)

	def ab_max_move(self, state, parent, h, returnNode = False):
		"""Maximizing node of the alpha-beta search.

		*parent* is the calling min node's current best value (its beta;
		None at the root).  Once our value rises strictly above it the min
		parent can never prefer this branch, so expansion stops.  Returns
		``(value, evaluation_id)`` or, when *returnNode* is true,
		``(value, evaluation_id, successor)``.
		"""
		successors = state.successors()
		if successors is None or len(successors) == 0 or h == 0:
			return self.evaluate(state)

		h -= 1

		current_value = None
		current_best_node = None
		current_best_id = None

		for successor in successors:
			tmp_state = successor[1].make_copy()
			r = self.ab_min_move(tmp_state, current_value, h)
			value = r[0]
			# Update and continue.
			if current_value is None or current_value < value:
				current_value = value
				current_best_node = successor
				current_best_id = r[1]

			# Prune: already strictly better than the min parent's beta.
			if parent is not None and current_value > parent:
				break

		if returnNode:
			return (current_value, current_best_id, current_best_node)
		return (current_value, current_best_id)

	def tournament_move_old(self, state):
		"""Previous tournament strategy: alpha-beta with the evaluation
		weights biased toward avoiding danger over seeking captures."""
		global SCALE_CAPTURE, SCALE_DANGER, SCALE_CAPTURE_LESSEN, SCALE_DANGER_LESSEN

		SCALE_CAPTURE = 0.2
		SCALE_DANGER = 0.8
		SCALE_CAPTURE_LESSEN = 0.2
		SCALE_DANGER_LESSEN = 0.8

		# HACK: force the controller to allow at least 10000 expansions.
		if state.controller.expansions < 10000:
			state.controller.expansions = 10000

		return self.alpha_beta_move(state)

	def tournament_move(self, state):
		"""Tournament entry point.

		Lazily loads the external ``anch_tournament`` module (once per
		process) and delegates the move to it, falling back to the local
		alpha-beta search when it returns no move.
		"""
		global _tournament_init, mT

		if not _tournament_init:
			wd = os.getcwd()
			mT = game.load_module("anch_tournament", \
				"./players/oware/anch", wd)
			_tournament_init = True
			# NOTE(review): source flag is spelled "_traning_mode" [sic]
			# at module level; the target attribute is spelled correctly.
			mT._training_mode = _traning_mode

		move = mT.AnchTournamentPlayer(state, self.myself).tournamentMove()

		if move is None:
			return self.alpha_beta_move(state)

		return move
		
	
