import game_state
import game_player
import oware
import os

# Tuning weights for OwarePlayer.evaluate(): how strongly potential
# captures (SCALE_CAPTURE) and potential losses (SCALE_DANGER) scale the
# raw score difference.
SCALE_CAPTURE = 0.5
SCALE_DANGER = 0.5
# Weight applied when the game is forced to stop (no legal moves left).
FORCED_STOP_BONUS = 0.3
# The *_LESSEN values are derived in OwarePlayer.__init__, where each
# positive weight x is mapped into (0, 1) via 1/(1 + 1/x).
# -1 marks them as not yet initialised.
SCALE_CAPTURE_LESSEN = -1
SCALE_DANGER_LESSEN = -1
FORCED_STOP_BONUS_LESSEN = -1

class OwarePlayer(game_player.GamePlayer):
	"""Minimax-based Oware player.

	``game_id`` is this player's ID (1 or 2); the opponent is the other
	ID.  The evaluation function starts from the keep-count (score)
	difference and adjusts it by how many pits we threaten to capture
	versus how many of ours are threatened.
	"""

	# Make a note of our name (will be the module name)
	# and player ID (will be a valid player ID for an OwareState).
	def __init__(self, name, game_id):
		global SCALE_CAPTURE, SCALE_DANGER, SCALE_CAPTURE_LESSEN, SCALE_DANGER_LESSEN, \
		      FORCED_STOP_BONUS, FORCED_STOP_BONUS_LESSEN
		game_player.GamePlayer.__init__(self, name, game_id)
		self.myself = game_id
		# Player IDs are 1 and 2, so 1 -> 2 and 2 -> 1.
		self.opponent = game_id % 2 + 1
		# Clamp the tuning weights so they are never negative.
		SCALE_CAPTURE = 0 if SCALE_CAPTURE <= 0 else SCALE_CAPTURE
		SCALE_DANGER = 0 if SCALE_DANGER <= 0 else SCALE_DANGER
		FORCED_STOP_BONUS = 0 if FORCED_STOP_BONUS <= 0 else FORCED_STOP_BONUS
		# The formula 1/(1 + 1/x) maps a positive x into (0, 1).
		SCALE_CAPTURE_LESSEN = 0 if SCALE_CAPTURE == 0 else 1/(1 + 1/SCALE_CAPTURE)
		SCALE_DANGER_LESSEN = 0 if SCALE_DANGER == 0 else 1/(1 + 1/SCALE_DANGER)
		FORCED_STOP_BONUS_LESSEN = 0 if FORCED_STOP_BONUS == 0 else 1/(1 + 1/FORCED_STOP_BONUS)

	# EXAMPLE: Loads a file from the same directory this module is stored in
	#  and returns its contents.  Pattern any file operations you do in your
	#  player on this model.
	#
	# NB: Make a note of the working directory before you cd to the module
	#  directory, and restore it afterward!  The rest of the program may break
	#  otherwise.
	def load_file(self, fname):
		"""Return the contents of *fname* from the players/oware directory.

		The working directory is restored and the file handle closed even
		if reading raises (the original version leaked both on error).
		"""
		wd = os.getcwd()
		os.chdir("players/oware")
		try:
			with open(fname) as fin:
				return fin.read()
		finally:
			os.chdir(wd)

	def get_dangerous_pits(self, state, player):
		"""Return how many additional pits of *player* can be emptied by
		the best single move from *state* (0 if there are no moves).

		Computed as max over moves of (empty pits after the move) minus
		(empty pits before the move).
		"""
		# How many of player's pits are already empty?
		zero_pits_ori = 0
		for pit in range(6):
			if state.get_pit_count(player, pit) == 0:
				zero_pits_ori += 1

		moves = OwarePlayer.successor_moves(state)
		situations = []
		for move in moves:
			temp_state = state.make_copy()
			temp_state.move(move)
			zero_pits = 0
			for pit in range(6):
				# BUG FIX: count pits in the post-move state, not the
				# original state (which made every move look identical
				# and the function always return 0).
				if temp_state.get_pit_count(player, pit) == 0:
					zero_pits += 1
			situations.append(zero_pits)

		return 0 if not moves else max(situations) - zero_pits_ori

	# Simple successor-move generator.
	@staticmethod
	def successor_moves(state):
		"""Return the list of valid OwareMoves for state's current player."""
		moves = []
		for i in range(6):
			move = oware.OwareMove(state.player, i)
			# First condition saves us looping over all other moves
			#  (in is_valid_move()) to find out if there are non-killing
			#  moves when not necessary.
			if (state.get_pit_count(state.player, i) > 0 \
						and not state.kills_opponent(move)) \
					or state.is_valid_move(move):
				moves.append(move)
		return moves

	def evaluate(self, state):
		"""Heuristic value of *state* from this player's point of view.

		Starts from the score difference, then amplifies or lessens it
		according to the best guaranteed capture (promising_degree) and
		the least avoidable loss (dangerous_degree) over one ply of
		lookahead.
		"""
		score_diff = state.get_keep_count(self.myself) \
				- state.get_keep_count(self.opponent)

		promising_degree = 0
		dangerous_degree = 7  # sentinel: larger than any real pit count

		moves = OwarePlayer.successor_moves(state)
		for move in moves:
			# No matter what move the opponent takes,
			# find the minimum possible profit of this move.
			min_pits_to_capture = 7
			temp_state_copy = state.make_copy()
			temp_state_copy.move(move)

			for move_opp in OwarePlayer.successor_moves(temp_state_copy):
				temp_opponent_state = temp_state_copy.make_copy()
				temp_opponent_state.move(move_opp)

				pits_to_capture = \
				    self.get_dangerous_pits(temp_opponent_state, self.opponent)

				if pits_to_capture < min_pits_to_capture:
					min_pits_to_capture = pits_to_capture

			# Assume I will take the most profitable move.
			if promising_degree < min_pits_to_capture:
				promising_degree = min_pits_to_capture

			# On the other hand, if I take this move, how many pits
			# of mine can be taken?
			current_danger = \
			    self.get_dangerous_pits(temp_state_copy, self.myself)

			# BUG FIX: keep the MINIMUM danger.  The comparison was
			# inverted (`dangerous_degree < current_danger` with the
			# sentinel 7), so dangerous_degree was always reset to 0
			# below and danger never influenced the evaluation.
			if current_danger < dangerous_degree:
				dangerous_degree = current_danger

		# No moves examined: no danger information.
		dangerous_degree = 0 if dangerous_degree == 7 else dangerous_degree

		# Now we amplify or lessen the favourableness.
		multiplier = 1 if score_diff >= 0 else -1
		value = abs(score_diff)
		if multiplier != -1:
			# Ahead (or level): captures amplify, danger lessens.
			scale_lessen = SCALE_DANGER_LESSEN
			scale_amplify = SCALE_CAPTURE
			degree_lessen = dangerous_degree
			degree_amplify = promising_degree
		else:
			# Behind: danger amplifies the deficit, captures lessen it.
			scale_lessen = SCALE_CAPTURE_LESSEN
			scale_amplify = SCALE_DANGER
			degree_lessen = promising_degree
			degree_amplify = dangerous_degree

		if moves:  # I still have a move to go
			lessen = value * scale_lessen * degree_lessen / 6
			amplify = value * scale_amplify * degree_amplify / 6
		else:
			# The condition of forced stop: the game ends here.
			if multiplier == 1:
				# More score than the opponent: a win, very good.
				amplify = value * FORCED_STOP_BONUS
				lessen = 0
			else:
				# Less score than the opponent: a loss, not very good.
				amplify = 0
				lessen = value * FORCED_STOP_BONUS_LESSEN

		value += amplify - lessen
		value *= multiplier
		return value

	def minimax_move(self, state):
		"""Return the best move for the current player via full minimax."""
		# "successors" is a list of (player, state, move) tuples.
		# Pick the successor whose resulting state maximises our
		# worst-case (opponent-minimised) evaluation.  (The original
		# keyed a dict by evaluation value, which silently collapsed
		# distinct moves with equal values.)
		successors = state.successors()
		best = max(successors, key=lambda successor: self.get_min(successor[1]))
		return best[2]

	def get_min(self, state):
		"""Minimising (opponent) layer of the minimax search."""
		successors = state.successors()
		if not successors:  # terminal state: evaluate directly
			return self.evaluate(state)
		return min(self.get_max(successor[1]) for successor in successors)

	def get_max(self, state):
		"""Maximising (our) layer of the minimax search."""
		successors = state.successors()
		if not successors:  # terminal state: evaluate directly
			return self.evaluate(state)
		return max(self.get_min(successor[1]) for successor in successors)

	# IMPLEMENT ME!
	def alpha_beta_move(self, state):
		# TODO: real alpha-beta pruning; currently falls back to plain minimax.
		return self.minimax_move(state)

	# IMPLEMENT ME!
	def tournament_move(self, state):
		pass
