#pragma once

#include <climits>
#include <cmath>
#include "GameStateEvaluator.h"

/*	MINIMAX CLASS

	Given a GameStateEvaluator that judges all GameStates from a single perspective,
	and a beginning GameState,
	This algorithm will navigate the tree of possibilities going forward from the current board.

	It is responsible for finding the maximum score for the GameStateEvaluator on the assumption
	that every opponent (using the Evaluator's criteria) will make decisions to minimize
	the same score.

	This is not a Singleton class.  Create one Minimax per AI player,
	preferably when the player is, itself, instantiated.

	Restrictions:
		GameStateEvaluator->evaluate() must return within [INT_MIN+1, INT_MAX-1].
		evaluate() is assumed to be deterministic.
*/
class Minimax {
public:
	/*	Non-default constructor.
		Establishes who this algorithm is going to be thinking for.
		@param whoseTurnItIs someone with an evaluate(GameState*) method
	*/
					Minimax(
						GameStateEvaluator*	whoseTurnItIs
					) : myPlayer(whoseTurnItIs)
					{
					} // end constructor

	/*	Entry point of the Minimax algorithm.
		Runs a depth-limited negamax search with alpha-beta pruning and
		returns the immediate successor state judged best for myPlayer.
		@param startingState the current turn, which is prompting a decision of us
		@return the chosen successor GameState; this class never deletes states,
		        so lifetime is governed by GameState::listAvailableActions()
	*/
	GameState*		getBestAction(
						GameState*	startingState
					) {
						depthLeft =			MAX_DEPTH;
						candidateAction =	startingState;

						// The initial values of alpha and beta were chosen to be "one off" extrema
						// because negating INT_MIN overflows (undefined behavior) in
						// twos-complement arithmetic: -INT_MIN would still compare as INT_MIN.
						// That caused a bug in the (scoreCandidate >= beta) short-circuit
						// mechanism of getBestScore(), whereby the first branch
						// of any 'opponent' node would be the only branch visited.
						// The return value is deliberately discarded: the decision is
						// delivered through the implied output this->candidateAction.
						getBestScore(INT_MIN + 1, INT_MAX - 1, true);
						return candidateAction;
					} // end method

private:
	static const
	unsigned int		MAX_DEPTH = 3;		// ply limit of the search
	GameState*			candidateAction;	// in: node under deliberation; out: chosen action
	unsigned short		depthLeft;			// countdown of remaining recursion levels
	GameStateEvaluator*	myPlayer;			// perspective from which every state is scored

				/*	Recursive engine of the Minimax algorithm.
					Determines the best score that can result from any of the branching actions.
					Has implied parameters this->candidateAction and this->depthLeft.
						candidateAction is the deliberated state from which actions may be selected.
						depthLeft is a countdown-limiter on the levels of recursion.
					@param alpha				a lower bound for the return value
					@param beta					an upper bound for the return value
					@param positivePerspective	true if and only if this decision node belongs to our AI
												Said to be a positive perspective since the state evaluation
												does not become negated when this parameter is true.
					This method also has an implied return value in this->candidateAction.
						candidateAction will hold the 'path to be taken' when the top level
						of recursion resolves.
					@return the best score that can be expected from submitting candidateAction
				*/
	ExpectedValue	getBestScore(
						ExpectedValue	alpha,
						ExpectedValue	beta,
						bool			positivePerspective
					) {
						ExpectedValue		bestScore = INT_MIN;
						GameState*			bestAction = nullptr;	// defensively initialized; the first loop pass below always assigns it
						StateList::iterator	end;
						StateList::iterator	i;
						{
							// Leaves, in the decision tree, are the only nodes consulted for their values,
							// although branches at the limit of recursion are considered leaves for this purpose.
							// Non-leaf nodes (branches!) are polled for their forward, adjacent states,
							// which will be recursed.
							// This code block equates "the state of having no available actions"
							// with "the state of being a leaf", so even if the GameState has an isGameOver()
							// method, it's redundant to this assumption.
							StateList& possibleActions =	candidateAction->listAvailableActions();
							i =								possibleActions.begin();
							end =							possibleActions.end();
							if (i == end || !depthLeft)
								return (positivePerspective
									? myPlayer->evaluate(candidateAction)
									: -myPlayer->evaluate(candidateAction)
								);

						}

						--depthLeft; // We're fluctuating the depth counter as an alternative to +1 param.  Worth it?

						do {
							candidateAction = *i;

							// In 'classic' NegaMax, the next node's evaluations are always the diametric opposite
							// of this node's evaluations - but my implementation asserts that the perspective
							// only changes when moving from opponents to 'you' and from 'you' to opponents,
							// in a 3+ player environment.
							bool newPositivePerspective = (candidateAction->getActivePlayer() == myPlayer);
							auto scoreCandidate = (newPositivePerspective == positivePerspective)
								? getBestScore(alpha, beta, newPositivePerspective)
								: -getBestScore(-beta, -alpha, newPositivePerspective);

							if (scoreCandidate > bestScore)
							{
								bestScore = scoreCandidate;
								bestAction = *i;
							} // end if: found and recorded a better action

							if (scoreCandidate >= beta)
								break; // This level is completed early if the bounds 'cross each other'.
							else if (scoreCandidate > alpha)
								alpha = scoreCandidate;

						} while (end != ++i); // end loop: evaluation of the next level of states

						++depthLeft; // We're fluctuating the depth counter as an alternative to +1 param.  Worth it?

						candidateAction = bestAction; // This information is currently discarded for all but the root node.
						return bestScore;
					} // end method
}; // end class
