package miniMax;

import java.util.Iterator;
import java.util.LinkedList;
import java.util.Vector;
import domain.Agent;
import domain.AgentSmart;
import domain.Board;
import domain.Debug;
import domain.Flag;
import domain.SquareIce;
import domain.Game.action;

/**
 * Adversarial search algorithms for the capture-the-flag game: classic
 * minimax with optional alpha-beta pruning, maximax (each agent maximizes
 * its own score), and expectiminimax (chance nodes model icy squares where
 * a move may fail with a given probability).
 *
 * All searches clone the game state into a {@link StateNode} root, so the
 * live board/agents/flags passed in are never mutated.
 */
public class MiniMax {

	/** The computer-controlled agent (MAX player). */
	private final Agent	_smart;
	/** The opposing agent (MIN player in minimax; second maximizer in maximax). */
	private final Agent	_human;

	/**
	 * @param smart the computer agent (plays MAX)
	 * @param human the opposing agent (plays MIN)
	 */
	public MiniMax(Agent smart, Agent human) {
		this._smart = smart;
		this._human = human;
	}

	/**
	 * Runs minimax (alpha-beta pruned when {@code withCutOff} is true) from the
	 * given state and returns the chosen action for the smart agent.
	 *
	 * @param board      current board (cloned by StateNode, never mutated)
	 * @param agents     all agents participating in the game
	 * @param flags      all flags on the board
	 * @param maxSteps   maximum search depth in plies
	 * @param withCutOff when true, prune subtrees where alpha >= beta
	 * @return a list holding the single best action found for the smart agent
	 */
	public LinkedList<action> runMiniMaxSearch(Board board, Vector<Agent> agents, Vector<Flag> flags, int maxSteps, boolean withCutOff) {
		LinkedList<action> ans = new LinkedList<action>();
		double alpha 		= Double.NEGATIVE_INFINITY;		// best score MAX can guarantee so far
		double beta  		= Double.POSITIVE_INFINITY;		// best score MIN can guarantee so far
		Agent agentMax		= this._smart;
		Agent agentMin		= this._human;

		StateNode root = new StateNode(board, agents, flags, ans, agentMax, agentMin);
		// root now has clones of board, agents & flags.

		AlgorithmResult res = alphaBeta(root.getAgentGood(), root, alpha, beta, maxSteps, withCutOff);

		ans.addLast(res.get_act());

		return ans;
	}

	/**
	 * Alpha-beta search (see the pseudo-code reference at the bottom of this file).
	 *
	 * @param agentInTurn the agent whose move is simulated at this ply
	 * @param node        current (cloned) game state
	 * @param alpha       lower bound MAX has secured on the path to the root
	 * @param beta        upper bound MIN has secured on the path to the root
	 * @param counter     remaining depth budget; 0 (or less) terminates the search
	 * @param withCutOff  when false, behaves as plain minimax (no pruning)
	 * @return the value of this node together with the action leading to it;
	 *         {@code null} only if {@code agentInTurn} is neither good nor bad
	 *         (which should be unreachable)
	 */
	private AlgorithmResult alphaBeta(Agent agentInTurn, StateNode node, double alpha, double beta, int counter, boolean withCutOff) {
		if (endCondition(node, counter)){
			// Leaf: goal state reached or depth budget exhausted — evaluate heuristically.
			return new AlgorithmResult(node.getValue(), node.getActionThatGotMeHere());
		}
		else{
			Vector<StateNode> children = expand(agentInTurn, node, false);

			StateNode chosenNode = children.firstElement();
			if (isGoodAgentTurn(agentInTurn)){
				for (StateNode child : children) {
					AlgorithmResult res = alphaBeta(child.getAgentBad(), child, alpha, beta, counter-1, withCutOff);
					double score = res.get_value();
					if (score > alpha){
						alpha 		= score;		// we have found a better best move.
						chosenNode	= child;		// save the node that this move causes you to reach.
					}

					if (withCutOff && alpha >= beta){
						// Prune: MIN already has a better option higher up, so this subtree is irrelevant.
						AlgorithmResult tmp = new AlgorithmResult(alpha, child.getActionThatGotMeHere());	// cut off
						Debug.println("CUT OFF");
						printResult(tmp, agentInTurn, node);	// log the value actually returned (was: the child's raw result)
						return tmp;
					}
				}
				AlgorithmResult res = new AlgorithmResult(alpha, chosenNode.getActionThatGotMeHere());	// this is our best move
				printResult(res, agentInTurn, node);
				return res;
			}
			else if (isBadAgentTurn(agentInTurn)){
				for (StateNode child : children) {
					AlgorithmResult res = alphaBeta(child.getAgentGood(), child, alpha, beta, counter-1, withCutOff);
					double score = res.get_value();
					if (score < beta){
						beta		= score;		// opponent has found a better worse move
						chosenNode	= child;		// save the node that this move causes you to reach.
					}

					if (withCutOff && alpha >= beta){
						// Prune: MAX already has a better option higher up.
						AlgorithmResult tmp = new AlgorithmResult(beta, child.getActionThatGotMeHere());		// cut off
						Debug.println("CUT OFF");
						printResult(tmp, agentInTurn, node);	// log the value actually returned
						return tmp;
					}
				}

				AlgorithmResult res = new AlgorithmResult(beta, chosenNode.getActionThatGotMeHere());				// this is the opponent's best move
				printResult(res, agentInTurn, node);
				return res;
			}
			else{	// No one's turn?!?
				System.err.println(getClass().getName()+": Not supposed to get here...");
				return null;
			}
		}
	}

	/** Traces an alpha-beta/expecti result back to the node it is returned to. */
	private void printResult(AlgorithmResult res, Agent agentInTurn, StateNode node) {
		Debug.println("Result returned to "+ node.getPath() + ": " + agentInTurn.getName() + " "+ res);
	}

	/** Traces a maximax result back to the node it is returned to. */
	private void printResult(AlgorithmResultMaxiMax res, Agent agentInTurn, StateNode node) {
		Debug.println("Result returned to "+ node.getPath() + ": " + agentInTurn.getName() + " "+ res);
	}

	/**
	 * Generates the five successor states (UP, RIGHT, DOWN, LEFT, SHOOT) for the
	 * agent currently in turn, in that fixed order.
	 *
	 * @param ignoreIce when true, simulates the moves as if the agent were not
	 *                  standing on ice (used for the "move succeeds" branch of
	 *                  expectiminimax chance nodes)
	 * @return the successors in action order: UP, RIGHT, DOWN, LEFT, SHOOT
	 */
	private Vector<StateNode> expand(Agent agentInTurn, StateNode node, boolean ignoreIce) {
		Debug.println("\n-------------------------------------\nSimulate all actions from state "+node.getPath()+":\n-------------------------------------");
		StateNode after_up      = node.simulateNode(agentInTurn, action.UP, ignoreIce);
		StateNode after_right   = node.simulateNode(agentInTurn, action.RIGHT, ignoreIce);
		StateNode after_down    = node.simulateNode(agentInTurn, action.DOWN, ignoreIce);
		StateNode after_left    = node.simulateNode(agentInTurn, action.LEFT, ignoreIce);
		StateNode after_shoot   = node.simulateNode(agentInTurn, action.SHOOT, ignoreIce);
		Debug.println("Simulation finished.\n-------------------------------------\n");

		Debug.println(after_up + "\n" + after_right + "\n" + after_down + "\n" + after_left + "\n" + after_shoot + "\n");

		Vector<StateNode> children = new Vector<StateNode>();
		children.addElement(after_up);
		children.addElement(after_right);
		children.addElement(after_down);
		children.addElement(after_left);
		children.addElement(after_shoot);

		return children;
	}

	/**
	 * Runs maximax search (each agent maximizes its OWN utility rather than
	 * minimizing the opponent's) and returns the chosen action for the smart agent.
	 *
	 * @param maxSteps maximum search depth in plies
	 * @return a list holding the single best action found for the smart agent
	 */
	public LinkedList<action> runMaxiMaxSearch(Board board, Vector<Agent> agents, Vector<Flag> flags, int maxSteps) {
		LinkedList<action> ans = new LinkedList<action>();
		Agent agentMax1	= this._smart;
		Agent agentMax2	= this._human;

		StateNode root = new StateNode(board, agents, flags, ans, agentMax1, agentMax2);
		// root now has clones of board, agents & flags.

		AlgorithmResultMaxiMax res = maxiMax(root.getAgentGood(), root, maxSteps);

		ans.addLast(res.get_act());

		return ans;
	}

	/**
	 * Maximax recursion: the agent in turn picks the child that maximizes its
	 * own value component (good value for the good agent, bad value for the bad
	 * agent); both components are propagated up unchanged.
	 */
	private AlgorithmResultMaxiMax maxiMax(Agent agentInTurn, StateNode node, int counter) {
		if (endCondition(node, counter)){
			AlgorithmResultMaxiMax ans = new AlgorithmResultMaxiMax(node.getValueGoodAgent(), node.getValueBadAgent(), node.getActionThatGotMeHere());
			printResult(ans, agentInTurn, node);
			return ans;
		}
		else{
			Vector<StateNode> children = expand(agentInTurn, node, false);

			AlgorithmResultMaxiMax maxResultTillNow = new AlgorithmResultMaxiMax(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, null);
			for (StateNode child : children) {
				// The opponent moves at the next ply:
				Agent opponent = setOpponent(agentInTurn, child);

				// recursive call:
				AlgorithmResultMaxiMax res = maxiMax(opponent, child, counter-1);
				res.setActionThatGotMeHere(child, children);

				// Each agent keeps the child maximizing its OWN value component:
				if (isGoodAgentTurn(agentInTurn) && res.get_goodValue() > maxResultTillNow.get_goodValue()){
					maxResultTillNow	= res;
				}
				else if (isBadAgentTurn(agentInTurn) && res.get_badValue() > maxResultTillNow.get_badValue()){
					maxResultTillNow	= res;
				}
			}

			printResult(maxResultTillNow, agentInTurn, node);
			return maxResultTillNow;
		}
	}

	/**
	 * Returns the opponent of the agent in turn, taken from the CHILD state
	 * (the child holds its own agent clones).
	 */
	private Agent setOpponent(Agent agentInTurn, StateNode child) {
		Agent opponent = null;
		if (isGoodAgentTurn(agentInTurn)){
			opponent = child.getAgentBad();
		}
		else if (isBadAgentTurn(agentInTurn)){
			opponent = child.getAgentGood();
		}
		return opponent;
	}

	/**
	 * Runs expectiminimax: when the agent in turn stands on an icy square, each
	 * move is a chance node — with {@code probability} the move behaves as on
	 * normal ground, with {@code 1-probability} the icy behavior applies.
	 *
	 * @param maxSteps    maximum search depth in plies
	 * @param probability probability that a move on ice succeeds as intended
	 * @return a list holding the single best action found for the smart agent
	 */
	public LinkedList<action> runExpectiMiniMaxSearch(Board board, Vector<Agent> agents, Vector<Flag> flags, int maxSteps, double probability) {
		LinkedList<action> ans = new LinkedList<action>();
		Agent agentMax	= this._smart;
		Agent agentMin	= this._human;

		StateNode root = new StateNode(board, agents, flags, ans, agentMax, agentMin);
		// root now has clones of board, agents & flags.

		AlgorithmResult res = expecti(root.getAgentGood(), root, maxSteps, probability);

		ans.addLast(res.get_act());

		return ans;

	}

	/**
	 * Expectiminimax recursion. On non-icy squares behaves exactly like plain
	 * minimax; on icy squares each action's value is the expectation
	 * {@code p * value(ignoring ice) + (1-p) * value(with ice)}, where both
	 * outcome states are expanded from the same parent (children of the two
	 * expansions correspond index-by-index, action order being fixed).
	 */
	private AlgorithmResult expecti(Agent agentInTurn, StateNode node, int counter, double probability) {
		if (endCondition(node, counter)){
			AlgorithmResult res = new AlgorithmResult(node.getValue(), node.getActionThatGotMeHere());
			printResult(res, agentInTurn, node);
			return res;
		}
		else{
			Vector<StateNode> children = expand(agentInTurn, node, false);

			double maxResultTillNow = Double.NEGATIVE_INFINITY;
			double minResultTillNow = Double.POSITIVE_INFINITY;
			StateNode chosenNode = children.firstElement();

			if (isGoodAgentTurn(agentInTurn)){
				if (isNodeIcyForAgent(node, agentInTurn) && !agentInTurn.is_ignoringIce()){
					// Chance node: pair up icy and non-icy outcomes of each action by index.
					Vector<StateNode> childrenWithIce = children;
					Vector<StateNode> childrenWithoutIce = expand(agentInTurn, node, true);

					for (int i = 0; i < childrenWithIce.size(); i++) {
						StateNode childWithIce 		= childrenWithIce.elementAt(i);
						StateNode childWithoutIce 	= childrenWithoutIce.elementAt(i);

						Agent min = childWithIce.getAgentBad();

						AlgorithmResult resWithIce 		= expecti(min, childWithIce, counter-1, probability);
						AlgorithmResult resWithoutIce 	= expecti(min, childWithoutIce, counter-1, probability);

						// Expected value over the two stochastic outcomes of this action:
						double score = probability * resWithoutIce.get_value() + (1-probability) * resWithIce.get_value();

						if ( score > maxResultTillNow ){
							maxResultTillNow 	= score;
							chosenNode 			= childWithIce;
						}
					}

					AlgorithmResult res = new AlgorithmResult(maxResultTillNow, chosenNode.getActionThatGotMeHere());
					printResult(res, agentInTurn, node);
					return res;
				}
				else{
					for (StateNode child : children) {
						AlgorithmResult res = expecti(child.getAgentBad(), child, counter-1, probability);
						double score = res.get_value();

						if (score > maxResultTillNow){
							maxResultTillNow	= score;		// we have found a better best move.
							chosenNode			= child;		// save the node that this move causes you to reach.
						}
					}
				}
				AlgorithmResult res = new AlgorithmResult(maxResultTillNow, chosenNode.getActionThatGotMeHere());	// this is our best move
				printResult(res, agentInTurn, node);
				return res;
			}
			else if (isBadAgentTurn(agentInTurn)){
				if (isNodeIcyForAgent(node, agentInTurn) && !agentInTurn.is_ignoringIce()){
					// Chance node for the minimizing agent — same pairing by index.
					Vector<StateNode> childrenWithIce = children;
					Vector<StateNode> childrenWithoutIce = expand(agentInTurn, node, true);

					for (int i = 0; i < childrenWithIce.size(); i++) {
						StateNode childWithIce 		= childrenWithIce.elementAt(i);
						StateNode childWithoutIce 	= childrenWithoutIce.elementAt(i);

						Agent max = childWithIce.getAgentGood();

						AlgorithmResult resWithIce 		= expecti(max, childWithIce, counter-1, probability);
						AlgorithmResult resWithoutIce 	= expecti(max, childWithoutIce, counter-1, probability);

						double score = probability * resWithoutIce.get_value() + (1-probability) * resWithIce.get_value();

						if ( score < minResultTillNow ){
							minResultTillNow = score;
							chosenNode = childWithIce;
						}
					}

					AlgorithmResult res = new AlgorithmResult(minResultTillNow, chosenNode.getActionThatGotMeHere());
					printResult(res, agentInTurn, node);
					return res;
				}
				else{
					for (StateNode child : children) {
						AlgorithmResult res = expecti(child.getAgentGood(), child, counter-1, probability);
						double score = res.get_value();
						if (score < minResultTillNow){
							minResultTillNow	= score;		// opponent has found a better worse move
							chosenNode			= child;		// save the node that this move causes you to reach.
						}
					}
				}
				AlgorithmResult res = new AlgorithmResult(minResultTillNow, chosenNode.getActionThatGotMeHere());				// this is the opponent's best move
				printResult(res, agentInTurn, node);
				return res;
			}
			else{	// No one's turn?!?
				System.err.println(getClass().getName()+": Not supposed to get here...");
				return null;
			}
		}
	}

	/** True when the square under the agent in turn is ice (triggers chance nodes). */
	private boolean isNodeIcyForAgent(StateNode node, Agent agentInTurn) {
		return 	isGoodAgentTurn(agentInTurn)&& node.getAgentGood().getSquare() instanceof SquareIce	||
				isBadAgentTurn(agentInTurn)&& node.getAgentBad().getSquare() instanceof SquareIce;
	}

	/**
	 * Search stops at a goal state or when the depth budget is exhausted.
	 * Uses {@code <=} so a negative maxSteps cannot cause unbounded recursion
	 * (the old {@code == 0} test would skip past zero and never terminate).
	 */
	private boolean endCondition(StateNode node, int counter) {
		return node.isGoalNode() || counter <= 0;
	}

	/** The good (MAX) agent is identified by its concrete type. */
	private boolean isGoodAgentTurn(Agent agent) {
		return (agent instanceof AgentSmart);
	}

	/** Any non-smart agent is treated as the bad (MIN) agent. */
	private boolean isBadAgentTurn(Agent agent) {
		return !(agent instanceof AgentSmart);
	}


	// MiniMax Algorithm:
	// ==================
	//	alpha-beta(player,board,alpha,beta)
	//    if(game over in current board position)
	//        return winner
	//
	//    children = all legal moves for player from this board
	//    if(max's turn)
	//        for each child
	//            score = alpha-beta(other player,child,alpha,beta)
	//            if score > alpha then alpha = score (we have found a better best move)
	//            if alpha >= beta then return alpha (cut off)
	//        return alpha (this is our best move)
	//    else (min's turn)
	//        for each child
	//            score = alpha-beta(other player,child,alpha,beta)
	//            if score < beta then beta = score (opponent has found a better worse move)
	//            if alpha >= beta then return beta (cut off)
	//        return beta (this is the opponent's best move)



}
