package agent;

import common.Move;
import java.util.LinkedList;
import java.util.List;

/**
 * Meta search algorithm that simulates fictitious opponent hands. It is a
 * meta-algorithm because it delegates the actual game-tree search to another
 * algorithm (alpha-beta).
 */
public class HandsSimulator extends GameAlgorithm<AgentCurrentState> {

    /**
     * Underlying alpha-beta search, used both for full decision searches and
     * for computing the minimax values of successor states.
     */
    private final AlphaBeta alphaBeta;

    public HandsSimulator() {
        alphaBeta = new AlphaBeta();
    }

    /**
     * Chooses the agent's next move. Works in one of two modes:
     * <p>
     * 1) If the agent does not know exactly which pieces the opponent holds
     * (more possible pieces than actual opponent pieces), it builds
     * {@code handsLimit} fictitious current states where the opponent's pieces
     * are "guessed". For each fictitious state, the successor list is generated
     * and {@code AlphaBeta.minimaxValues} returns one minimax value per
     * successor. Because the agent's own pieces are identical in every guessed
     * hand, the successor lists are parallel across hands (index i always maps
     * to the same agent move), so the values can be summed per index. The move
     * whose successor accumulated the highest total is returned.
     * <p>
     * 2) If the opponent's pieces are exactly known, a plain
     * {@code AgentSearchState} is built (no guessing) and alpha-beta's
     * {@code takeDecision} result is returned directly.
     *
     * @param currentState the agent's current view of the game
     * @return the move selected by the simulation / search process
     */
    @Override
    public Move takeDecision(AgentCurrentState currentState) {
        // Mode 1: opponent hand not fully known -> simulate guessed hands.
        if (currentState.getOpponentPossiblePieces().size() > currentState.getNumberOfOpponentPieces()) {
            List<AgentSearchState> possibleHands =
                    new LinkedList<>(currentState.buildGuessedCurrentStates(handsLimit, random));

            LinkedList<AgentSearchState> successors = null;
            // One slot per possible agent move; accumulates the minimax value
            // obtained by the corresponding successor across all simulated hands.
            double[] accumulated = new double[currentState.getAgentPossibleMoves().size()];
            for (AgentSearchState hand : possibleHands) {
                // The agent's pieces are the same in every guessed hand, so the
                // successor lists are parallel: index i is the same move in each.
                successors = (LinkedList<AgentSearchState>) hand.getSucessors();
                double[] values = alphaBeta.minimaxValues(successors);
                for (int i = 0; i < accumulated.length; i++) {
                    accumulated[i] += values[i];
                }
            }
            /*
             * accumulated = [   |   |   ]  -> one entry per playable agent piece
             *
             * for each guessed opponent hand -> get successors
             *
             *      hand1       hand2       hand3    ...
             *    s1 s2 s3    s1 s2 s3    s1 s2 s3
             *
             * accumulated = [ sum of s1 over all hands | sum of s2 | sum of s3 ]
             */

            // Pick the move whose successor accumulated the highest total value.
            int bestIndex = 0;
            for (int i = 1; i < accumulated.length; i++) {
                if (accumulated[i] > accumulated[bestIndex]) {
                    bestIndex = i;
                }
            }

            // NOTE(review): assumes at least one guessed hand was generated;
            // if buildGuessedCurrentStates can return an empty collection,
            // 'successors' is still null here -- TODO confirm handsLimit > 0.
            return successors.get(bestIndex).getMove();
        }
        // Mode 2: opponent hand fully known -> plain alpha-beta search.
        return alphaBeta.takeDecision(
                currentState.getAgentSearchState(currentState.getOpponentPossiblePieces()));
    }

    @Override
    public void setSearchDepth(int depthLimit) {
        alphaBeta.setSearchDepth(depthLimit);
    }
}
