package agent;

import common.Move;
import java.util.LinkedList;
import java.util.List;

/**
 * This class represents a meta search algorithm that simulates the fictitious
 * hands. It is a meta-algorithm because it uses another algorithm (alpha-beta)
 * to work.
 */
public class HandsSimulator extends GameAlgorithm<AgentCurrentState> {

    /** Underlying adversarial-search algorithm used to evaluate each simulated hand. */
    private final AlphaBeta alphaBeta;

    public HandsSimulator() {
        alphaBeta = new AlphaBeta();
    }

    /**
     * This method has two modes of working: 1) If the agent doesn't know
     * exactly what are the opponent pieces: In this case, the method creates
     * *handsLimit* current fictitious states (in each of these states, the
     * pieces of the opponent are "guessed"). For each of these states its list
     * of successors is generated and the minimaxValues method of the alpha-beta
     * algorithm is called (which receives the successors list). We remember
     * that the minimaxValues method returns the minimax values of each
     * successor. The minimax values obtained by each successor/move in all
     * simulated hands are summed and, at the end, the method returns the move
     * corresponding to the successor that obtained a larger number of minimax
     * values. Notice that, for each current fictitious state, the agent pieces
     * are always the same, which means that the moves that may be played are
     * also the same. 2) If the agent already knows exactly what pieces its
     * opponent has: In this case, an AgentSearchState is built (where the
     * opponent pieces are not guessed), the takeDecision method of alpha-beta
     * is called and the move that resulted from the search process is returned.
     *
     * @param currentState the agent's current view of the game
     * @return the move judged best across the simulated hands (mode 1) or by
     *         a direct alpha-beta search (mode 2)
     * @throws IllegalStateException if no fictitious hands could be generated
     *         in mode 1, so there is no successor to choose a move from
     */
    @Override
    public Move takeDecision(AgentCurrentState currentState) {
        // TODO: in its present version this may lead to a NullPointerException
        // if the user chooses the alpha-beta algorithm — presumably because
        // handsLimit/random (inherited fields) are not initialized for that
        // configuration; confirm against GameAlgorithm.
        if (currentState.getOpponentPossiblePieces().size() > currentState.getNumberOfOpponentPieces()) {
            // Mode 1: the opponent's pieces are not fully known — simulate hands.
            List<AgentSearchState> possibleHands =
                    new LinkedList<>(currentState.buildGuessedCurrentStates(handsLimit, random));

            // One accumulator per possible agent move. The agent's pieces (and
            // therefore its moves) are identical in every simulated hand, so
            // successor index i denotes the same move across all hands.
            double[] summedMinimaxValues = new double[currentState.getAgentPossibleMoves().size()];

            // Keeps the successor list of the last hand evaluated; any hand's
            // list would do, since the moves coincide across hands.
            LinkedList<AgentSearchState> successors = null;

            // For each simulated hand, add each successor's minimax value to
            // the corresponding accumulator.
            for (AgentSearchState hand : possibleHands) {
                successors = (LinkedList<AgentSearchState>) hand.getSucessors();
                double[] handValues = alphaBeta.minimaxValues(successors);
                for (int i = 0; i < summedMinimaxValues.length; i++) {
                    summedMinimaxValues[i] += handValues[i];
                }
            }

            if (successors == null) {
                // No hands were generated: fail loudly instead of the silent
                // NullPointerException the previous version would raise here.
                throw new IllegalStateException(
                        "No fictitious hands were generated; cannot choose a move");
            }

            // Find the successor/move with the largest summed minimax value.
            int bestIndex = 0;
            for (int i = 1; i < summedMinimaxValues.length; i++) {
                if (summedMinimaxValues[i] > summedMinimaxValues[bestIndex]) {
                    bestIndex = i;
                }
            }
            return successors.get(bestIndex).getMove();
        }

        // Mode 2: the opponent's pieces are exactly known — build the concrete
        // search state and let alpha-beta decide directly.
        return alphaBeta.takeDecision(
                currentState.getAgentSearchState(currentState.getOpponentPossiblePieces()));
    }

    @Override
    public void setSearchDepth(int depthLimit) {
        // Depth control is delegated entirely to the wrapped alpha-beta search.
        alphaBeta.setSearchDepth(depthLimit);
    }
}
