package othello.players.learnplayers;

import java.util.List;


import othello.model.Action;
import othello.model.State;
import othello.neuralnetwork.NeuralNetwork;
import othello.model.IBoard;

public class PosQ extends AbstractActionPlayer {

    /**
     * Creates a PosQ player backed by the given neural network.
     *
     * @param neuralNetwork the network used to estimate Q(s, a) values
     */
    public PosQ(NeuralNetwork neuralNetwork) {
        super("PosQ", neuralNetwork);
    }

    /**
     * Plays one turn: observes the current board state, queries the network
     * for Q(s, a') over all actions, selects a move positionally, and — while
     * in the {@code Training} benchmark state — performs a Q-learning network
     * update for the previous (state, action) pair before placing the piece.
     * Passes the turn when no action is available.
     */
    @Override
    public void beginTurn() {
        // Lazily initialise the total game count on the first turn.
        if (totalGames == -1) {
            totalGames = board.getRealMaxGames();
        }

        // Observe the current state s (defensive copy of the board bytes).
        State state = new State(cloneByteArray(board.getState()));

        // Forward-propagate s so the network outputs Q(s, a') for all actions.
        nn.forwardPropagate(state.toDoubles());
        double[] qOutput = nn.getOutput();

        // Positional action selection; null means no legal move exists.
        Action newAction = selectActionPos();

        // Greedy action w.r.t. the current Q estimates. Only consumed by the
        // training update below, but computed unconditionally to preserve the
        // original evaluation order (the helpers are project code whose side
        // effects are not visible here).
        List<Action> availableActions = getAvailableActions(board);
        Action maxAction = availableActions.isEmpty()
                ? null
                : getMaxMove(availableActions, qOutput);

        if (newAction == null) {
            // No legal move: pass the turn.
            board.pass(this);
        } else if (lastAction == null) {
            // First move of the game: nothing to update yet — just remember
            // the (state, action) pair for the next turn's Q update.
            newAction.setQValue(qOutput[actionIndex(newAction)]);
            lastAction = newAction;
            lastState = state;
            board.placePiece(this, newAction.getRow(), newAction.getCol());
        } else {
            if (this.benchmarkState == BenchmarkState.Training) {
                // Q-learning update: bootstrap the previous pair's target
                // from the greedy action's estimated value in state s.
                newAction.setQValue(qOutput[actionIndex(newAction)]);
                updateNN(state, maxAction);
                lastAction = newAction;
                lastState = state;
            }
            // Place the selected piece on the board.
            board.placePiece(this, newAction.getRow(), newAction.getCol());
        }
    }

    /** No per-turn cleanup is required for this player. */
    @Override
    public void endTurn() {
    }
}
