package model;

import java.io.Serializable;
import java.util.Vector;

/**
 * 
 * This class is used to store all game states in for a specific game. This can
 * then be used by any machine learning algorithm as training set.
 *
 */
public class Game implements Serializable 
{

	private static final long serialVersionUID = 1L;

	// Board states recorded per side, in the order the moves were made.
	private Vector<double[]> whiteStates = new Vector<double[]>();
	private Vector<double[]> blackStates = new Vector<double[]>();

	// Winner of the game; -1 until setWinner() is called.
	// (Color/winner constants are defined in model.Field.)
	private byte winner = -1;

	// Final reward assigned to each side, e.g. by a learning algorithm.
	private double rewardWhite = 0.0;
	private double rewardBlack = 0.0;

	/**
	 * Records the current state of the board for the given side
	 * (should be called after every move).
	 * States for an unrecognized color are silently ignored.
	 *
	 * @param game  the {@code double[]} representation of a board
	 * @param color the side this state belongs to; one of
	 *              {@code Field.WHITE} or {@code Field.BLACK}
	 */
	public void addBoard(double[] game, byte color) 
	{
		if (color == Field.WHITE) {
			whiteStates.add(game);
		} else if (color == Field.BLACK) {
			blackStates.add(game);
		}
	}
	
	/**
	 * Sets the winner of this game. (These constants are defined in model.Field)
	 *
	 * @param winner the value representing the one who won the game
	 */
	public void setWinner(byte winner) 
	{
		this.winner = winner;
	}
	
	/**
	 * Sets the reward for the given side. Rewards for an unrecognized
	 * color are silently ignored.
	 *
	 * @param reward the reward value to store
	 * @param color  the side to reward; one of {@code Field.WHITE} or
	 *               {@code Field.BLACK}
	 */
	public void setReward(double reward, byte color)
	{
		if (color == Field.WHITE) {
			rewardWhite = reward;
		} else if (color == Field.BLACK) {
			rewardBlack = reward;
		}
	}

	/**
	 * Returns the winner of the game. (These constants are defined in model.Field)
	 *
	 * @return the winner, or -1 if no winner has been set
	 */
	public byte getWinner() 
	{
		return winner;
	}
	
	/**
	 * Returns the reward stored for the given side.
	 *
	 * @param color the side to query; one of {@code Field.WHITE} or
	 *              {@code Field.BLACK}
	 * @return the reward for that side, or 0 for an unrecognized color
	 */
	public double getReward(byte color)
	{
		if (color == Field.WHITE) {
			return rewardWhite;
		}
		if (color == Field.BLACK) {
			return rewardBlack;
		}
		return 0;
	}
	
	/**
	 * Returns the ordered sequence of board states recorded for the given
	 * side, one {@code double[]} per recorded move.
	 * Note: the returned Vector is the internal storage, not a copy, so
	 * modifications by the caller affect this game's recorded history.
	 *
	 * @param color the side to query; one of {@code Field.WHITE} or
	 *              {@code Field.BLACK}
	 * @return the state sequence, or {@code null} for an unrecognized color
	 */
	public Vector<double[]> getStateSequence(byte color) 
	{
		if (color == Field.WHITE) {
			return whiteStates;
		}
		if (color == Field.BLACK) {
			return blackStates;
		}
		return null;
	}
}
