

import java.util.HashSet;


/**
 * AI search algorithm, called by the Game class.
 * Learns the best way to find the ship: a 9/9/9 neural network ranks the
 * board squares, and its weights are serialized to disk so learning
 * persists between runs.
 *
 * @author Larry Savago
 * @author Casey Huckins
 * @date 4/13/2012
 */
public class Reinforcement {

	/** Location of the serialized network weights (platform-independent separator). */
	final static String PATH = System.getProperty("user.dir") + java.io.File.separator + "weights.ser";

	// One input/hidden/output unit per board square (Board.ROWS * Board.COLS = 9).
	NeuralNetwork neuralNet = new NeuralNetwork(9, 9, 9);
	// NOTE(review): wWeight is never read in this class; kept because it is
	// package-visible — confirm no other class uses it before removing.
	Weight wWeight = new Weight(1, 2, 3.0);
	Square[] flattenedBoard = new Square[Board.ROWS * Board.COLS];
	HashSet<Square> alreadySearched = new HashSet<Square>();
	int iOutput = -1;
	boolean trained = true;

	public Reinforcement() {
	}

	/**
	 * Searches the board square-by-square, in the order ranked by the
	 * neural network, until the ball is found.
	 *
	 * @param board the board whose squares are flattened and searched
	 * @param ball  the target being searched for
	 * @return the number of squares searched before the ball was found
	 */
	public int search(Board board, Ball ball) {
		boolean found = false;
		int count = 0;
		alreadySearched.clear();

		// Build a 1-D, row-major array of the searchable squares.
		// (ROWS/COLS are static constants: access via the class, not the instance.)
		int pos = 0;
		for (int i = 0; i < Board.ROWS; i++) {
			for (int t = 0; t < Board.COLS; t++) {
				flattenedBoard[pos] = board.getSquare(i, t);
				pos++;
			}
		}

		// One input per square; an input flips to 1 once that square is chosen.
		double[] inputs = new double[9];
		// Assigned from getOutput() each iteration (the old length-8 initializer
		// was inconsistent with the 9-output network and immediately discarded).
		double[] output;

		// If already trained, restore the persisted weights before searching.
		if (trained) {
			Weights ws = null;
			try {
				ws = (Weights) (FileTool.loadObject(PATH));
			} catch (Exception e) {
				// Best-effort load: report with context and fall back to the
				// network's current weights (getMessage() alone may be null).
				System.out.println("Could not load weights from " + PATH + ": " + e);
			}

			if (ws != null) {
				neuralNet.setAllWeights(ws);
			}
		}

		while (!found) {
			neuralNet.setInput(inputs);
			neuralNet.activate();
			output = neuralNet.getOutput();
			iOutput = maxQIndex(output);
			System.out.println("debug - iOutput = " + iOutput);
			inputs[iOutput] = 1; // mark the chosen square so the next activation sees it

			if (ball.search(flattenedBoard[iOutput])) {
				found = true;
			}
			count++;
		}

		return count;
	}

	/** Persists the current network weights, but only while still in training mode. */
	public void learn() {
		if (!trained) {
			Weights ws = neuralNet.getAllWeights();
			FileTool.saveObject(ws, PATH);
		}
	}

	public boolean isTrained() {
		return trained;
	}

	public void setTrained(boolean b) {
		trained = b;
	}

	/**
	 * Returns the highest Q-value among squares not yet searched,
	 * or 0 if every square has been searched.
	 */
	private double maxQ(double[] a) {
		double max = -Double.MAX_VALUE;
		for (int i = 0; i < a.length; i++) {
			double d = a[i];
			// BUGFIX: the old check was alreadySearched.contains(i), which boxed
			// the int index and tested it against a HashSet<Square> — always
			// false, so max never updated. A move is legal when its SQUARE has
			// not been searched yet (mirrors maxQIndex).
			if (d > max && !alreadySearched.contains(flattenedBoard[i])) { // legal move
				max = d; // update
			}
		}
		return max == -Double.MAX_VALUE ? 0 : max;
	}

	/**
	 * Returns the index of the highest-valued unsearched square and marks
	 * that square as searched.
	 */
	private int maxQIndex(double[] a) {
		double max = -Double.MAX_VALUE; // -infinity
		// BUGFIX: the fallback index was 1; default to the first square (0) so an
		// all-searched board does not arbitrarily re-select square 1.
		int index = 0;

		for (int i = 0; i < a.length; i++) {
			double d = a[i];
			if (d > max && !alreadySearched.contains(flattenedBoard[i])) { // legal move
				max = d; // update
				index = i;
			}
		}
		alreadySearched.add(flattenedBoard[index]);
		return index;
	}

	// Elements of system:

		// Policy manager who decides what optimal policy should be used.

		// Policies
		// 1. Random - initial policy when first called
		// 2. Repeater policy - the user repeats previous entries
		// 3. Opposite policy - the user doesn't repeat previous entries
		// 4. Center policy - the user always puts ships in the center
		// 5. Outside policy - the user always puts the ship on the outside

		// Reward function - determines a rating system to order policies for short term gains.

		// Value function - determines a rating system in order to continue to evaluate what the best policy is over the life of the program.

		// Environment
			// set of possible states
			// set of possible actions
			// percepts
			// goals, successes and failures

	// Algorithm

	// Initialization setup
	// while (learning)
	//		Select Policy PolicyManager.getPolicy()
	//		while (searching)  - Each game or each search?
	//			choose an action based on policy
	//			Carry out action - search(square)
	// 			from success/failure of action now reward policy manager based on result
	//			Go to new State
}
