/*
 * Javlov - a Java toolkit for reinforcement learning with multi-agent support.
 * 
 * Copyright (c) 2009 Matthijs Snel
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov.policy;

import java.util.List;

import net.javlov.Action;
import net.javlov.ContinuousAction;
import net.javlov.NeuralNet;
import net.javlov.Option;
import net.javlov.Policy;
import net.javlov.State;

/**
 * A policy that uses a neural network to select actions. The class itself is abstract; one of
 * the Continuous, Discrete or DiscreteBinary versions should be instantiated.
 * 
 * @author Matthijs Snel
 *
 */
public abstract class NeuralPolicy implements Policy {

	/** Network that maps a state vector to this policy's raw output. */
	protected NeuralNet net;
	
	protected NeuralPolicy() {}
	
	/**
	 * Returns the neural network this policy uses to select options.
	 * 
	 * @return the backing network (may be null if never set)
	 */
	public NeuralNet getNet() {
		return net;
	}
	
	/**
	 * Sets the neural network this policy uses to select options.
	 * 
	 * @param network the backing network
	 */
	public void setNet( NeuralNet network ) {
		net = network;
	}
	
	/**
	 * Uses a template continuous action passed in as parameter to the constructor; the values
	 * of the action are set to the output of the neural network when the state is passed as
	 * input.
	 * 
	 * @author Matthijs Snel
	 *
	 */
	public static class Continuous extends NeuralPolicy {

		/** Shared action instance whose values are overwritten on every selection step. */
		protected ContinuousAction template;
		
		/**
		 * @param net the network; its output size must equal the action's dimensionality
		 * @param action template action whose values get overwritten with the net output
		 * @throws IllegalArgumentException if the net output size does not match the
		 *         action's dimensionality
		 */
		public Continuous(NeuralNet net, ContinuousAction action) {
			if ( net.getOutputSize() != action.getDimensionality() )
				// report both sizes so the mismatch is diagnosable from the message alone
				throw new IllegalArgumentException("Net output dim " + net.getOutputSize()
						+ " does not match action dim: " + action.getDimensionality() );
			setNet(net);
			this.template = action;
		}
		
		/**
		 * {@inheritDoc}
		 * 
		 * <p>Note: the same template instance is returned on every call and its values are
		 * overwritten in place, so callers must not cache the returned option across steps.
		 */
		@Override
		public Option getOption(State s) {
			net.feedForward( (double[]) s.getData() );
			template.setValues(net.getOutput());
			return template;
		}

		/**
		 * Not supported: this policy derives its action from the network output, not
		 * from Q-values.
		 * 
		 * @throws UnsupportedOperationException always
		 */
		@Override
		public Option getOption(double[] qvalues) {
			throw new UnsupportedOperationException();
		}
	}
	
	/**
	 * Output of the net is interpreted as the index of the action that should be returned. If
	 * the net has more than one output neuron, the first neuron that is "on" (output > 0.5) is
	 * interpreted as the index of the action. E.g. second neuron on -> second action selected.
	 * 
	 * If the neuron output should be interpreted as a binary index, use
	 * NeuralPolicy.DiscreteBinary instead.
	 * 
	 * @author Matthijs Snel
	 *
	 */
	public static class Discrete extends NeuralPolicy {

		/** Options selectable by this policy; the net output indexes into this list. */
		protected List<? extends Option> optionPool;
		
		/**
		 * @param net the network whose output is interpreted as an option index
		 * @param optionPool the options the net output indexes into
		 */
		public Discrete(NeuralNet net, List<? extends Option> optionPool) {
			setNet(net);
			this.optionPool = optionPool;
		}
		
		/**
		 * {@inheritDoc}
		 */
		@Override
		public Option getOption(State s) {
			net.feedForward( (double[]) s.getData() );
			return getOptionFromOutput(net.getOutput());
		}
		
		/**
		 * Not supported: this policy derives its action from the network output, not
		 * from Q-values.
		 * 
		 * @throws UnsupportedOperationException always
		 */
		@Override
		public Option getOption(double[] qvalues) {
			throw new UnsupportedOperationException();
		}
		
		/**
		 * Maps the raw net output to an option. With multiple output neurons, the first
		 * neuron whose activation exceeds 0.5 determines the index; with a single output
		 * neuron, the activation itself is truncated to an int index.
		 * 
		 * @param output the raw network output, one entry per output neuron
		 * @return the selected option, or {@code null} when there are multiple outputs
		 *         and no neuron is "on" (none exceeds 0.5)
		 */
		protected Option getOptionFromOutput(double[] output) {
			if ( output.length > 1 ) {
				for ( int i = 0; i < output.length; i++ )
					if ( output[i] > 0.5 )
						return optionPool.get(i);	
			}
			else
				// single output neuron: truncate the activation to an index (e.g. 1.9 -> 1)
				return optionPool.get((int)output[0]);
			// multi-output case with no active neuron: no option selected
			return null;
		}
	}
	
	/**
	 * Interprets the net outputs as one binary number: each neuron contributes one bit
	 * (activation > 0.5 means the bit is set), with the first neuron as the most
	 * significant bit. The resulting value indexes into the option pool.
	 */
	public static class DiscreteBinary extends Discrete {
		/**
		 * @param net the network whose outputs encode a binary option index
		 * @param actionPool the options; assumed to hold at least 2^(number of output
		 *        neurons) entries -- NOTE(review): not validated, confirm against callers
		 */
		public DiscreteBinary(NeuralNet net, List<? extends Action> actionPool) {
			super(net, actionPool);
		}
		
		/**
		 * Not supported: this policy derives its action from the network output, not
		 * from Q-values.
		 * 
		 * @throws UnsupportedOperationException always
		 */
		@Override
		public Option getOption(double[] qvalues) {
			throw new UnsupportedOperationException();
		}
		
		/**
		 * Decodes the outputs as an unsigned binary number ({@code output[0]} = most
		 * significant bit) and returns the option at that index. An index beyond the
		 * pool size results in an IndexOutOfBoundsException from the list.
		 */
		@Override
		protected Option getOptionFromOutput(double[] output) {
			int result = 0, val = 1;
			// walk from least to most significant bit, doubling the bit value each step
			for ( int i = output.length-1; i >= 0; i-- ) {
				if ( output[i] > 0.5 )
					result += val;
				val *= 2;
			}
			return optionPool.get(result);
		}
	}
}

