/**
 * 
 */
package cs534.policy;

import java.util.ArrayList;



/**
 * @author jhostetler
 *
 */
/**
 * A Boltzmann (softmax-style) policy parameterized by a linear combination of
 * state-action features. Subclasses supply {@link #marginal_pi(int)}, which is
 * used when computing the policy gradient.
 * <p>
 * NOTE(review): the exact semantics of {@code marginal_pi} (presumably the
 * expected value of feature {@code i} under the policy in the current state)
 * are defined by subclasses and cannot be confirmed from this file.
 *
 * @author jhostetler
 */
public abstract class BoltzmannPolicy<Self, S, A> extends DifferentiablePolicy<Self, S, A>
{
	// Feature functions phi_i(s, a); one parameter weight per feature.
	protected final ArrayList<Function2<Double, S, A>> features_;
	// Optional map from (s, a) to the indices of features that can be
	// non-zero there; null means "consider every feature".
	protected final Function2<int[], S, A> feature_filter_;
	
	/**
	 * Creates a policy with no feature filter (every feature is evaluated).
	 * @param parameters One weight per feature; length must equal
	 *        {@code features.size()} (checked by assertion).
	 * @param features The feature functions.
	 */
	public BoltzmannPolicy( final double[] parameters, 
							final ArrayList<Function2<Double, S, A>> features )
	{
		// Delegate to the general constructor; null filter == no filtering.
		this( parameters, features, null );
	}
	
	/**
	 * Creates a policy with an optional feature filter.
	 * @param parameters One weight per feature; length must equal
	 *        {@code features.size()} (checked by assertion).
	 * @param features The feature functions.
	 * @param feature_filter Maps (s, a) to the indices of potentially
	 *        non-zero features, or null to consider every feature.
	 */
	public BoltzmannPolicy( final double[] parameters, 
							final ArrayList<Function2<Double, S, A>> features,
							final Function2<int[], S, A> feature_filter )
	{
		super( parameters );
		features_ = features;
		feature_filter_ = feature_filter;
		// Historical note: an earlier revision reserved one extra parameter
		// for a bias term ("parameters.length == features.length + 1");
		// the current contract is exactly one weight per feature.
		assert( parameters.length == features.size() );
	}
	
	/**
	 * Subclass-defined marginal term subtracted from the raw feature value in
	 * the gradient computation.
	 * @param feature Index of the feature.
	 * @return The marginal value for that feature in the current state.
	 */
	protected abstract double marginal_pi( final int feature );
	
	/**
	 * Override this method if you're able to quickly determine that a
	 * significant number of the features will be 0 in a given state.
	 * @param s The state.
	 * @param a The action.
	 * @return The indices of the relevant features; when no filter was
	 *         supplied, all indices [0, features_.size()) are returned.
	 */
	protected int[] filterFeatures( final S s, final A a )
	{
		if( feature_filter_ != null ) {
			return feature_filter_.apply( s, a );
		}
		else {
			return cs534.util.List.range( 0, features_.size() );
		}
	}
	
	/**
	 * Gradient of the policy's log-likelihood with respect to the parameters,
	 * evaluated at action {@code a} in the current state {@code s_}
	 * (presumably a field inherited from DifferentiablePolicy -- not visible
	 * in this file).
	 *
	 * @see cs534.policy.DifferentiablePolicy#gradient(Object)
	 * @param a The action.
	 * @return One gradient component per feature: f_i(s_, a) - marginal_pi(i).
	 */
	@Override
	public double[] gradient( final A a ) 
	{
		// NOTE(review): the original had two identical branches on
		// feature_filter_ != null; the filtered iteration over
		// feature_filter_.apply(s, a) was commented out, so the filter was
		// never actually applied here. The dead branch is merged away.
		// TODO: confirm whether filterFeatures(s_, a) should drive this loop;
		// doing so would change grad[i] for unfiltered indices from
		// (f_i - marginal_pi(i)) to 0, so it is not enabled silently.
		final double[] grad = new double[features_.size()];
		for( int i = 0; i < features_.size(); ++i ) {
			final double f_i = features_.get( i ).apply( s_, a );
			grad[i] = f_i - marginal_pi( i );
		}
		return grad;
	}
}
