/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package de.mlp_distributed.mlp.core;

import de.mlp_distributed.mlp.math.function.DoubleDoubleFunction;
import de.mlp_distributed.mlp.math.mahout.Matrix;
import de.mlp_distributed.mlp.math.mahout.Vector;

/**
 * Sparse autoencoder: a {@link MultiLayerPerceptron} with a single hidden
 * layer of the same size as the input/output layers. After every online
 * training step it nudges the hidden-layer bias weights so that the running
 * average activation of each hidden unit approaches {@link #targetActivation}.
 */
public class SparseAutoencoder extends MultiLayerPerceptron {

	// Exponential moving average of the hidden-layer activations,
	// one entry per hidden unit (allocated in the constructor).
	private Vector averageActivation = null;

	// Weight of the newest observation in the moving-average update;
	// the history is weighted with (1 - estimationUpdateWeight).
	private double estimationUpdateWeight = 0.0001;

	// Desired average activation per hidden unit.
	// NOTE(review): -0.98 suggests a tanh-style activation in [-1, 1],
	// i.e. units should be "off" most of the time — confirm against the
	// activation function configured in MultiLayerPerceptron.
	private double targetActivation = -0.98;

	// Scales the sparsity correction relative to the base learning rate.
	private double sparseLearningRate = 0.5;

	/**
	 * Constructs a sparse autoencoder with a default momentum of 0.8.
	 * 
	 * @param nbInputOutputUnits number of units in the input, hidden and
	 *            output layers (all three are the same size)
	 * @throws Exception propagated from the superclass constructor
	 */
	public SparseAutoencoder(final int nbInputOutputUnits) throws Exception {
		this(nbInputOutputUnits, 0.8);
	}

	/**
	 * Constructs a sparse autoencoder.
	 * 
	 * @param nbInputOutputUnits number of units in the input, hidden and
	 *            output layers (all three are the same size)
	 * @param momentum momentum term passed to the superclass
	 * @throws Exception propagated from the superclass constructor
	 */
	public SparseAutoencoder(final int nbInputOutputUnits, final double momentum) throws Exception {
		// Hidden layer has the same width as input/output; no linear output.
		super(nbInputOutputUnits, nbInputOutputUnits, new int[] { nbInputOutputUnits }, false, momentum);
		this.averageActivation = this.vectorFactory.construct(this.nbUnits[1]);
	}

	/**
	 * Performs one online training step, then applies the sparsity
	 * correction to the hidden layer's bias weights.
	 * 
	 * @param input the input vector
	 * @param target the target vector (for an autoencoder, typically the input)
	 * @return the activation of the output layer after the step
	 */
	@Override
	public Vector trainOnline(final Vector input, final Vector target) {
		super.trainOnline(input, target);

		// units[1] is the hidden layer; weights[0] feeds into it.
		this.enforceSparseConstraint(this.units[1], this.weights[0]);

		return this.units[this.nbLayer - 1];
	}

	/**
	 * Updates the activation estimate and adjusts the bias weights so the
	 * average hidden activation drifts towards {@link #targetActivation}.
	 */
	private void enforceSparseConstraint(final Vector activation, final Matrix weights) {
		this.updateAverageActivation(activation);
		this.updateBiasWeights(weights);
	}

	/** Folds the latest hidden activations into the exponential moving average. */
	private void updateAverageActivation(final Vector activation) {
		this.averageActivation.assign(activation, new DoubleDoubleFunction() {
			@Override
			public double apply(final double oldAverage, final double newActivation) {
				final double w = SparseAutoencoder.this.estimationUpdateWeight;
				return ((1d - w) * oldAverage) + (w * newActivation);
			}
		});
	}

	/**
	 * Moves each hidden unit's bias weight (column 0) opposite to the gap
	 * between its average activation and the sparsity target.
	 */
	private void updateBiasWeights(final Matrix weights) {
		final int nbUnit = weights.rowSize();
		// Start at i = 1: row 0 is the bias unit itself and must not be adjusted.
		for (int i = 1; i < nbUnit; i++) {
			final double delta = this.learningRate * this.sparseLearningRate
					* (this.averageActivation.getQuick(i) - this.targetActivation);
			weights.set(i, 0, weights.get(i, 0) - delta);
		}
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param estimationUpdateWeight weight of the newest activation in the
	 *            moving-average update
	 * @return This, so other configurations can be chained.
	 */
	public SparseAutoencoder estimationUpdateWeight(final double estimationUpdateWeight) {
		this.estimationUpdateWeight = estimationUpdateWeight;
		return this;
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param targetActivation desired average activation per hidden unit
	 * @return This, so other configurations can be chained.
	 */
	public SparseAutoencoder targetActivation(final double targetActivation) {
		this.targetActivation = targetActivation;
		return this;
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param sparseLearningRate scale of the sparsity correction relative to
	 *            the base learning rate
	 * @return This, so other configurations can be chained.
	 */
	public SparseAutoencoder sparseLearningRate(final double sparseLearningRate) {
		this.sparseLearningRate = sparseLearningRate;
		return this;
	}

}
